// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral is two sets of mspans: those with free objects (c.partial)
// and those that are completely allocated (c.full).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
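	// spanclass identifies the size class and noscan-ness of the
	// spans in this mcentral. (A spanClass packs a size class and a
	// noscan bit into one value; see mheap.go.)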
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
}

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}
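
// For illustration (a sketch, not part of the runtime API): sweepgen
// advances by 2 each GC cycle, so the index sweepgen/2%2 alternates
// between 0 and 1 and the two sets trade roles:
//
//	sweepgen = 4: swept sets are partial[0]/full[0], unswept are partial[1]/full[1]
//	sweepgen = 6: swept sets are partial[1]/full[1], unswept are partial[0]/full[0]
//	sweepgen = 8: swept sets are partial[0]/full[0], unswept are partial[1]/full[1]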

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
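	//
	// (A sketch of that arithmetic: in the worst case we sweep 100
	// fully-occupied spans and then fall back to allocating one
	// fresh span, so roughly 1 span in 100 is extra fresh space,
	// i.e. about 1% space overhead.)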
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	var s *mspan
	var sl sweepLocker

	// Try partial swept spans first.
	sg := mheap_.sweepgen
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	sl = sweep.active.begin()
	if sl.valid {
		// Now try partial unswept spans.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.partialUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it and use it.
				s.sweep(true)
				sweep.active.end(sl)
				goto havespan
			}
			// We failed to get ownership of the span, which means it's being or
			// has been swept by an asynchronous sweeper that just couldn't remove it
			// from the unswept list. That sweeper took ownership of the span and
			// responsibility for either freeing it to the heap or putting it on the
			// right swept list. Either way, we should just ignore it (and it's unsafe
			// for us to do anything else).
		}
		// Now try full unswept spans, sweeping them and putting them into the
		// right list if we fail to get a span.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.fullUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it.
				s.sweep(true)
				// Check if there's any free space.
				freeIndex := s.nextFreeIndex()
				if freeIndex != s.nelems {
					s.freeindex = freeIndex
					sweep.active.end(sl)
					goto havespan
				}
				// Add it to the swept list, because sweeping didn't give us any free space.
				c.fullSwept(sg).push(s.mspan)
			}
			// See comment for partial unswept spans.
		}
		sweep.active.end(sl)
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64
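
	// (Worked example, as a sketch: if s.freeindex were 70, then
	// freeByteBase = 70 &^ 63 = 64 and whichByte = 64/8 = 8, so the
	// cache is refilled starting at allocBits byte 8, i.e. bit 64.
	// Shifting by 70%64 = 6 then discards bits 64..69 so that bit 0
	// of s.allocCache describes object 70.)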

	return s
}

// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1
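
	// (For reference, a sketch of the sweepgen encoding as documented
	// in mheap.go: s.sweepgen == sg-2 means s needs sweeping, sg-1
	// means s is currently being swept, sg means s is swept, and sg+1
	// means s was cached before the current sweep began.)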

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s := mheap_.alloc(npages, c.spanclass)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
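	//
	// (A sketch of the trick, with hypothetical constants: for a
	// fixed divisor d, x/d can be computed as (x*m)>>k for a
	// precomputed per-size-class magic multiplier m and shift k,
	// avoiding a hardware divide on this hot path.)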
	n := s.divideByElemSize(npages << _PageShift)
	s.limit = s.base() + size*n
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}