// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral is two lists of mspans: those with free objects (c->partial)
// and those that are completely allocated (c->full).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
}
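
// A condensed sketch of the caller side (it lives in mheap.go, not in this
// file): mheap.init sets up one mcentral per span class, roughly:
//
//	for i := range h.central {
//		h.central[i].mcentral.init(spanClass(i))
//	}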

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}
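
// Worked example of the indexing above (illustrative values only): when
// sweepgen is 4, swept spans live at index 4/2%2 = 0 and unswept spans at
// index 1-0 = 1; after the next GC cycle sweepgen is 6, so swept spans live
// at index 6/2%2 = 1 and unswept spans at index 0. The two sets swap roles
// without any spans having to move.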

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	var s *mspan
	sl := newSweepLocker()
	sg := sl.sweepGen

	// Try partial swept spans first.
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	// Now try partial unswept spans.
	for ; spanBudget >= 0; spanBudget-- {
		s = c.partialUnswept(sg).pop()
		if s == nil {
			break
		}
		if s, ok := sl.tryAcquire(s); ok {
			// We got ownership of the span, so let's sweep it and use it.
			s.sweep(true)
			sl.dispose()
			goto havespan
		}
		// We failed to get ownership of the span, which means it's being or
		// has been swept by an asynchronous sweeper that just couldn't remove it
		// from the unswept list. That sweeper took ownership of the span and
		// responsibility for either freeing it to the heap or putting it on the
		// right swept list. Either way, we should just ignore it (and it's unsafe
		// for us to do anything else).
	}
	// Now try full unswept spans, sweeping them and putting them into the
	// right list if we fail to get a span.
	for ; spanBudget >= 0; spanBudget-- {
		s = c.fullUnswept(sg).pop()
		if s == nil {
			break
		}
		if s, ok := sl.tryAcquire(s); ok {
			// We got ownership of the span, so let's sweep it.
			s.sweep(true)
			// Check if there's any free space.
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				sl.dispose()
				goto havespan
			}
			// Add it to the swept list, because sweeping didn't give us any free space.
			c.fullSwept(sg).push(s.mspan)
		}
		// See comment for partial unswept spans.
	}
	sl.dispose()
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64
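
	// Worked example with illustrative values: if s.freeindex is 70, then
	// freeByteBase = 70 &^ 63 = 64 and whichByte = 64/8 = 8, so the refill
	// above loaded the alloc bits for objects 64 through 127, and the shift
	// by 70%64 = 6 leaves bit 0 of s.allocCache corresponding to object
	// index 70.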

	return s
}
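
// A condensed sketch of the caller side (it lives in mcache.go, not in this
// file): mcache.refill obtains a cached span roughly as follows, treating a
// nil result as heap exhaustion:
//
//	s := mheap_.central[spc].mcentral.cacheSpan()
//	if s == nil {
//		throw("out of memory")
//	}
//	c.alloc[spc] = s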

// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1
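
	// For reference, the sweepgen states are documented on mspan in
	// mheap.go: sg-2 means the span needs sweeping, sg-1 means it is
	// currently being swept, sg means it is swept, sg+1 means it was cached
	// before sweeping began and still needs sweeping, and sg+3 means it was
	// swept and then cached.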

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}
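
// A condensed sketch of the caller side (it lives in mcache.go, not in this
// file; statistics bookkeeping is elided): mcache.releaseAll returns every
// cached span to its mcentral, roughly:
//
//	for i := range c.alloc {
//		s := c.alloc[i]
//		if s != &emptymspan {
//			mheap_.central[i].mcentral.uncacheSpan(s)
//			c.alloc[i] = &emptymspan
//		}
//	}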

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s, _ := mheap_.alloc(npages, c.spanclass, true)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := s.divideByElemSize(npages << _PageShift)
	s.limit = s.base() + size*n
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}
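
// Worked example with real size-class data (illustrative only): for the
// 48-byte size class, class_to_allocnpages gives npages = 1 and
// class_to_size gives size = 48, so n = 8192/48 = 170 objects, s.limit ends
// up 170*48 = 8160 bytes past s.base(), and the remaining 32 bytes of the
// page are tail waste.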