// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral is two lists of mspans: those with free objects (c->partial)
// and those that are completely allocated (c->full).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
}
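
// For reference, the heap sets up one mcentral per span class at startup;
// a sketch of that loop (see (*mheap).init in mheap.go, assuming the
// central array layout there):
//
//	for i := range h.central {
//		h.central[i].mcentral.init(spanClass(i))
//	}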

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}
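
// A worked example of the indexing above: with sweepgen == 4, sweepgen/2%2
// is 0, so swept spans live in partial[0]/full[0] and unswept spans in
// partial[1]/full[1]. When the next GC cycle bumps sweepgen to 6, the
// expression flips to 1 and the roles swap, so last cycle's swept sets
// become this cycle's unswept sets without moving any spans.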

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	var s *mspan
	var sl sweepLocker

	// Try partial swept spans first.
	sg := mheap_.sweepgen
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	sl = sweep.active.begin()
	if sl.valid {
		// Now try partial unswept spans.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.partialUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it and use it.
				s.sweep(true)
				sweep.active.end(sl)
				goto havespan
			}
			// We failed to get ownership of the span, which means it's being or
			// has been swept by an asynchronous sweeper that just couldn't remove it
			// from the unswept list. That sweeper took ownership of the span and
			// responsibility for either freeing it to the heap or putting it on the
			// right swept list. Either way, we should just ignore it (and it's unsafe
			// for us to do anything else).
		}
		// Now try full unswept spans, sweeping them and putting them into the
		// right list if we fail to get a span.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.fullUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it.
				s.sweep(true)
				// Check if there's any free space.
				freeIndex := s.nextFreeIndex()
				if freeIndex != s.nelems {
					s.freeindex = freeIndex
					sweep.active.end(sl)
					goto havespan
				}
				// Add it to the swept list, because sweeping didn't give us any free space.
				c.fullSwept(sg).push(s.mspan)
			}
			// See comment for partial unswept spans.
		}
		sweep.active.end(sl)
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
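
// To illustrate the allocCache adjustment at the end of cacheSpan (a worked
// example, not additional runtime behavior): suppose s.freeindex == 70.
// Then freeByteBase = 70 &^ 63 = 64 is the first object index covered by
// the 64-bit allocBits word holding bit 70, and whichByte = 64/8 = 8 is
// that word's byte offset, so refillAllocCache(8) loads the (inverted)
// allocBits for objects 64..127. Shifting by 70%64 = 6 drops the six
// already-consumed bits, so bit 0 of s.allocCache now says whether object
// 70 is free.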

// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}
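
// The sweepgen stores in uncacheSpan follow the state encoding documented
// on mspan in mheap.go: s.sweepgen == h.sweepgen+1 means the span was
// cached before the current sweep began, h.sweepgen-1 means it is being
// swept, and h.sweepgen means it is swept and ready to use. So the stale
// path first marks the span as being swept (sg-1) and then sweeps it,
// while the non-stale path can move straight to the swept state (sg).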

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s := mheap_.alloc(npages, c.spanclass)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := s.divideByElemSize(npages << _PageShift)
	s.limit = s.base() + size*n
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}
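
// divideByElemSize performs the division above with a precomputed
// fixed-point reciprocal rather than a hardware divide. A hedged sketch of
// the idea (the constant below is illustrative; the runtime derives its
// magic numbers in mksizeclasses.go):
//
//	divMul := uint64(^uint32(0))/uint64(size) + 1 // ~ceil(2^32 / size)
//	n := uintptr(uint64(nbytes) * divMul >> 32)   // == nbytes / size for in-range nbytes
//
// Multiplying by the 32.32 reciprocal and shifting costs one multiply
// instead of a divide on this allocation path; the constant is only valid
// for the bounded byte counts a span of this size class can produce.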