// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are described
// by 1-bit bitmaps in which 0 means uninteresting and 1 means live pointer
// to be visited during GC. The bits in each byte are consumed starting with
// the low bit: 1<<0, 1<<1, and so on.
//
// Heap bitmap
//
// The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
// stored in the heapArena metadata backing each heap arena.
// That is, if ha is the heapArena for the arena starting at start,
// then ha.bitmap[0] holds the 2-bit entries for the four words start
// through start+3*ptrSize, ha.bitmap[1] holds the entries for
// start+4*ptrSize through start+7*ptrSize, and so on.
//
// In each 2-bit entry, the lower bit holds the same information as in the 1-bit
// bitmaps: 0 means uninteresting and 1 means live pointer to be visited during GC.
// The meaning of the high bit depends on the position of the word being described
// in its allocated object. In all words *except* the second word, the
// high bit indicates that the object is still being described. In
// these words, if a bit pair with a high bit 0 is encountered, the
// low bit can also be assumed to be 0, and the object description is
// over. This 00 is called the ``dead'' encoding: it signals that the
// rest of the words in the object are uninteresting to the garbage
// collector.
//
// In the second word, the high bit is the GC ``checkmarked'' bit (see below).
//
// The 2-bit entries are split when written into the byte, so that the top half
// of the byte contains 4 high bits and the bottom half contains 4 low (pointer)
// bits.
// This form allows a copy from the 1-bit to the 4-bit form to keep the
// pointer bits contiguous, instead of having to space them out.
//
// The code makes use of the fact that the zero value for a heap bitmap
// has no live pointer bit set and, depending on position, is not used,
// not checkmarked, or the dead encoding.
// These properties must be preserved when modifying the encoding.
//
// The bitmap for noscan spans is not maintained. Code must ensure
// that an object is scannable before consulting its bitmap by
// checking either the noscan bit in the span or the object's type
// information.
//
// Checkmarks
//
// In a concurrent garbage collector, one worries about failing to mark
// a live object due to mutations without write barriers or bugs in the
// collector implementation. As a sanity check, the GC has a 'checkmark'
// mode that retraverses the object graph with the world stopped, to make
// sure that everything that should be marked is marked.
// In checkmark mode, in the heap bitmap, the high bit of the 2-bit entry
// for the second word of the object holds the checkmark bit.
// When not in checkmark mode, this bit is set to 1.
//
// The smallest possible allocation is 8 bytes. On a 32-bit machine, that
// means every allocated object has two words, so there is room for the
// checkmark bit. On a 64-bit machine, however, the 8-byte allocation is
// just one word, so the second bit pair is not available for encoding the
// checkmark. However, because non-pointer allocations are combined
// into larger 16-byte (maxTinySize) allocations, a plain 8-byte allocation
// must be a pointer, so the type bit in the first word is not actually needed.
// It is still used in general, except that in checkmark mode the type bit is
// repurposed as the checkmark bit and then reinitialized (to 1) as the type
// bit when finished.
//

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	bitPointer = 1 << 0
	bitScan    = 1 << 4

	heapBitsShift      = 1     // shift offset between successive bitPointer or bitScan entries
	wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte

	// all scan/pointer bits in a byte
	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
)
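
// For illustration only (not part of the runtime): with the split encoding
// described above, one bitmap byte covers four heap words, holding the four
// pointer bits in the low nibble and the four scan bits in the high nibble.
// A hypothetical decoder for entry i (0 <= i < 4) of a bitmap byte:
//
//	func decodeEntry(bitmapByte uint8, i uint) (isPointer, scan bool) {
//		isPointer = bitmapByte&(bitPointer<<i) != 0 // low nibble: pointer bits
//		scan = bitmapByte&(bitScan<<i) != 0         // high nibble: scan bits
//		return
//	}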

// addb returns the byte pointer p+n.
//go:nowritebarrier
//go:nosplit
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
//go:nowritebarrier
//go:nosplit
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
//go:nowritebarrier
//go:nosplit
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
//go:nowritebarrier
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	bitp  *uint8
	shift uint32
	arena uint32 // Index of heap arena containing bitp
	last  *uint8 // Last byte of the arena's bitmap
}

// Make the compiler check that heapBits.arena is large enough to hold
// the maximum arena frame number.
var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}

// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
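
// A worked example (illustrative, not part of the runtime): suppose the low
// byte of s.allocBits is 0b00010111, i.e. objects 0, 1, 2, and 4 are
// allocated. refillAllocCache negates it, so the cache holds ...11101000 and
// counting trailing zeros yields 3: the lowest clear alloc bit, and hence
// the next free object index, is 3. In ordinary Go, with "math/bits":
//
//	allocBits := uint64(0b00010111)
//	cache := ^allocBits
//	next := bits.TrailingZeros64(cache) // == 3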

// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uintptr {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.Ctz64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.Ctz64(aCache)
		// nothing available in cached bits
		// grab the next 8 bytes and try again.
	}
	result := sfreeindex + uintptr(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}
244
Colin Crossd9c6b802019-03-19 21:10:31 -0700245// isFree reports whether the index'th object in s is unallocated.
Dan Willemsen38f2dba2016-07-08 14:54:35 -0700246func (s *mspan) isFree(index uintptr) bool {
Dan Willemsenebae3022017-01-13 23:01:08 -0800247 if index < s.freeindex {
248 return false
249 }
Dan Willemsend2797482017-07-26 13:13:13 -0700250 bytep, mask := s.allocBits.bitp(index)
251 return *bytep&mask == 0
Dan Willemsen38f2dba2016-07-08 14:54:35 -0700252}

func (s *mspan) objIndex(p uintptr) uintptr {
	byteOffset := p - s.base()
	if byteOffset == 0 {
		return 0
	}
	if s.baseMask != 0 {
		// s.baseMask is non-0, elemsize is a power of two, so shift by s.divShift
		return byteOffset >> s.divShift
	}
	return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2)
}
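
// For illustration (hypothetical values, not part of the runtime): the
// divShift/divMul/divShift2 fields implement division by the span's element
// size without a hardware divide, via
//
//	objIndex := uintptr((uint64(byteOffset) >> divShift) * uint64(divMul) >> divShift2)
//
// For example, for elemsize == 48 the shift by divShift strips the factor
// 48 shares with a power of two (16), and the multiply-then-shift by divMul
// and divShift2 performs the remaining division by 3. The constants are
// precomputed per size class.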

func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}
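
// Illustrative sketch (not part of the runtime): markBitsForBase and advance
// combine to visit the mark bit of every object in a span:
//
//	mbits := s.markBitsForBase()
//	for i := uintptr(0); i < s.nelems; i++ {
//		if mbits.isMarked() {
//			// object i survived this GC cycle
//		}
//		mbits.advance()
//	}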

// heapBitsForAddr returns the heapBits for the address addr.
// The caller must ensure addr is in an allocated span.
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func heapBitsForAddr(addr uintptr) (h heapBits) {
	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
	arena := arenaIndex(addr)
	ha := mheap_.arenas[arena.l1()][arena.l2()]
	// The compiler uses a load for nil checking ha, but in this
	// case we'll almost never hit that cache line again, so it
	// makes more sense to do a value check.
	if ha == nil {
		// addr is not in the heap. Return nil heapBits, which
		// we expect to crash in the caller.
		return
	}
	h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
	h.shift = uint32((addr / sys.PtrSize) & 3)
	h.arena = uint32(arena)
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return
}
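
// A worked example of the indexing above (illustrative, assuming a 64-bit
// system where sys.PtrSize == 8): each bitmap byte covers 4 words, i.e.
// 32 bytes of heap, so addr/32 selects the bitmap byte (modulo the arena
// bitmap length) and (addr/8)&3 selects which of that byte's four entries
// describes addr:
//
//	byteIndex := (addr / (8 * 4)) % heapArenaBitmapBytes // which bitmap byte
//	shift := uint32((addr / 8) & 3)                      // which entry in it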

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If p is a bad pointer, it may not be in s's bounds.
	if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
		if s == nil || s.state == mSpanManual {
			// If s is nil, the virtual address has never been part of the heap.
			// This pointer may be to some mmap'd region, so we allow it.
			// Pointers into stacks are also ok, the runtime manages these explicitly.
			return
		}

		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			// Typically this indicates an incorrect use
			// of unsafe or cgo to store a bad pointer in
			// the Go heap. It may also indicate a runtime
			// bug.
			//
			// TODO(austin): We could be more aggressive
			// and detect pointers to unallocated objects
			// in allocated spans.
			printlock()
			print("runtime: pointer ", hex(p))
			if s.state != mSpanInUse {
				print(" to unallocated span")
			} else {
				print(" to unused region of span")
			}
			print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
			if refBase != 0 {
				print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
				gcDumpObject("object", refBase, refOff)
			}
			getg().m.traceback = 2
			throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
		}
		return
	}
	// If this span holds objects of a power of 2 size, just mask off the bits to
	// the interior of the object. Otherwise use the size to get the base.
	if s.baseMask != 0 {
		// optimize for power of 2 sized objects.
		base = s.base()
		base = base + (p-base)&uintptr(s.baseMask)
		objIndex = (base - s.base()) >> s.divShift
		// base = p & s.baseMask is faster for small spans,
		// but doesn't work for large spans.
		// Overall, it's faster to use the more general computation above.
	} else {
		base = s.base()
		if p-base >= s.elemsize {
			// n := (p - base) / s.elemsize, using division by multiplication
			objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2
			base += objIndex * s.elemsize
		}
	}
	return
}

// next returns the heapBits describing the next pointer-sized word in memory.
// That is, if h describes address p, h.next() describes p+ptrSize.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) next() heapBits {
	if h.shift < 3*heapBitsShift {
		h.shift += heapBitsShift
	} else if h.bitp != h.last {
		h.bitp, h.shift = add1(h.bitp), 0
	} else {
		// Move to the next arena.
		return h.nextArena()
	}
	return h
}

// nextArena advances h to the beginning of the next heap arena.
//
// This is a slow-path helper to next. gc's inliner knows that
// heapBits.next can be inlined even though it calls this. This is
// marked noinline so it doesn't get inlined into next and cause next
// to be too big to inline.
//
//go:nosplit
//go:noinline
func (h heapBits) nextArena() heapBits {
	h.arena++
	ai := arenaIdx(h.arena)
	l2 := mheap_.arenas[ai.l1()]
	if l2 == nil {
		// We just passed the end of the object, which
		// was also the end of the heap. Poison h. It
		// should never be dereferenced at this point.
		return heapBits{}
	}
	ha := l2[ai.l2()]
	if ha == nil {
		return heapBits{}
	}
	h.bitp, h.shift = &ha.bitmap[0], 0
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return h
}
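
// Usage sketch (illustrative; nWords is a hypothetical count supplied by the
// caller): because next does not modify its receiver, iteration must
// reassign the result, as the barrier loops later in this file do:
//
//	h := heapBitsForAddr(p)
//	for i := uintptr(0); i < nWords; i++ {
//		if h.isPointer() {
//			// word i of the object holds a pointer
//		}
//		h = h.next()
//	}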

// forward returns the heapBits describing n pointer-sized words ahead of h in memory.
// That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
// h.forward(1) is equivalent to h.next(), just slower.
// Note that forward does not modify h. The caller must record the result.
//go:nosplit
func (h heapBits) forward(n uintptr) heapBits {
	n += uintptr(h.shift) / heapBitsShift
	nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
	h.shift = uint32(n%4) * heapBitsShift
	if nbitp <= uintptr(unsafe.Pointer(h.last)) {
		h.bitp = (*uint8)(unsafe.Pointer(nbitp))
		return h
	}

	// We're in a new heap arena.
	past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
	h.arena += 1 + uint32(past/heapArenaBitmapBytes)
	ai := arenaIdx(h.arena)
	if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
		a := l2[ai.l2()]
		h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
		h.last = &a.bitmap[len(a.bitmap)-1]
	} else {
		h.bitp, h.last = nil, nil
	}
	return h
}

// forwardOrBoundary is like forward, but stops at boundaries between
// contiguous sections of the bitmap. It returns the number of words
// advanced over, which will be <= n.
func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
	maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
	if n > maxn {
		n = maxn
	}
	return h.forward(n), n
}

// bits returns the heap bits for the current word.
// The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
// The result includes in its higher bits the bits for subsequent words
// described by the same bitmap byte.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) bits() uint32 {
	// The (shift & 31) eliminates a test and conditional branch
	// from the generated code.
	return uint32(*h.bitp) >> (h.shift & 31)
}

// morePointers reports whether this word and the remaining words in this
// object still need to be scanned; if it reports false, this word and all
// remaining words are scalars.
// h must not describe the second word of the object.
func (h heapBits) morePointers() bool {
	return h.bits()&bitScan != 0
}

// isPointer reports whether the heap bits describe a pointer word.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) isPointer() bool {
	return h.bits()&bitPointer != 0
}

// isCheckmarked reports whether the heap bits have the checkmarked bit set.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) isCheckmarked(size uintptr) bool {
	if size == sys.PtrSize {
		return (*h.bitp>>h.shift)&bitPointer != 0
	}
	// All multiword objects are 2-word aligned,
	// so we know that the initial word's 2-bit pair
	// and the second word's 2-bit pair are in the
	// same heap bitmap byte, *h.bitp.
	return (*h.bitp>>(heapBitsShift+h.shift))&bitScan != 0
}

// setCheckmarked sets the checkmarked bit.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) setCheckmarked(size uintptr) {
	if size == sys.PtrSize {
		atomic.Or8(h.bitp, bitPointer<<h.shift)
		return
	}
	atomic.Or8(h.bitp, bitScan<<(heapBitsShift+h.shift))
}

// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.kind&kindNoPointers.
//
// Callers must perform cgo checks if writeBarrier.cgo.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(sys.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no need for barriers.
		// This will also catch if dst is in a freed span,
		// though that should never happen.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	if src == 0 {
		for i := uintptr(0); i < size; i += sys.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	} else {
		for i := uintptr(0); i < size; i += sys.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	}
}
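
// Typical use (sketch, mirroring what typedmemmove-style code does; the
// exact call sites live elsewhere in the runtime): queue barriers for the
// destination's pointer slots, then perform the actual copy.
//
//	if typ.kind&kindNoPointers == 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.size)
//	}
//	memmove(dst, src, typ.size)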

// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite,
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
	if (dst|src|size)&(sys.PtrSize-1) != 0 {
		throw("bulkBarrierPreWriteSrcOnly: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	for i := uintptr(0); i < size; i += sys.PtrSize {
		if h.isPointer() {
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(0, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
		h = h.next()
	}
}

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / sys.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += sys.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * sys.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
		}
		mask <<= 1
	}
}

// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this for us.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe the barriers and the memmove as an atomic action.
//
// Callers must perform cgo checks if writeBarrier.cgo.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.size != size {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.kind&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.needed {
		return
	}
	ptrmask := typ.gcdata
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
		if i&(sys.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(*dstx, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
	}
}
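
// The channel send path uses this roughly as follows (sketch of sendDirect
// in chan.go; details may differ by version): the barriers are queued for
// the receiver's stack slot just before the element is copied there.
//
//	dst := sg.elem
//	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
//	memmove(dst, src, t.size)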

// The methods operating on spans all require that h has been returned
// by heapBitsForSpan and that size, n, total are the span layout description
// returned by the mspan's layout method.
// If total > size*n, it means that there is extra leftover memory in the span,
// usually due to rounding.
//
// TODO(rsc): Perhaps introduce a different heapBitsSpan type.

// initSpan initializes the heap bitmap for a span.
// It clears all checkmark bits.
// If this is a span of pointer-sized objects, it initializes all
// words to pointer/scan.
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
	size, n, total := s.layout()

	// Init the markbit structures
	s.freeindex = 0
	s.allocCache = ^uint64(0) // all 1s indicating all free.
	s.nelems = n
	s.allocBits = nil
	s.gcmarkBits = nil
	s.gcmarkBits = newMarkBits(s.nelems)
	s.allocBits = newAllocBits(s.nelems)

	// Clear bits corresponding to objects.
	nw := total / sys.PtrSize
	if nw%wordsPerBitmapByte != 0 {
		throw("initSpan: unaligned length")
	}
	if h.shift != 0 {
		throw("initSpan: unaligned base")
	}
	for nw > 0 {
		hNext, anw := h.forwardOrBoundary(nw)
		nbyte := anw / wordsPerBitmapByte
		if sys.PtrSize == 8 && size == sys.PtrSize {
			bitp := h.bitp
			for i := uintptr(0); i < nbyte; i++ {
				*bitp = bitPointerAll | bitScanAll
				bitp = add1(bitp)
			}
		} else {
			memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
		}
		h = hNext
		nw -= anw
	}
}

// initCheckmarkSpan initializes a span for being checkmarked.
// It clears the checkmark bits, which are set to 1 in normal operation.
func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
	// The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
	if sys.PtrSize == 8 && size == sys.PtrSize {
		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
		// Only possible on 64-bit system, since minimum size is 8.
		// Must clear type bit (checkmark bit) of every word.
		// The type bit is the lower of every two-bit pair.
		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
			*h.bitp &^= bitPointerAll
			h = h.forward(wordsPerBitmapByte)
		}
		return
	}
	for i := uintptr(0); i < n; i++ {
		*h.bitp &^= bitScan << (heapBitsShift + h.shift)
		h = h.forward(size / sys.PtrSize)
	}
}

// clearCheckmarkSpan undoes all the checkmarking in a span.
// The actual checkmark bits are ignored, so the only work to do
// is to fix the pointer bits. (Pointer bits are ignored by scanobject
// but consulted by typedmemmove.)
func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
	// The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
	if sys.PtrSize == 8 && size == sys.PtrSize {
		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
		// Only possible on 64-bit system, since minimum size is 8.
		// Must clear type bit (checkmark bit) of every word.
		// The type bit is the lower of every two-bit pair.
		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
			*h.bitp |= bitPointerAll
			h = h.forward(wordsPerBitmapByte)
		}
	}
}

// oneBitCount is indexed by byte and produces the
// number of 1 bits in that byte. For example 128 has 1 bit set
// and oneBitCount[128] will hold 1.
var oneBitCount = [256]uint8{
	0, 1, 1, 2, 1, 2, 2, 3,
	1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7,
	5, 6, 6, 7, 6, 7, 7, 8}

// countAlloc returns the number of objects allocated in span s by
// scanning the mark bitmap (at this point in the sweep, the mark bits
// describe which objects are allocated).
// TODO:(rlh) Use popcount intrinsic.
func (s *mspan) countAlloc() int {
	count := 0
	maxIndex := s.nelems / 8
	for i := uintptr(0); i < maxIndex; i++ {
		mrkBits := *s.gcmarkBits.bytep(i)
		count += int(oneBitCount[mrkBits])
	}
	if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
		mrkBits := *s.gcmarkBits.bytep(maxIndex)
		mask := uint8((1 << bitsInLastByte) - 1)
		bits := mrkBits & mask
		count += int(oneBitCount[bits])
	}
	return count
}
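
// As the TODO above notes, the table lookup is equivalent to a popcount.
// Outside the runtime (which cannot import math/bits) the same per-byte
// count could be written as follows (illustrative only):
//
//	import "math/bits"
//
//	func onesInByte(b uint8) int {
//		return bits.OnesCount8(b) // equals int(oneBitCount[b])
//	}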

// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on byte boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	// dataSize is always size rounded up to the next malloc size class,
	// except in the case of allocating a defer block, in which case
	// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
	// arbitrarily larger.
	//
	// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
	// assume that dataSize == size without checking it explicitly.

	if sys.PtrSize == 8 && size == sys.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// initSpan sets the pointer bits for us. Nothing to do here.
		if doubleCheck {
			h := heapBitsForAddr(x)
			if !h.isPointer() {
				throw("heapBitsSetType: pointer bit missing")
			}
			if !h.morePointers() {
				throw("heapBitsSetType: scan bit missing")
			}
		}
		return
	}

	h := heapBitsForAddr(x)
	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)

	// Heap bitmap bits for 2-word object are only 4 bits,
	// so they are shared with the bits of adjacent objects.
	// This is called out as a special case primarily for 32-bit systems,
	// so that on 32-bit systems the code below can assume all objects
	// are 4-word aligned (because they're all 16-byte aligned).
	if size == 2*sys.PtrSize {
		if typ.size == sys.PtrSize {
			// We're allocating a block big enough to hold two pointers.
			// On 64-bit, that means the actual object must be two pointers,
			// or else we'd have used the one-pointer-sized block.
			// On 32-bit, however, this is the 8-byte block, the smallest one.
			// So it could be that we're allocating one pointer and this was
			// just the smallest block available. Distinguish by checking dataSize.
			// (In general the number of instances of typ being allocated is
			// dataSize/typ.size.)
			if sys.PtrSize == 4 && dataSize == sys.PtrSize {
				// 1 pointer object. On 32-bit machines clear the bit for the
				// unused second word.
				*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
				*h.bitp |= (bitPointer | bitScan) << h.shift
			} else {
				// 2-element slice of pointer.
				*h.bitp |= (bitPointer | bitScan | bitPointer<<heapBitsShift) << h.shift
			}
			return
		}
		// Otherwise typ.size must be 2*sys.PtrSize,
		// and typ.kind&kindGCProg == 0.
		if doubleCheck {
			if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
				throw("heapBitsSetType")
			}
		}
		b := uint32(*ptrmask)
		hb := (b & 3) | bitScan
		// bitPointer == 1, bitScan is 1 << 4, heapBitsShift is 1.
		// 110011 is shifted h.shift and complemented.
		// This clears out the bits that are about to be
		// ored into *h.bitp in the next instructions.
		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
		*h.bitp |= uint8(hb << h.shift)
		return
	}

	// Copy from 1-bit ptrmask into 2-bit bitmap.
	// The basic approach is to use a single uintptr as a bit buffer,
	// alternating between reloading the buffer and writing bitmap bytes.
	// In general, one load can supply two bitmap byte writes.
	// This is a lot of lines of code, but it compiles into relatively few
	// machine instructions.

	outOfPlace := false
	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrand()%2 == 0) {
		// This object spans heap arenas, so the bitmap may be
		// discontiguous. Unroll it into the object instead
		// and then copy it out.
		//
		// In doubleCheck mode, we randomly do this anyway to
		// stress test the bitmap copying path.
		outOfPlace = true
		h.bitp = (*uint8)(unsafe.Pointer(x))
		h.last = nil
	}

	var (
		// Ptrmask input.
		p     *byte   // last ptrmask byte read
		b     uintptr // ptrmask bits already loaded
		nb    uintptr // number of bits in b at next read
		endp  *byte   // final ptrmask byte to read (then repeat)
		endnb uintptr // number of valid bits in *endp
		pbits uintptr // alternate source of bits

		// Heap bitmap output.
		w     uintptr // words processed
		nw    uintptr // number of words to process
		hbitp *byte   // next heap bitmap byte to write
		hb    uintptr // bits being prepared for *hbitp
	)

	hbitp = h.bitp

	// Handle GC program. Delayed until this part of the code
	// so that we can use the same double-checking mechanism
	// as the 1-bit case. Nothing above could have encountered
	// GC programs: the cases were all too small.
	if typ.kind&kindGCProg != 0 {
		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
		if doubleCheck {
			// Double-check the heap bits written by GC program
			// by running the GC program to create a 1-bit pointer mask
			// and then jumping to the double-check code below.
			// This doesn't catch bugs shared between the 1-bit and 4-bit
			// GC program execution, but it does catch mistakes specific
			// to just one of those and bugs in heapBitsSetTypeGCProg's
			// implementation of arrays.
			lock(&debugPtrmask.lock)
			if debugPtrmask.data == nil {
				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
			}
			ptrmask = debugPtrmask.data
			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
		}
		goto Phase4
	}

	// Note about sizes:
	//
	// typ.size is the size of the object in bytes,
	// and typ.ptrdata is the size in bytes of the prefix
	// of the object that contains pointers. That is, the final
	// typ.size - typ.ptrdata bytes contain no pointers.
	// This allows optimization of a common pattern where
	// an object has a small header followed by a large scalar
	// buffer. If we know the pointers are over, we don't have
	// to scan the buffer's heap bitmap at all.
	// The 1-bit ptrmasks are sized to contain only bits for
	// the typ.ptrdata prefix, zero padded out to a full byte
	// of bitmap. This code sets nw (below) so that heap bitmap
	// bits are only written for the typ.ptrdata prefix; if there is
	// more room in the allocated object, the next heap bitmap
	// entry is a 00, indicating that there are no more pointers
	// to scan. So only the ptrmask for the ptrdata bytes is needed.
	//
	// Replicated copies are not as nice: if there is an array of
	// objects with scalar tails, all but the last tail does have to
	// be initialized, because there is no way to say "skip forward".
	// However, because of the possibility of a repeated type with
	// size not a multiple of 4 pointers (one heap bitmap byte),
	// the code already must handle the last ptrmask byte specially
	// by treating it as containing only the bits for endnb pointers,
	// where endnb <= 4. We represent large scalar tails that must
	// be expanded in the replication by setting endnb larger than 4.
	// This will have the effect of reading many bits out of b,
	// but once the real bits are shifted out, b will supply as many
	// zero bits as we try to read, which is exactly what we need.

	p = ptrmask
	if typ.size < dataSize {
		// Filling in bits for an array of typ.
		// Set up for repetition of ptrmask during main loop.
		// Note that ptrmask describes only a prefix of the
		// object, through typ.ptrdata.
		const maxBits = sys.PtrSize*8 - 7
		if typ.ptrdata/sys.PtrSize <= maxBits {
			// Entire ptrmask fits in uintptr with room for a byte fragment.
			// Load into pbits and never read from ptrmask again.
			// This is especially important when the ptrmask has
			// fewer than 8 bits in it; otherwise the reload in the middle
			// of the Phase 2 loop would itself need to loop to gather
			// at least 8 bits.

			// Accumulate ptrmask into b.
			// ptrmask is sized to describe only typ.ptrdata, but we record
			// it as describing typ.size bytes, since all the high bits are zero.
			nb = typ.ptrdata / sys.PtrSize
			for i := uintptr(0); i < nb; i += 8 {
				b |= uintptr(*p) << i
				p = add1(p)
			}
			nb = typ.size / sys.PtrSize

			// Replicate ptrmask to fill entire pbits uintptr.
			// Doubling and truncating is fewer steps than
			// iterating by nb each time. (nb could be 1.)
			// Since we loaded typ.ptrdata/sys.PtrSize bits
			// but are pretending to have typ.size/sys.PtrSize,
			// there might be no replication necessary/possible.
			pbits = b
			endnb = nb
			if nb+nb <= maxBits {
				for endnb <= sys.PtrSize*8 {
					pbits |= pbits << endnb
					endnb += endnb
				}
				// Truncate to a multiple of original ptrmask.
				// Because nb+nb <= maxBits, nb fits in a byte.
				// Byte division is cheaper than uintptr division.
				endnb = uintptr(maxBits/byte(nb)) * nb
				pbits &= 1<<endnb - 1
				b = pbits
				nb = endnb
			}

			// Clear p and endp as sentinel for using pbits.
			// Checked during Phase 2 loop.
			p = nil
			endp = nil
		} else {
			// Ptrmask is larger. Read it multiple times.
			n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
			endp = addb(ptrmask, n)
			endnb = typ.size/sys.PtrSize - n*8
		}
	}
	if p != nil {
		b = uintptr(*p)
		p = add1(p)
		nb = 8
	}

	if typ.size == dataSize {
		// Single entry: can stop once we reach the non-pointer data.
		nw = typ.ptrdata / sys.PtrSize
	} else {
		// Repeated instances of typ in an array.
		// Have to process first N-1 entries in full, but can stop
		// once we reach the non-pointer data in the final entry.
		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
	}
	if nw == 0 {
		// No pointers! Caller was supposed to check.
		println("runtime: invalid type ", typ.string())
		throw("heapBitsSetType: called with non-pointer type")
		return
	}
	if nw < 2 {
		// Must write at least 2 words, because the "no scan"
		// encoding doesn't take effect until the third word.
		nw = 2
	}
Dan Willemsenc7413322018-08-27 23:21:26 -07001191 // Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
Dan Willemsen38f2dba2016-07-08 14:54:35 -07001192 // The leading byte is special because it contains the bits for word 1,
Dan Willemsenebae3022017-01-13 23:01:08 -08001193 // which does not have the scan bit set.
	// The leading half-byte is special because it's half a byte,
	// so we have to be careful with the bits already there.
	switch {
	default:
		throw("heapBitsSetType: unexpected shift")

	case h.shift == 0:
		// Ptrmask and heap bitmap are aligned.
		// Handle first byte of bitmap specially.
		//
		// The first byte we write out covers the first four
		// words of the object. The scan/dead bit on the first
		// word must be set to scan since there are pointers
		// somewhere in the object. The scan/dead bit on the
		// second word is the checkmark, so we don't set it.
		// In all following words, we set the scan/dead
		// appropriately to indicate that the object continues
		// to the next 2-bit entry in the bitmap.
		//
		// TODO: It doesn't matter if we set the checkmark, so
		// maybe this case isn't needed any more.
		hb = b & bitPointerAll
		hb |= bitScan | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
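		// (For example, pointer bits 0101 for the first four words
		// yield the byte 0xd5: low nibble 0101 holds the pointer
		// bits, high nibble 1101 holds the scan bits for words 0, 2,
		// and 3, with the word-1 bit left clear as the checkmark.)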
		if w += 4; w >= nw {
			goto Phase3
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
		nb -= 4

	case sys.PtrSize == 8 && h.shift == 2:
		// Ptrmask and heap bitmap are misaligned.
		// The bits for the first two words are in a byte shared
		// with another object, so we must be careful with the bits
		// already there.
		// We took care of 1-word and 2-word objects above,
		// so this is at least a 6-word object.
		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
		// This is not noscan, so set the scan bit in the
		// first word.
		hb |= bitScan << (2 * heapBitsShift)
		b >>= 2
		nb -= 2
		// Note: no bitScan for second word because that's
		// the checkmark.
		*hbitp &^= uint8((bitPointer | bitScan | (bitPointer << heapBitsShift)) << (2 * heapBitsShift))
		*hbitp |= uint8(hb)
		hbitp = add1(hbitp)
		if w += 2; w >= nw {
			// We know that there is more data, because we handled 2-word objects above.
			// This must be at least a 6-word object. If we're out of pointer words,
			// mark no scan in next bitmap byte and finish.
			hb = 0
			w += 4
			goto Phase3
		}
	}

	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
	// The loop computes the bits for that last write but does not execute the write;
	// it leaves the bits in hb for processing by phase 3.
	// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
	// use in the first half of the loop right now, and then we only adjust nb explicitly
	// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
	nb -= 4
	for {
		// Emit bitmap byte.
		// b has at least nb+4 bits, with one exception:
		// if w+4 >= nw, then b has only nw-w bits,
		// but we'll stop at the break and then truncate
		// appropriately in Phase 3.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4

		// Load more bits. b has nb right now.
		if p != endp {
			// Fast path: keep reading from ptrmask.
			// nb unmodified: we just loaded 8 bits,
			// and the next iteration will consume 8 bits,
			// leaving us with the same nb the next time we're here.
			if nb < 8 {
				b |= uintptr(*p) << nb
				p = add1(p)
			} else {
				// Reduce the number of bits in b.
				// This is important if we skipped
				// over a scalar tail, since nb could
				// be larger than the bit width of b.
				nb -= 8
			}
		} else if p == nil {
			// Almost as fast path: track bit count and refill from pbits.
			// For short repetitions.
			if nb < 8 {
				b |= pbits << nb
				nb += endnb
			}
			nb -= 8 // for next iteration
		} else {
			// Slow path: reached end of ptrmask.
			// Process final partial byte and rewind to start.
			b |= uintptr(*p) << nb
			nb += endnb
			if nb < 8 {
				b |= uintptr(*ptrmask) << nb
				p = add1(ptrmask)
			} else {
				nb -= 8
				p = ptrmask
			}
		}

		// Emit bitmap byte.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
	}

Phase3:
	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
	if w > nw {
		// Counting the 4 entries in hb not yet written to memory,
		// there are more entries than possible pointer slots.
		// Discard the excess entries (can't be more than 3).
		mask := uintptr(1)<<(4-(w-nw)) - 1
		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
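		// (For example, if w-nw == 2, mask is 0b11, so hb is masked
		// with 0x33, keeping the pointer and scan bits of the first
		// two entries and clearing the excess two.)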
	}

	// Change nw from counting possibly-pointer words to total words in allocation.
	nw = size / sys.PtrSize

	// Write whole bitmap bytes.
	// The first is hb, the rest are zero.
	if w <= nw {
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		hb = 0 // for possible final half-byte below
		for w += 4; w <= nw; w += 4 {
			*hbitp = 0
			hbitp = add1(hbitp)
		}
	}

	// Write final partial bitmap byte if any.
	// We know w > nw, or else we'd still be in the loop above.
	// It can be bigger only due to the 4 entries in hb that it counts.
	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
	// and can discard the 4 sitting in hb.
	// But if w == nw+2, we need to write the first two entries in hb.
	// The byte is shared with the next object, so be careful with
	// existing bits.
	if w == nw+2 {
		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
	}

Phase4:
	// Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
	if outOfPlace {
		// TODO: We could probably make this faster by
		// handling [x+dataSize, x+size) specially.
		h := heapBitsForAddr(x)
		// cnw is the number of heap words, or bit pairs
		// remaining (like nw above).
		cnw := size / sys.PtrSize
		src := (*uint8)(unsafe.Pointer(x))
		// We know the first and last byte of the bitmap are
		// not the same, but it's still possible for small
		// objects to span arenas, so they may share bitmap bytes
		// with neighboring objects.
		//
		// Handle the first byte specially if it's shared. See
		// Phase 1 for why this is the only special case we need.
		if doubleCheck {
			if !(h.shift == 0 || (sys.PtrSize == 8 && h.shift == 2)) {
1380 print("x=", x, " size=", size, " cnw=", h.shift, "\n")
				throw("bad start shift")
			}
		}
		if sys.PtrSize == 8 && h.shift == 2 {
			*h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
			h = h.next().next()
			cnw -= 2
			src = addb(src, 1)
		}
		// We're now byte aligned. Copy out to per-arena
		// bitmaps until the last byte (which may again be
		// partial).
		for cnw >= 4 {
			// This loop processes four words at a time,
			// so round cnw down accordingly.
			hNext, words := h.forwardOrBoundary(cnw / 4 * 4)

			// n is the number of bitmap bytes to copy.
			n := words / 4
			memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
			cnw -= words
			h = hNext
			src = addb(src, n)
		}
		if doubleCheck && h.shift != 0 {
			print("cnw=", cnw, " h.shift=", h.shift, "\n")
			throw("bad shift after block copy")
		}
		// Handle the last byte if it's shared.
		if cnw == 2 {
			*h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
			src = addb(src, 1)
			h = h.next().next()
		}
		if doubleCheck {
			if uintptr(unsafe.Pointer(src)) > x+size {
				throw("copy exceeded object size")
			}
			if !(cnw == 0 || cnw == 2) {
				print("x=", x, " size=", size, " cnw=", cnw, "\n")
				throw("bad number of remaining words")
			}
			// Set up hbitp so doubleCheck code below can check it.
			hbitp = h.bitp
		}
		// Zero the object where we wrote the bitmap.
		memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
	}

	// Double check the whole bitmap.
	if doubleCheck {
		// x+size may not point to the heap, so back up one
		// word and then call next().
		end := heapBitsForAddr(x + size - sys.PtrSize).next()
		endAI := arenaIdx(end.arena)
		if !outOfPlace && (end.bitp == nil || (end.shift == 0 && end.bitp == &mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0])) {
			// The unrolling code above walks hbitp just
			// past the bitmap without moving to the next
			// arena. Synthesize this for end.bitp.
			end.arena--
			endAI = arenaIdx(end.arena)
			end.bitp = addb(&mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0], heapArenaBitmapBytes)
			end.last = nil
		}
		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
			h0 := heapBitsForAddr(x)
			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
			throw("bad heapBitsSetType")
		}

		// Double-check that bits to be written were written correctly.
		// Does not check that other bits were not written, unfortunately.
		h := heapBitsForAddr(x)
		nptr := typ.ptrdata / sys.PtrSize
		ndata := typ.size / sys.PtrSize
		count := dataSize / typ.size
		totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
		for i := uintptr(0); i < size/sys.PtrSize; i++ {
			j := i % ndata
			var have, want uint8
			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
			if i >= totalptr {
				want = 0 // deadmarker
				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
					want = bitScan
				}
			} else {
				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
					want |= bitPointer
				}
				if i != 1 {
					want |= bitScan
				} else {
					have &^= bitScan
				}
			}
			if have != want {
				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
				print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
				h0 := heapBitsForAddr(x)
				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
				println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
				if typ.kind&kindGCProg != 0 {
					println("GC program:")
					dumpGCProg(addb(typ.gcdata, 4))
				}
				throw("bad heapBitsSetType")
			}
			h = h.next()
		}
		if ptrmask == debugPtrmask.data {
			unlock(&debugPtrmask.lock)
		}
	}
}
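
// The function below is an illustrative sketch, not used by the
// runtime: it packs the (pointer, scan) flags for four words into one
// heap bitmap byte using the split layout written by the phases above,
// with the four pointer bits in the low nibble and the four scan bits
// in the high nibble. The name and signature are hypothetical.
func sketchPackBitmapByte(ptr, scan [4]bool) uint8 {
	var hb uint8
	for i := uint(0); i < 4; i++ {
		if ptr[i] {
			hb |= 1 << i // low nibble: pointer bits
		}
		if scan[i] {
			hb |= 1 << (i + 4) // high nibble: scan/dead bits
		}
	}
	return hb
}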

var debugPtrmask struct {
	lock mutex
	data *byte
}

// heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
// progSize is the size of the memory described by the program.
// elemSize is the size of the element that the GC program describes (a prefix of).
// dataSize is the total size of the intended data, a multiple of elemSize.
// allocSize is the total size of the allocated memory.
//
// GC programs are only used for large allocations.
// heapBitsSetType requires that allocSize is a multiple of 4 words,
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
	if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
		// Alignment will be wrong.
		throw("heapBitsSetTypeGCProg: small allocation")
	}
	var totalBits uintptr
	if elemSize == dataSize {
		totalBits = runGCProg(prog, nil, h.bitp, 2)
		if totalBits*sys.PtrSize != progSize {
			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
			throw("heapBitsSetTypeGCProg: unexpected bit count")
		}
	} else {
		count := dataSize / elemSize

		// Piece together program trailer to run after prog that does:
		//	literal(0)
		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
		//	repeat(elemSize, count-1) // repeat that element for count
		// This zero-pads the data remaining in the first element and then
		// repeats that first element to fill the array.
		var trailer [40]byte // 3 varints (max 10 each) + some bytes
		i := 0
		if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
			// literal(0)
			trailer[i] = 0x01
			i++
			trailer[i] = 0
			i++
			if n > 1 {
				// repeat(1, n-1)
				trailer[i] = 0x81
				i++
				n--
				for ; n >= 0x80; n >>= 7 {
					trailer[i] = byte(n | 0x80)
					i++
				}
				trailer[i] = byte(n)
				i++
			}
		}
		// repeat(elemSize/ptrSize, count-1)
		trailer[i] = 0x80
		i++
		n := elemSize / sys.PtrSize
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		n = count - 1
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		trailer[i] = 0
		i++

		runGCProg(prog, &trailer[0], h.bitp, 2)

		// Even though we filled in the full array just now,
		// record that we only filled in up to the ptrdata of the
		// last element. This will cause the code below to
		// memclr the dead section of the final array element,
		// so that scanobject can stop early in the final element.
		totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
	}
	endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
	endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
	memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
}
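
// Illustrative sketch, not part of the runtime: encode v in the
// base-128 varint form used for GC program operands above (7 bits per
// byte, least significant first, high bit set on every byte but the
// last), returning the number of bytes written. The name is
// hypothetical; the loop mirrors the inline encoders in
// heapBitsSetTypeGCProg.
func sketchPutVarint(dst []byte, v uintptr) int {
	i := 0
	for ; v >= 0x80; v >>= 7 {
		dst[i] = byte(v | 0x80)
		i++
	}
	dst[i] = byte(v)
	return i + 1
}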

// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/sys.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/sys.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, nil, &x[0], 1)
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}
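
// Illustrative sketch, not part of the runtime: read bit i of a 1-bit
// pointer mask such as the one progToPointerMask returns. Within each
// byte, bits are consumed starting with the low bit, matching the
// stack/data/bss bitmap convention. The helper name is hypothetical.
func sketchMaskBit(bv bitvector, i uintptr) bool {
	if i >= uintptr(bv.n) {
		return false
	}
	return *addb(bv.bytedata, i/8)>>(i%8)&1 != 0
}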

// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint

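// The function below is an illustrative sketch, not used by the
// runtime: a straightforward expander for the bytecode above that
// produces one byte per bit (0 or 1). It assumes a well-formed
// program whose repeats never reference more bits than have been
// emitted; the name is hypothetical. Compare runGCProg below, which
// additionally packs bits and interleaves scan bits.
func sketchExpandGCProg(prog []byte) []byte {
	var out []byte
	pc := 0
	readVarint := func() int {
		v, shift := 0, uint(0)
		for {
			x := prog[pc]
			pc++
			v |= int(x&0x7f) << shift
			if x&0x80 == 0 {
				return v
			}
			shift += 7
		}
	}
	for {
		inst := prog[pc]
		pc++
		if inst == 0 {
			return out // stop
		}
		if inst&0x80 == 0 {
			// 0nnnnnnn: emit n literal bits from the next (n+7)/8 bytes.
			n := int(inst)
			for i := 0; i < n; i++ {
				out = append(out, prog[pc+i/8]>>(uint(i)%8)&1)
			}
			pc += (n + 7) / 8
			continue
		}
		// 1nnnnnnn c (or 10000000 n c): repeat the previous n bits c times.
		n := int(inst & 0x7f)
		if n == 0 {
			n = readVarint()
		}
		for c := readVarint(); c > 0; c-- {
			out = append(out, out[len(out)-n:]...)
		}
	}
}
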
// runGCProg executes the GC program prog, and then trailer if non-nil,
// writing to dst with entries of the given size.
// If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
// If size == 2, dst is the 2-bit heap bitmap, and writes also move forward
// starting at dst. In this case, the caller guarantees
// that only whole bytes in dst need to be written.
//
// runGCProg returns the number of 1- or 2-bit entries written to memory.
func runGCProg(prog, trailer, dst *byte, size int) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			if size == 1 {
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			} else {
				v := bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = add1(dst)
				bits >>= 4
				v = bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = add1(dst)
				bits >>= 4
			}
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over; continue in trailer if present.
				if trailer != nil {
					//println("trailer")
					p = trailer
					trailer = nil
					continue
				}
				//println("done")
				break Run
			}
			//println("lit", n, dst)
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				if size == 1 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
				} else {
					v := bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = add1(dst)
					bits >>= 4
					v = bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = add1(dst)
					bits >>= 4
				}
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory.
		// Handling fewer than 8 bits here makes the general loop simpler.
		// The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
		// it will not overflow.
		src := dst
		const maxBits = sys.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			if size == 1 {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 8
					pattern |= uintptr(*src)
					src = subtract1(src)
					npattern += 8
				}
			} else {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 4
					pattern |= uintptr(*src) & 0xf
					src = subtract1(src)
					npattern += 4
				}
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Either way, we might now have too many instead of too few.
			// Discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is already all 0s,
				// but we can claim that the number of bits
				// in the word is equal to the number we need (c),
				// because right shift of bits will zero fill.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= sys.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					// TODO(rsc): Replace with table lookup or loop on systems without divide?
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				if size == 1 {
					for nbits >= 8 {
						*dst = uint8(bits)
						dst = add1(dst)
						bits >>= 8
						nbits -= 8
					}
				} else {
					for nbits >= 4 {
						*dst = uint8(bits&0xf | bitScanAll)
						dst = add1(dst)
						bits >>= 4
						nbits -= 4
					}
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		if size == 1 {
			// Leading src fragment.
			src = subtractb(src, (off+7)/8)
			if frag := off & 7; frag != 0 {
				bits |= uintptr(*src) >> (8 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 8; i > 0; i-- {
				bits |= uintptr(*src) << nbits
				src = add1(src)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			// Final src fragment.
			if c %= 8; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		} else {
			// Leading src fragment.
			src = subtractb(src, (off+3)/4)
			if frag := off & 3; frag != 0 {
				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 4; i > 0; i-- {
				bits |= (uintptr(*src) & 0xf) << nbits
				src = add1(src)
				*dst = uint8(bits&0xf | bitScanAll)
				dst = add1(dst)
				bits >>= 4
			}
			// Final src fragment.
			if c %= 4; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	var totalBits uintptr
	if size == 1 {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
		nbits += -nbits & 7
		for ; nbits > 0; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
	} else {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
		nbits += -nbits & 3
		for ; nbits > 0; nbits -= 4 {
			v := bits&0xf | bitScanAll
			*dst = uint8(v)
			dst = add1(dst)
			bits >>= 4
		}
	}
	return totalBits
}
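
// Illustrative usage sketch, not called anywhere in the runtime: run a
// tiny hand-assembled program through runGCProg in 1-bit mode. The
// program emits the three literal bits 1, 0, 1 and stops, so it should
// report 3 bits and leave 0x05 in the first mask byte. The function
// name and the expected values are assumptions of this sketch.
func sketchRunGCProgExample() {
	var buf [16]byte
	prog := [...]byte{
		0x03, 0x05, // 0nnnnnnn literal: emit 3 bits from one byte, low bit first
		0x00, // stop
	}
	n := runGCProg(&prog[0], nil, &buf[0], 1)
	println("bits:", n, "first byte:", buf[0]) // expect 3 and 5
}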

// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata. Then it fills that space with the
// pointer bitmask specified by the program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
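	// Sizing note: one mask bit per pointer-sized word and eight bits
	// per byte means ptrdata/(8*sys.PtrSize) mask bytes, rounded up to
	// whole pages. For example, on 64-bit with 8KiB pages, 1MiB of
	// ptrdata needs 1MiB/64 = 16KiB of mask, i.e. two pages.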
	s := mheap_.allocManual((ptrdata/(8*sys.PtrSize)+pageSize-1)/pageSize, &memstats.gc_sys)
	runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
	return s
}

func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, &memstats.gc_sys)
}

func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// Testing.

func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
	target := (*stkframe)(ctxt)
	if frame.sp <= target.sp && target.sp < frame.varp {
		*target = *frame
		return false
	}
	return true
}

// gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x interface{}) []byte {
	ret := getgcmask(x)
	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
	nptr := typ.ptrdata / sys.PtrSize
	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
		ret = ret[:len(ret)-1]
	}
	return ret
}

// getgcmask returns the GC type info for the pointer stored in ep, for testing.
// If ep points to the stack, only static live information will be
// returned (i.e. not for stack objects that are only dynamically live).
func getgcmask(ep interface{}) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - datap.data) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - datap.bss) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		hbits := heapBitsForAddr(base)
		n := s.elemsize
		mask = make([]byte, n/sys.PtrSize)
		for i := uintptr(0); i < n; i += sys.PtrSize {
			if hbits.isPointer() {
				mask[i/sys.PtrSize] = 1
			}
			if i != 1*sys.PtrSize && !hbits.morePointers() {
				mask = mask[:i/sys.PtrSize]
				break
			}
			hbits = hbits.next()
		}
		return
	}

	// stack
	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
		var frame stkframe
		frame.sp = uintptr(p)
		_g_ := getg()
		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
		if frame.fn.valid() {
			locals, _, _ := getStackMap(&frame, nil, false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * sys.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
				mask[i/sys.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// otherwise, not something the GC knows about.
	// possibly read-only data, like malloc(0).
	// must not have pointers
	return
}
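
// Illustrative sketch, not part of the runtime's API: how a test might
// exercise getgcmask on a heap object. The local type, the expected
// mask, and the function name are assumptions of this sketch; on
// 64-bit, the two-word struct below should report mask [1 0], marking
// only the pointer word.
func sketchGetgcmaskExample() {
	type pair struct {
		p *int
		x uintptr
	}
	mask := getgcmask(&pair{})
	println("words:", len(mask)) // expect 2: one entry per word
	for _, b := range mask {
		println(b) // expect 1 then 0
	}
}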