Update prebuilts to go1.9rc1 ab/4215840
Test: m -j blueprint_tools
Change-Id: I6e92d224c7b1185c0813593ab11403ef50017916
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 89d8a4c..2a9f1b8 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -45,6 +45,11 @@
// not checkmarked, and is the dead encoding.
// These properties must be preserved when modifying the encoding.
//
+// The bitmap for noscan spans is not maintained. Code must ensure
+// that an object is scannable before consulting its bitmap, by
+// either checking the noscan bit in the span or consulting the
+// object's type information.
+//
// Checkmarks
//
// In a concurrent garbage collector, one worries about failing to mark
@@ -134,13 +139,9 @@
return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}
-// mHeap_MapBits is called each time arena_used is extended.
-// It maps any additional bitmap memory needed for the new arena memory.
-// It must be called with the expected new value of arena_used,
-// *before* h.arena_used has been updated.
-// Waiting to update arena_used until after the memory has been mapped
-// avoids faults when other threads try access the bitmap immediately
-// after observing the change to arena_used.
+// mapBits maps any additional bitmap memory needed for the new arena memory.
+//
+// Don't call this directly. Call mheap.setArenaUsed.
//
//go:nowritebarrier
func (h *mheap) mapBits(arena_used uintptr) {
@@ -186,10 +187,8 @@
//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
- whichByte := allocBitIndex / 8
- whichBit := allocBitIndex % 8
- bytePtr := addb(s.allocBits, whichByte)
- return markBits{bytePtr, uint8(1 << whichBit), allocBitIndex}
+ bytep, mask := s.allocBits.bitp(allocBitIndex)
+ return markBits{bytep, mask, allocBitIndex}
}
// refillaCache takes 8 bytes s.allocBits starting at whichByte
@@ -197,7 +196,7 @@
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
- bytes := (*[8]uint8)(unsafe.Pointer(addb(s.allocBits, whichByte)))
+ bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
aCache := uint64(0)
aCache |= uint64(bytes[0])
aCache |= uint64(bytes[1]) << (1 * 8)
@@ -248,7 +247,7 @@
return snelems
}
- s.allocCache >>= (bitIndex + 1)
+ s.allocCache >>= uint(bitIndex + 1)
sfreeindex = result + 1
if sfreeindex%64 == 0 && sfreeindex != snelems {
@@ -269,10 +268,8 @@
if index < s.freeindex {
return false
}
- whichByte := index / 8
- whichBit := index % 8
- byteVal := *addb(s.allocBits, whichByte)
- return byteVal&uint8(1<<whichBit) == 0
+ bytep, mask := s.allocBits.bitp(index)
+ return *bytep&mask == 0
}
func (s *mspan) objIndex(p uintptr) uintptr {
@@ -294,14 +291,12 @@
}
func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
- whichByte := objIndex / 8
- bitMask := uint8(1 << (objIndex % 8)) // low 3 bits hold the bit index
- bytePtr := addb(s.gcmarkBits, whichByte)
- return markBits{bytePtr, bitMask, objIndex}
+ bytep, mask := s.gcmarkBits.bitp(objIndex)
+ return markBits{bytep, mask, objIndex}
}
func (s *mspan) markBitsForBase() markBits {
- return markBits{s.gcmarkBits, uint8(1), 0}
+ return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
}
// isMarked reports whether mark bit m is set.
@@ -332,11 +327,6 @@
atomic.And8(m.bytep, ^m.mask)
}
-// clearMarkedNonAtomic clears the marked bit non-atomically.
-func (m markBits) clearMarkedNonAtomic() {
- *m.bytep ^= m.mask
-}
-
// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
if base < mheap_.arena_start || base >= mheap_.arena_used {
@@ -374,6 +364,7 @@
// heapBitsForSpan returns the heapBits for the span base address base.
func heapBitsForSpan(base uintptr) (hbits heapBits) {
if base < mheap_.arena_start || base >= mheap_.arena_used {
+ print("runtime: base ", hex(base), " not in range [", hex(mheap_.arena_start), ",", hex(mheap_.arena_used), ")\n")
throw("heapBitsForSpan: base out of range")
}
return heapBitsForAddr(base)
@@ -400,7 +391,7 @@
// Consult the span table to find the block beginning.
s = mheap_.spans[idx]
if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
- if s == nil || s.state == _MSpanStack {
+ if s == nil || s.state == _MSpanManual {
// If s is nil, the virtual address has never been part of the heap.
// This pointer may be to some mmap'd region, so we allow it.
// Pointers into stacks are also ok, the runtime manages these explicitly.
@@ -430,6 +421,7 @@
print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
gcDumpObject("object", refBase, refOff)
}
+ getg().m.traceback = 2
throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}
return
@@ -509,16 +501,6 @@
return h.bits()&bitPointer != 0
}
-// hasPointers reports whether the given object has any pointers.
-// It must be told how large the object at h is for efficiency.
-// h must describe the initial word of the object.
-func (h heapBits) hasPointers(size uintptr) bool {
- if size == sys.PtrSize { // 1-word objects are always pointers
- return true
- }
- return (*h.bitp>>h.shift)&bitScan != 0
-}
-
// isCheckmarked reports whether the heap bits have the checkmarked bit set.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
@@ -578,29 +560,9 @@
return
}
if !inheap(dst) {
- // If dst is on the stack and in a higher frame than the
- // caller, we either need to execute write barriers on
- // it (which is what happens for normal stack writes
- // through pointers to higher frames), or we need to
- // force the mark termination stack scan to scan the
- // frame containing dst.
- //
- // Executing write barriers on dst is complicated in the
- // general case because we either need to unwind the
- // stack to get the stack map, or we need the type's
- // bitmap, which may be a GC program.
- //
- // Hence, we opt for forcing the re-scan to scan the
- // frame containing dst, which we can do by simply
- // unwinding the stack barriers between the current SP
- // and dst's frame.
gp := getg().m.curg
if gp != nil && gp.stack.lo <= dst && dst < gp.stack.hi {
- // Run on the system stack to give it more
- // stack space.
- systemstack(func() {
- gcUnwindBarriers(gp, dst)
- })
+ // Destination is our own stack. No need for barriers.
return
}
@@ -848,23 +810,23 @@
4, 5, 5, 6, 5, 6, 6, 7,
5, 6, 6, 7, 6, 7, 7, 8}
-// countFree runs through the mark bits in a span and counts the number of free objects
-// in the span.
+// countAlloc returns the number of objects allocated in span s by
+// scanning the allocation bitmap.
// TODO:(rlh) Use popcount intrinsic.
-func (s *mspan) countFree() int {
+func (s *mspan) countAlloc() int {
count := 0
maxIndex := s.nelems / 8
for i := uintptr(0); i < maxIndex; i++ {
- mrkBits := *addb(s.gcmarkBits, i)
+ mrkBits := *s.gcmarkBits.bytep(i)
count += int(oneBitCount[mrkBits])
}
if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
- mrkBits := *addb(s.gcmarkBits, maxIndex)
+ mrkBits := *s.gcmarkBits.bytep(maxIndex)
mask := uint8((1 << bitsInLastByte) - 1)
bits := mrkBits & mask
count += int(oneBitCount[bits])
}
- return int(s.nelems) - count
+ return count
}
// heapBitsSetType records that the new allocation [x, x+size)
@@ -1085,7 +1047,9 @@
endnb += endnb
}
// Truncate to a multiple of original ptrmask.
- endnb = maxBits / nb * nb
+ // Because nb+nb <= maxBits, nb fits in a byte.
+ // Byte division is cheaper than uintptr division.
+ endnb = uintptr(maxBits/byte(nb)) * nb
pbits &= 1<<endnb - 1
b = pbits
nb = endnb
@@ -1363,13 +1327,6 @@
}
}
-// heapBitsSetTypeNoScan marks x as noscan by setting the first word
-// of x in the heap bitmap to scalar/dead.
-func heapBitsSetTypeNoScan(x uintptr) {
- h := heapBitsForAddr(uintptr(x))
- *h.bitp &^= (bitPointer | bitScan) << h.shift
-}
-
var debugPtrmask struct {
lock mutex
data *byte
@@ -1902,7 +1859,7 @@
frame.sp = uintptr(p)
_g_ := getg()
gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
- if frame.fn != nil {
+ if frame.fn.valid() {
f := frame.fn
targetpc := frame.continpc
if targetpc == 0 {