// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig:
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code.
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2
	// (a worked example follows this const block).
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
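
// A worked example of the _FixedStack rounding above (the concrete numbers
// are illustrative and assume windows/amd64, where _StackSystem is
// 512*8 = 4096): _FixedStack0 = 2048+4096 = 6144 (0x1800); subtracting 1
// gives 0x17ff, the or/shift cascade smears the top bit down to 0x1fff,
// and adding 1 back yields _FixedStack = 0x2000 = 8192. Where _StackSystem
// is 0, 2048 is already a power of two and comes back unchanged. The same
// trick as a hypothetical stand-alone helper (not part of the runtime;
// handles values that fit in 32 bits):
//
//	func roundPow2(x uintptr) uintptr {
//		x--
//		x |= x >> 1
//		x |= x >> 2
//		x |= x >> 4
//		x |= x >> 8
//		x |= x >> 16
//		return x + 1
//	}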

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex
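
// For example, with the usual linux/amd64 values of _FixedStack = 2048 and
// _NumStackOrders = 4 (both defined elsewhere in the runtime and only
// assumed here for illustration), the pool holds four size classes:
//
//	order 0: 2048-byte stacks
//	order 1: 4096-byte stacks
//	order 2: 8192-byte stacks
//	order 3: 16384-byte stacks
//
// i.e. order = log_2(size/_FixedStack), matching the formula above.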

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
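
// For instance, stacklog2(1) == 0, stacklog2(8) == 3, and stacklog2(9) == 3:
// the result is the floor of log_2, which is what the large-stack free lists
// above are indexed by.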

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.manualFreeList = 0
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
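
// Taken together, refill and release give the per-P cache a simple
// hysteresis: assuming the usual _StackCacheSize of 32 KB (the constant is
// defined in malloc.go, so this is only an assumption here), refill pulls
// stacks from the global pool until the cache holds about 16 KB and release
// pushes stacks back until it drops to about 16 KB, so a P that allocates
// and frees stacks in a tight loop touches stackpoolmu only occasionally.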

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
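
// For example, if the old stack occupied [0xc000100000, 0xc000102000) and
// the replacement stack of the same size starts at 0xc000200000, then
// delta = 0x100000 and a saved pointer 0xc000101f00 is rewritten to
// 0xc000201f00, while pointers outside the old range are left untouched.
// The heart of the check, as a hypothetical stand-alone sketch (for
// illustration only, not a runtime API):
//
//	func adjust(p, oldLo, oldHi, delta uintptr) uintptr {
//		if oldLo <= p && p < oldHi {
//			return p + delta
//		}
//		return p
//	}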

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}
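
// For example, if bytedata[0] is 0x05 (binary 00000101), ptrbit reports 1
// for slots 0 and 2 and 0 for slot 1: bit i of a frame's pointer bitmap
// lives in byte i/8 at bit position i%8, which is exactly how
// adjustpointers below consumes it.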

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, funcInfo{})
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
		p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point into
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
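// As a worked example of the arithmetic below (the addresses are purely
// illustrative): growing an old stack [lo=0xc000100000, hi=0xc000102000)
// whose SP is 0xc000101800 means used = hi-sp = 0x800 bytes are live.
// With a new 16 KB stack at [0xc000200000, 0xc000204000),
// delta = new.hi - old.hi = 0x102000, the top "used" bytes are copied to
// new.hi-used = 0xc000203800, and every pointer into the old range is
// shifted up by delta.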
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point into the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round2 rounds x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// ctxt is the value of the context register on morestack. newstack
// will write it to g.sched.ctxt.
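//
// Because each growth doubles the stack (newsize = oldsize*2 below), a
// goroutine that ends up with an N-byte stack has copied at most
// 2K + 4K + ... + N/2 < N bytes in total across all of its grow events,
// which is where the constant amortized cost comes from: for example,
// growing from the 2 KB minimum to 16 KB copies at most 2+4+8 = 14 KB
// over three calls to copystack.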
func newstack(ctxt unsafe.Pointer) {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg
	// Write ctxt to gp.sched. We do this here instead of in
	// morestack so it has the necessary write barrier.
	gp.sched.ctxt = ctxt

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	if gp.startpc == gcBgMarkWorkerPC {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
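	// For example, on linux/amd64 _StackLimit works out to 880-128 = 752
	// bytes (an assumption: the guard multiplier can differ between
	// builds), so a goroutine with an 8192-byte stack whose SP sits 1024
	// bytes below the top counts as using 1024+752 = 1776 bytes. That is
	// below the 8192/4 = 2048 threshold, so the stack is shrunk to 4096
	// bytes.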
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute system stack code on user stack")
	})
}