// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig:
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
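	// For example, on darwin/arm, where _StackSystem is 1024,
	// _FixedStack0 is 3072; the shift-and-or cascade below smears
	// its top bit downward to give _FixedStack6 = 4095, so
	// _FixedStack comes out as 4096.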
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
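	// On linux/amd64, for example, with the default
	// StackGuardMultiplier of 1 this is 880 - 0 - 128 = 752 bytes.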
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
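	// uintptrMask has every bit of a uintptr set. ANDing a negative
	// constant with it (as below) yields that constant's
	// two's-complement bit pattern as a value that fits in a uintptr.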
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex on 32-bit systems (0xfffffffffffffade on 64-bit).
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
// order = log_2(size/FixedStack)
// There is a free list for each order.
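// For example, order 0 holds stacks of _FixedStack bytes, order 1
// holds stacks of twice that, and so on.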
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.manualFreeList = 0
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
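		// n is a power of 2 (checked above), so this loop computes
		// order = log2(n/_FixedStack), or 0 when n <= _FixedStack.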
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
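	// (bv.n+7)/8 rounds the bit count up to whole bytes. The huge
	// array type is only a device for slicing the raw pointer; no
	// array of that size is ever allocated.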
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

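// ptrbit returns bit i of the bitmap: 1 if word i holds a pointer,
// 0 if it holds a scalar. Bits are packed LSB-first within each byte.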
func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, funcInfo{})
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
		p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point into
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point into the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
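// For example, round2(5) == 8 and round2(8) == 8.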
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// ctxt is the value of the context register on morestack. newstack
// will write it to g.sched.ctxt.
func newstack(ctxt unsafe.Pointer) {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg
	// Write ctxt to gp.sched. We do this here instead of in
	// morestack so it has the necessary write barrier.
	gp.sched.ctxt = ctxt

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	if gp.startpc == gcBgMarkWorkerPC {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute system stack code on user stack")
	})
}