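Peter Collingbourne  ad9841e  2014-11-27 00:06:42 +0000

libgo: give names to the formerly anonymous struct fields

This patch gives explicit names to struct fields that the libgo runtime
previously embedded anonymously -- Lock, Note, Special, MCentral, SemaRoot --
and updates every use to match: runtime_lock(c) becomes
runtime_lock(&c->lock), spf->offset becomes spf->special.offset, and so on.
Anonymous fields of a typedef'd type are not standard C; they rely on GCC's
Plan 9 extensions (-fplan9-extensions), which also allow a pointer to the
outer struct to be passed directly where a Lock* is expected (hence the old
runtime_lock(c) calls). Naming the fields makes every access explicit.

A minimal sketch of the pattern, using simplified stand-in types, stub lock
routines, and a hypothetical chansend_sketch rather than the real runtime
definitions:

    /* Before (anonymous field; needs -fplan9-extensions):
     *     struct Hchan { ...; Lock; };
     *     runtime_lock(c);            // Hchan* accepted as Lock*
     */
    typedef struct Lock { int key; } Lock;      /* stand-in type */
    typedef struct Hchan { Lock lock; } Hchan;  /* after: named field */

    static void runtime_lock(Lock *l)   { l->key = 1; }  /* stub */
    static void runtime_unlock(Lock *l) { l->key = 0; }  /* stub */

    static void chansend_sketch(Hchan *c)
    {
        runtime_lock(&c->lock);    /* was: runtime_lock(c);   */
        /* ... operate on the channel ... */
        runtime_unlock(&c->lock);  /* was: runtime_unlock(c); */
    }
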
diff -r 225a208260a6 libgo/runtime/chan.goc
--- a/libgo/runtime/chan.goc Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/chan.goc Tue Sep 23 15:59:57 2014 -0700
@@ -115,7 +115,7 @@
 mysg.releasetime = -1;
 }

- runtime_lock(c);
+ runtime_lock(&c->lock);
 if(raceenabled)
 runtime_racereadpc(c, pc, chansend);
 if(c->closed)
@@ -128,7 +128,7 @@
 if(sg != nil) {
 if(raceenabled)
 racesync(c, sg);
- runtime_unlock(c);
+ runtime_unlock(&c->lock);

 gp = sg->g;
 gp->param = sg;
@@ -141,7 +141,7 @@
 }

 if(!block) {
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 return false;
 }

@@ -150,10 +150,10 @@
 mysg.selectdone = nil;
 g->param = nil;
 enqueue(&c->sendq, &mysg);
- runtime_parkunlock(c, "chan send");
+ runtime_parkunlock(&c->lock, "chan send");

 if(g->param == nil) {
- runtime_lock(c);
+ runtime_lock(&c->lock);
 if(!c->closed)
 runtime_throw("chansend: spurious wakeup");
 goto closed;
@@ -170,16 +170,16 @@

 if(c->qcount >= c->dataqsiz) {
 if(!block) {
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 return false;
 }
 mysg.g = g;
 mysg.elem = nil;
 mysg.selectdone = nil;
 enqueue(&c->sendq, &mysg);
- runtime_parkunlock(c, "chan send");
+ runtime_parkunlock(&c->lock, "chan send");

- runtime_lock(c);
+ runtime_lock(&c->lock);
 goto asynch;
 }

@@ -196,18 +196,18 @@
 sg = dequeue(&c->recvq);
 if(sg != nil) {
 gp = sg->g;
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 if(sg->releasetime)
 sg->releasetime = runtime_cputicks();
 runtime_ready(gp);
 } else
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 if(mysg.releasetime > 0)
 runtime_blockevent(mysg.releasetime - t0, 2);
 return true;

 closed:
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 runtime_panicstring("send on closed channel");
 return false; // not reached
 }
@@ -247,7 +247,7 @@
 mysg.releasetime = -1;
 }

- runtime_lock(c);
+ runtime_lock(&c->lock);
 if(c->dataqsiz > 0)
 goto asynch;

@@ -258,7 +258,7 @@
 if(sg != nil) {
 if(raceenabled)
 racesync(c, sg);
- runtime_unlock(c);
+ runtime_unlock(&c->lock);

 if(ep != nil)
 runtime_memmove(ep, sg->elem, c->elemsize);
@@ -274,7 +274,7 @@
 }

 if(!block) {
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 return false;
 }

@@ -283,10 +283,10 @@
 mysg.selectdone = nil;
 g->param = nil;
 enqueue(&c->recvq, &mysg);
- runtime_parkunlock(c, "chan receive");
+ runtime_parkunlock(&c->lock, "chan receive");

 if(g->param == nil) {
- runtime_lock(c);
+ runtime_lock(&c->lock);
 if(!c->closed)
 runtime_throw("chanrecv: spurious wakeup");
 goto closed;
@@ -304,7 +304,7 @@
 goto closed;

 if(!block) {
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 if(received != nil)
 *received = false;
 return false;
@@ -313,9 +313,9 @@
 mysg.elem = nil;
 mysg.selectdone = nil;
 enqueue(&c->recvq, &mysg);
- runtime_parkunlock(c, "chan receive");
+ runtime_parkunlock(&c->lock, "chan receive");

- runtime_lock(c);
+ runtime_lock(&c->lock);
 goto asynch;
 }

@@ -334,12 +334,12 @@
 sg = dequeue(&c->sendq);
 if(sg != nil) {
 gp = sg->g;
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 if(sg->releasetime)
 sg->releasetime = runtime_cputicks();
 runtime_ready(gp);
 } else
- runtime_unlock(c);
+ runtime_unlock(&c->lock);

 if(received != nil)
 *received = true;
@@ -354,7 +354,7 @@
 *received = false;
 if(raceenabled)
 runtime_raceacquire(c);
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 if(mysg.releasetime > 0)
 runtime_blockevent(mysg.releasetime - t0, 2);
 return true;
@@ -628,7 +628,7 @@
 c0 = sel->lockorder[i];
 if(c0 && c0 != c) {
 c = sel->lockorder[i];
- runtime_lock(c);
+ runtime_lock(&c->lock);
 }
 }
 }
@@ -656,7 +656,7 @@
 c = sel->lockorder[i];
 if(i>0 && sel->lockorder[i-1] == c)
 continue; // will unlock it on the next iteration
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 }
 }

@@ -1071,9 +1071,9 @@
 if(runtime_gcwaiting())
 runtime_gosched();

- runtime_lock(c);
+ runtime_lock(&c->lock);
 if(c->closed) {
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 runtime_panicstring("close of closed channel");
 }

@@ -1108,7 +1108,7 @@
 runtime_ready(gp);
 }

- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 }

 void
diff -r 225a208260a6 libgo/runtime/chan.h
--- a/libgo/runtime/chan.h Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/chan.h Tue Sep 23 15:59:57 2014 -0700
@@ -39,7 +39,7 @@
 uintgo recvx; // receive index
 WaitQ recvq; // list of recv waiters
 WaitQ sendq; // list of send waiters
- Lock;
+ Lock lock;
 };

 // Buffer follows Hchan immediately in memory.
diff -r 225a208260a6 libgo/runtime/heapdump.c
--- a/libgo/runtime/heapdump.c Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/heapdump.c Tue Sep 23 15:59:57 2014 -0700
@@ -387,7 +387,7 @@
 if(sp->kind != KindSpecialFinalizer)
 continue;
 spf = (SpecialFinalizer*)sp;
- p = (byte*)((s->start << PageShift) + spf->offset);
+ p = (byte*)((s->start << PageShift) + spf->special.offset);
 dumpfinalizer(p, spf->fn, spf->ft, spf->ot);
 }
 }
@@ -566,7 +566,7 @@
 if(sp->kind != KindSpecialProfile)
 continue;
 spp = (SpecialProfile*)sp;
- p = (byte*)((s->start << PageShift) + spp->offset);
+ p = (byte*)((s->start << PageShift) + spp->special.offset);
 dumpint(TagAllocSample);
 dumpint((uintptr)p);
 dumpint((uintptr)spp->b);
diff -r 225a208260a6 libgo/runtime/malloc.goc
--- a/libgo/runtime/malloc.goc Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/malloc.goc Tue Sep 23 15:59:57 2014 -0700
@@ -440,9 +440,9 @@
 m->mcache->local_nlookup++;
 if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
 // purge cache stats to prevent overflow
- runtime_lock(&runtime_mheap);
+ runtime_lock(&runtime_mheap.lock);
 runtime_purgecachedstats(m->mcache);
- runtime_unlock(&runtime_mheap);
+ runtime_unlock(&runtime_mheap.lock);
 }

 s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
@@ -743,7 +743,7 @@

 static struct
 {
- Lock;
+ Lock lock;
 byte* pos;
 byte* end;
 } persistent;
@@ -772,19 +772,19 @@
 align = 8;
 if(size >= PersistentAllocMaxBlock)
 return runtime_SysAlloc(size, stat);
- runtime_lock(&persistent);
+ runtime_lock(&persistent.lock);
 persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
 if(persistent.pos + size > persistent.end) {
 persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
 if(persistent.pos == nil) {
- runtime_unlock(&persistent);
+ runtime_unlock(&persistent.lock);
 runtime_throw("runtime: cannot allocate memory");
 }
 persistent.end = persistent.pos + PersistentAllocChunk;
 }
 p = persistent.pos;
 persistent.pos += size;
- runtime_unlock(&persistent);
+ runtime_unlock(&persistent.lock);
 if(stat != &mstats.other_sys) {
 // reaccount the allocation against provided stat
 runtime_xadd64(stat, size);
diff -r 225a208260a6 libgo/runtime/malloc.h
--- a/libgo/runtime/malloc.h Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/malloc.h Tue Sep 23 15:59:57 2014 -0700
@@ -390,7 +390,7 @@
 typedef struct SpecialFinalizer SpecialFinalizer;
 struct SpecialFinalizer
 {
- Special;
+ Special special;
 FuncVal* fn;
 const FuncType* ft;
 const PtrType* ot;
@@ -401,7 +401,7 @@
 typedef struct SpecialProfile SpecialProfile;
 struct SpecialProfile
 {
- Special;
+ Special special;
 Bucket* b;
 };

@@ -458,7 +458,7 @@
 // Central list of free objects of a given size.
 struct MCentral
 {
- Lock;
+ Lock lock;
 int32 sizeclass;
 MSpan nonempty; // list of spans with a free object
 MSpan empty; // list of spans with no free objects (or cached in an MCache)
@@ -476,7 +476,7 @@
 // but all the other global data is here too.
 struct MHeap
 {
- Lock;
+ Lock lock;
 MSpan free[MaxMHeapList]; // free lists of given length
 MSpan freelarge; // free lists length >= MaxMHeapList
 MSpan busy[MaxMHeapList]; // busy lists of large objects of given length
@@ -505,7 +505,7 @@
 // spaced CacheLineSize bytes apart, so that each MCentral.Lock
 // gets its own cache line.
 struct {
- MCentral;
+ MCentral mcentral;
 byte pad[64];
 } central[NumSizeClasses];

diff -r 225a208260a6 libgo/runtime/mcache.c
--- a/libgo/runtime/mcache.c Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/mcache.c Tue Sep 23 15:59:57 2014 -0700
@@ -23,9 +23,9 @@
 MCache *c;
 int32 i;

- runtime_lock(&runtime_mheap);
+ runtime_lock(&runtime_mheap.lock);
 c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
- runtime_unlock(&runtime_mheap);
+ runtime_unlock(&runtime_mheap.lock);
 runtime_memclr((byte*)c, sizeof(*c));
 for(i = 0; i < NumSizeClasses; i++)
 c->alloc[i] = &emptymspan;
@@ -44,10 +44,10 @@
 runtime_freemcache(MCache *c)
 {
 runtime_MCache_ReleaseAll(c);
- runtime_lock(&runtime_mheap);
+ runtime_lock(&runtime_mheap.lock);
 runtime_purgecachedstats(c);
 runtime_FixAlloc_Free(&runtime_mheap.cachealloc, c);
- runtime_unlock(&runtime_mheap);
+ runtime_unlock(&runtime_mheap.lock);
 }

 // Gets a span that has a free object in it and assigns it
@@ -64,19 +64,19 @@
 if(s->freelist != nil)
 runtime_throw("refill on a nonempty span");
 if(s != &emptymspan)
- runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass], s);
+ runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass].mcentral, s);

 // Push any explicitly freed objects to the central lists.
 // Not required, but it seems like a good time to do it.
 l = &c->free[sizeclass];
 if(l->nlist > 0) {
- runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
+ runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass].mcentral, l->list);
 l->list = nil;
 l->nlist = 0;
 }

 // Get a new cached span from the central lists.
- s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass]);
+ s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass].mcentral);
 if(s == nil)
 runtime_throw("out of memory");
 if(s->freelist == nil) {
@@ -102,7 +102,7 @@
 // We transfer a span at a time from MCentral to MCache,
 // so we'll do the same in the other direction.
 if(l->nlist >= (runtime_class_to_allocnpages[sizeclass]<<PageShift)/size) {
- runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
+ runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass].mcentral, l->list);
 l->list = nil;
 l->nlist = 0;
 }
@@ -118,12 +118,12 @@
 for(i=0; i<NumSizeClasses; i++) {
 s = c->alloc[i];
 if(s != &emptymspan) {
- runtime_MCentral_UncacheSpan(&runtime_mheap.central[i], s);
+ runtime_MCentral_UncacheSpan(&runtime_mheap.central[i].mcentral, s);
 c->alloc[i] = &emptymspan;
 }
 l = &c->free[i];
 if(l->nlist > 0) {
- runtime_MCentral_FreeList(&runtime_mheap.central[i], l->list);
+ runtime_MCentral_FreeList(&runtime_mheap.central[i].mcentral, l->list);
 l->list = nil;
 l->nlist = 0;
 }
diff -r 225a208260a6 libgo/runtime/mcentral.c
--- a/libgo/runtime/mcentral.c Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/mcentral.c Tue Sep 23 15:59:57 2014 -0700
@@ -39,14 +39,14 @@
 int32 cap, n;
 uint32 sg;

- runtime_lock(c);
+ runtime_lock(&c->lock);
 sg = runtime_mheap.sweepgen;
 retry:
 for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
 if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 runtime_MSpan_Sweep(s);
- runtime_lock(c);
+ runtime_lock(&c->lock);
 // the span could have been moved to heap, retry
 goto retry;
 }
@@ -65,9 +65,9 @@
 runtime_MSpanList_Remove(s);
 // swept spans are at the end of the list
 runtime_MSpanList_InsertBack(&c->empty, s);
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 runtime_MSpan_Sweep(s);
- runtime_lock(c);
+ runtime_lock(&c->lock);
 // the span could be moved to nonempty or heap, retry
 goto retry;
 }
@@ -82,7 +82,7 @@

 // Replenish central list if empty.
 if(!MCentral_Grow(c)) {
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 return nil;
 }
 goto retry;
@@ -98,7 +98,7 @@
 runtime_MSpanList_Remove(s);
 runtime_MSpanList_InsertBack(&c->empty, s);
 s->incache = true;
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 return s;
 }

@@ -109,7 +109,7 @@
 MLink *v;
 int32 cap, n;

- runtime_lock(c);
+ runtime_lock(&c->lock);

 s->incache = false;

@@ -135,7 +135,7 @@
 runtime_MSpanList_Remove(s);
 runtime_MSpanList_Insert(&c->nonempty, s);
 }
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 }

 // Free the list of objects back into the central free list c.
@@ -145,12 +145,12 @@
 {
 MLink *next;

- runtime_lock(c);
+ runtime_lock(&c->lock);
 for(; start != nil; start = next) {
 next = start->next;
 MCentral_Free(c, start);
 }
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 }

 // Helper: free one object back into the central free list.
@@ -193,7 +193,7 @@
 // If s is completely freed, return it to the heap.
 if(s->ref == 0) {
 MCentral_ReturnToHeap(c, s); // unlocks c
- runtime_lock(c);
+ runtime_lock(&c->lock);
 }
 }

@@ -206,7 +206,7 @@
 {
 if(s->incache)
 runtime_throw("freespan into cached span");
- runtime_lock(c);
+ runtime_lock(&c->lock);

 // Move to nonempty if necessary.
 if(s->freelist == nil) {
@@ -227,7 +227,7 @@
 runtime_atomicstore(&s->sweepgen, runtime_mheap.sweepgen);

 if(s->ref != 0) {
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 return false;
 }

@@ -260,12 +260,12 @@
 byte *p;
 MSpan *s;

- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
 s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0, 1);
 if(s == nil) {
 // TODO(rsc): Log out of memory
- runtime_lock(c);
+ runtime_lock(&c->lock);
 return false;
 }

@@ -282,7 +282,7 @@
 *tailp = nil;
 runtime_markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));

- runtime_lock(c);
+ runtime_lock(&c->lock);
 c->nfree += n;
 runtime_MSpanList_Insert(&c->nonempty, s);
 return true;
@@ -301,7 +301,7 @@
 if(s->ref != 0)
 runtime_throw("ref wrong");
 c->nfree -= (s->npages << PageShift) / size;
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
 runtime_MHeap_Free(&runtime_mheap, s, 0);
 }
diff -r 225a208260a6 libgo/runtime/mgc0.c
--- a/libgo/runtime/mgc0.c Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/mgc0.c Tue Sep 23 15:59:57 2014 -0700
@@ -225,7 +225,7 @@
 Note alldone;
 ParFor *markfor;

- Lock;
+ Lock lock;
 byte *chunk;
 uintptr nchunk;
 } work __attribute__((aligned(8)));
@@ -1337,7 +1337,7 @@
 // retain everything it points to.
 spf = (SpecialFinalizer*)sp;
 // A finalizer can be set for an inner byte of an object, find object beginning.
- p = (void*)((s->start << PageShift) + spf->offset/s->elemsize*s->elemsize);
+ p = (void*)((s->start << PageShift) + spf->special.offset/s->elemsize*s->elemsize);
 enqueue1(&wbuf, (Obj){p, s->elemsize, 0});
 enqueue1(&wbuf, (Obj){(void*)&spf->fn, PtrSize, 0});
 enqueue1(&wbuf, (Obj){(void*)&spf->ft, PtrSize, 0});
@@ -1378,7 +1378,7 @@
 b = (Workbuf*)runtime_lfstackpop(&work.empty);
 if(b == nil) {
 // Need to allocate.
- runtime_lock(&work);
+ runtime_lock(&work.lock);
 if(work.nchunk < sizeof *b) {
 work.nchunk = 1<<20;
 work.chunk = runtime_SysAlloc(work.nchunk, &mstats.gc_sys);
@@ -1388,7 +1388,7 @@
 b = (Workbuf*)work.chunk;
 work.chunk += sizeof *b;
 work.nchunk -= sizeof *b;
- runtime_unlock(&work);
+ runtime_unlock(&work.lock);
 }
 b->nobj = 0;
 return b;
@@ -1802,7 +1802,7 @@
 c->local_nsmallfree[cl] += nfree;
 c->local_cachealloc -= nfree * size;
 runtime_xadd64(&mstats.next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
- res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
+ res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl].mcentral, s, nfree, head.next, end);
 //MCentral_FreeSpan updates sweepgen
 }
 return res;
@@ -2147,10 +2147,10 @@
 return;

 if(gcpercent == GcpercentUnknown) { // first time through
- runtime_lock(&runtime_mheap);
+ runtime_lock(&runtime_mheap.lock);
 if(gcpercent == GcpercentUnknown)
 gcpercent = readgogc();
- runtime_unlock(&runtime_mheap);
+ runtime_unlock(&runtime_mheap.lock);
 }
 if(gcpercent < 0)
 return;
@@ -2421,7 +2421,7 @@

 // Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
 p = (uint64*)pauses->array;
- runtime_lock(&runtime_mheap);
+ runtime_lock(&runtime_mheap.lock);
 n = mstats.numgc;
 if(n > nelem(mstats.pause_ns))
 n = nelem(mstats.pause_ns);
@@ -2436,7 +2436,7 @@
 p[n] = mstats.last_gc;
 p[n+1] = mstats.numgc;
 p[n+2] = mstats.pause_total_ns;
- runtime_unlock(&runtime_mheap);
+ runtime_unlock(&runtime_mheap.lock);
 pauses->__count = n+3;
 }

@@ -2444,14 +2444,14 @@
 runtime_setgcpercent(int32 in) {
 int32 out;

- runtime_lock(&runtime_mheap);
+ runtime_lock(&runtime_mheap.lock);
 if(gcpercent == GcpercentUnknown)
 gcpercent = readgogc();
 out = gcpercent;
 if(in < 0)
 in = -1;
 gcpercent = in;
- runtime_unlock(&runtime_mheap);
+ runtime_unlock(&runtime_mheap.lock);
 return out;
 }

diff -r 225a208260a6 libgo/runtime/mheap.c
--- a/libgo/runtime/mheap.c Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/mheap.c Tue Sep 23 15:59:57 2014 -0700
@@ -70,7 +70,7 @@
 runtime_MSpanList_Init(&h->freelarge);
 runtime_MSpanList_Init(&h->busylarge);
 for(i=0; i<nelem(h->central); i++)
- runtime_MCentral_Init(&h->central[i], i);
+ runtime_MCentral_Init(&h->central[i].mcentral, i);
 }

 void
@@ -109,9 +109,9 @@
 runtime_MSpanList_Remove(s);
 // swept spans are at the end of the list
 runtime_MSpanList_InsertBack(list, s);
- runtime_unlock(h);
+ runtime_unlock(&h->lock);
 n += runtime_MSpan_Sweep(s);
- runtime_lock(h);
+ runtime_lock(&h->lock);
 if(n >= npages)
 return n;
 // the span could have been moved elsewhere
@@ -156,7 +156,7 @@
 }

 // Now sweep everything that is not yet swept.
- runtime_unlock(h);
+ runtime_unlock(&h->lock);
 for(;;) {
 n = runtime_sweepone();
 if(n == (uintptr)-1) // all spans are swept
@@ -165,7 +165,7 @@
 if(reclaimed >= npage)
 break;
 }
- runtime_lock(h);
+ runtime_lock(&h->lock);
 }

 // Allocate a new span of npage pages from the heap
@@ -175,7 +175,7 @@
 {
 MSpan *s;

- runtime_lock(h);
+ runtime_lock(&h->lock);
 mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
 runtime_m()->mcache->local_cachealloc = 0;
 s = MHeap_AllocLocked(h, npage, sizeclass);
@@ -191,7 +191,7 @@
 runtime_MSpanList_InsertBack(&h->busylarge, s);
 }
 }
- runtime_unlock(h);
+ runtime_unlock(&h->lock);
 if(s != nil) {
 if(needzero && s->needzero)
 runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
@@ -386,7 +386,7 @@
 void
 runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 {
- runtime_lock(h);
+ runtime_lock(&h->lock);
 mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
 runtime_m()->mcache->local_cachealloc = 0;
 mstats.heap_inuse -= s->npages<<PageShift;
@@ -395,7 +395,7 @@
 mstats.heap_objects--;
 }
 MHeap_FreeLocked(h, s);
- runtime_unlock(h);
+ runtime_unlock(&h->lock);
 }

 static void
@@ -548,10 +548,10 @@
 runtime_noteclear(&note);
 runtime_notetsleepg(&note, tick);

- runtime_lock(h);
+ runtime_lock(&h->lock);
 unixnow = runtime_unixnanotime();
 if(unixnow - mstats.last_gc > forcegc) {
- runtime_unlock(h);
+ runtime_unlock(&h->lock);
 // The scavenger can not block other goroutines,
 // otherwise deadlock detector can fire spuriously.
 // GC blocks other goroutines via the runtime_worldsema.
@@ -561,11 +561,11 @@
 runtime_notetsleepg(&note, -1);
 if(runtime_debug.gctrace > 0)
 runtime_printf("scvg%d: GC forced\n", k);
- runtime_lock(h);
+ runtime_lock(&h->lock);
 }
 now = runtime_nanotime();
 scavenge(k, now, limit);
- runtime_unlock(h);
+ runtime_unlock(&h->lock);
 }
 }

@@ -575,9 +575,9 @@
 runtime_debug_freeOSMemory(void)
 {
 runtime_gc(2); // force GC and do eager sweep
- runtime_lock(&runtime_mheap);
+ runtime_lock(&runtime_mheap.lock);
 scavenge(-1, ~(uintptr)0, 0);
- runtime_unlock(&runtime_mheap);
+ runtime_unlock(&runtime_mheap.lock);
 }

 // Initialize a new span with the given start and npages.
@@ -752,11 +752,11 @@
 runtime_lock(&runtime_mheap.speciallock);
 s = runtime_FixAlloc_Alloc(&runtime_mheap.specialfinalizeralloc);
 runtime_unlock(&runtime_mheap.speciallock);
- s->kind = KindSpecialFinalizer;
+ s->special.kind = KindSpecialFinalizer;
 s->fn = f;
 s->ft = ft;
 s->ot = ot;
- if(addspecial(p, s))
+ if(addspecial(p, &s->special))
 return true;

 // There was an old finalizer
@@ -789,9 +789,9 @@
 runtime_lock(&runtime_mheap.speciallock);
 s = runtime_FixAlloc_Alloc(&runtime_mheap.specialprofilealloc);
 runtime_unlock(&runtime_mheap.speciallock);
- s->kind = KindSpecialProfile;
+ s->special.kind = KindSpecialProfile;
 s->b = b;
- if(!addspecial(p, s))
+ if(!addspecial(p, &s->special))
 runtime_throw("setprofilebucket: profile already set");
 }

@@ -879,14 +879,14 @@
 // remove the span from whatever list it is in now
 if(s->sizeclass > 0) {
 // must be in h->central[x].empty
- c = &h->central[s->sizeclass];
- runtime_lock(c);
+ c = &h->central[s->sizeclass].mcentral;
+ runtime_lock(&c->lock);
 runtime_MSpanList_Remove(s);
- runtime_unlock(c);
- runtime_lock(h);
+ runtime_unlock(&c->lock);
+ runtime_lock(&h->lock);
 } else {
 // must be in h->busy/busylarge
- runtime_lock(h);
+ runtime_lock(&h->lock);
 runtime_MSpanList_Remove(s);
 }
 // heap is locked now
@@ -933,18 +933,18 @@

 // place the span into a new list
 if(s->sizeclass > 0) {
- runtime_unlock(h);
- c = &h->central[s->sizeclass];
- runtime_lock(c);
+ runtime_unlock(&h->lock);
+ c = &h->central[s->sizeclass].mcentral;
+ runtime_lock(&c->lock);
 // swept spans are at the end of the list
 runtime_MSpanList_InsertBack(&c->empty, s);
- runtime_unlock(c);
+ runtime_unlock(&c->lock);
 } else {
 // Swept spans are at the end of lists.
 if(s->npages < nelem(h->free))
 runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
 else
 runtime_MSpanList_InsertBack(&h->busylarge, s);
- runtime_unlock(h);
+ runtime_unlock(&h->lock);
 }
 }
diff -r 225a208260a6 libgo/runtime/netpoll.goc
--- a/libgo/runtime/netpoll.goc Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/netpoll.goc Tue Sep 23 15:59:57 2014 -0700
@@ -53,7 +53,7 @@
 // pollReset, pollWait, pollWaitCanceled and runtime_netpollready (IO rediness notification)
 // proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
 // in a lock-free way by all operations.
- Lock; // protectes the following fields
+ Lock lock; // protectes the following fields
 uintptr fd;
 bool closing;
 uintptr seq; // protects from stale timers and ready notifications
@@ -68,7 +68,7 @@

 static struct
 {
- Lock;
+ Lock lock;
 PollDesc* first;
 // PollDesc objects must be type-stable,
 // because we can get ready notification from epoll/kqueue
@@ -100,7 +100,7 @@

 func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
 pd = allocPollDesc();
- runtime_lock(pd);
+ runtime_lock(&pd->lock);
 if(pd->wg != nil && pd->wg != READY)
 runtime_throw("runtime_pollOpen: blocked write on free descriptor");
 if(pd->rg != nil && pd->rg != READY)
@@ -112,7 +112,7 @@
 pd->rd = 0;
 pd->wg = nil;
 pd->wd = 0;
- runtime_unlock(pd);
+ runtime_unlock(&pd->lock);

 errno = runtime_netpollopen(fd, pd);
 }
@@ -125,10 +125,10 @@
 if(pd->rg != nil && pd->rg != READY)
 runtime_throw("runtime_pollClose: blocked read on closing descriptor");
 runtime_netpollclose(pd->fd);
- runtime_lock(&pollcache);
+ runtime_lock(&pollcache.lock);
 pd->link = pollcache.first;
 pollcache.first = pd;
- runtime_unlock(&pollcache);
+ runtime_unlock(&pollcache.lock);
 }

 func runtime_pollReset(pd *PollDesc, mode int) (err int) {
@@ -169,9 +169,9 @@
 func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
 G *rg, *wg;

- runtime_lock(pd);
+ runtime_lock(&pd->lock);
 if(pd->closing) {
- runtime_unlock(pd);
+ runtime_unlock(&pd->lock);
 return;
 }
 pd->seq++; // invalidate current timers
@@ -223,7 +223,7 @@
 rg = netpollunblock(pd, 'r', false);
 if(pd->wd < 0)
 wg = netpollunblock(pd, 'w', false);
- runtime_unlock(pd);
+ runtime_unlock(&pd->lock);
 if(rg)
 runtime_ready(rg);
 if(wg)
@@ -233,7 +233,7 @@
 func runtime_pollUnblock(pd *PollDesc) {
 G *rg, *wg;

- runtime_lock(pd);
+ runtime_lock(&pd->lock);
 if(pd->closing)
 runtime_throw("runtime_pollUnblock: already closing");
 pd->closing = true;
@@ -249,7 +249,7 @@
 runtime_deltimer(&pd->wt);
 pd->wt.fv = nil;
 }
- runtime_unlock(pd);
+ runtime_unlock(&pd->lock);
 if(rg)
 runtime_ready(rg);
 if(wg)
@@ -277,13 +277,13 @@
 void
 runtime_netpolllock(PollDesc *pd)
 {
- runtime_lock(pd);
+ runtime_lock(&pd->lock);
 }

 void
 runtime_netpollunlock(PollDesc *pd)
 {
- runtime_unlock(pd);
+ runtime_unlock(&pd->lock);
 }

 // make pd ready, newly runnable goroutines (if any) are enqueued info gpp list
@@ -401,10 +401,10 @@
 // If it's stale, ignore the timer event.
 seq = (uintptr)arg.type;
 rg = wg = nil;
- runtime_lock(pd);
+ runtime_lock(&pd->lock);
 if(seq != pd->seq) {
 // The descriptor was reused or timers were reset.
- runtime_unlock(pd);
+ runtime_unlock(&pd->lock);
 return;
 }
 if(read) {
@@ -421,7 +421,7 @@
 runtime_atomicstorep(&pd->wt.fv, nil); // full memory barrier between store to wd and load of wg in netpollunblock
 wg = netpollunblock(pd, 'w', false);
 }
- runtime_unlock(pd);
+ runtime_unlock(&pd->lock);
 if(rg)
 runtime_ready(rg);
 if(wg)
@@ -452,7 +452,7 @@
 PollDesc *pd;
 uint32 i, n;

- runtime_lock(&pollcache);
+ runtime_lock(&pollcache.lock);
 if(pollcache.first == nil) {
 n = PollBlockSize/sizeof(*pd);
 if(n == 0)
@@ -467,6 +467,6 @@
 }
 pd = pollcache.first;
 pollcache.first = pd->link;
- runtime_unlock(&pollcache);
+ runtime_unlock(&pollcache.lock);
 return pd;
 }
diff -r 225a208260a6 libgo/runtime/proc.c
--- a/libgo/runtime/proc.c Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/proc.c Tue Sep 23 15:59:57 2014 -0700
@@ -357,7 +357,7 @@

 typedef struct Sched Sched;
 struct Sched {
- Lock;
+ Lock lock;

 uint64 goidgen;
 M* midle; // idle m's waiting for work
@@ -770,7 +770,7 @@

 mp->fastrand = 0x49f6428aUL + mp->id + runtime_cputicks();

- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 mp->id = runtime_sched.mcount++;
 checkmcount();
 runtime_mpreinit(mp);
@@ -781,7 +781,7 @@
 // runtime_NumCgoCall() iterates over allm w/o schedlock,
 // so we need to publish it safely.
 runtime_atomicstorep(&runtime_allm, mp);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 }

 // Mark gp ready to run.
@@ -808,7 +808,7 @@

 // Figure out how many CPUs to use during GC.
 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 n = runtime_gomaxprocs;
 if(n > runtime_ncpu)
 n = runtime_ncpu > 0 ? runtime_ncpu : 1;
@@ -816,7 +816,7 @@
 n = MaxGcproc;
 if(n > runtime_sched.nmidle+1) // one M is currently running
 n = runtime_sched.nmidle+1;
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 return n;
 }

@@ -825,14 +825,14 @@
 {
 int32 n;

- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 n = runtime_gomaxprocs;
 if(n > runtime_ncpu)
 n = runtime_ncpu;
 if(n > MaxGcproc)
 n = MaxGcproc;
 n -= runtime_sched.nmidle+1; // one M is currently running
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 return n > 0;
 }

@@ -842,7 +842,7 @@
 M *mp;
 int32 n, pos;

- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 pos = 0;
 for(n = 1; n < nproc; n++) { // one M is currently running
 if(runtime_allp[pos]->mcache == m->mcache)
@@ -855,7 +855,7 @@
 pos++;
 runtime_notewakeup(&mp->park);
 }
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 }

 // Similar to stoptheworld but best-effort and can be called several times.
@@ -894,7 +894,7 @@
 P *p;
 bool wait;

- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 runtime_sched.stopwait = runtime_gomaxprocs;
 runtime_atomicstore((uint32*)&runtime_sched.gcwaiting, 1);
 preemptall();
@@ -914,7 +914,7 @@
 runtime_sched.stopwait--;
 }
 wait = runtime_sched.stopwait > 0;
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);

 // wait for remaining P's to stop voluntarily
 if(wait) {
@@ -948,7 +948,7 @@
 gp = runtime_netpoll(false); // non-blocking
 injectglist(gp);
 add = needaddgcproc();
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 if(newprocs) {
 procresize(newprocs);
 newprocs = 0;
@@ -972,7 +972,7 @@
 runtime_sched.sysmonwait = false;
 runtime_notewakeup(&runtime_sched.sysmonnote);
 }
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);

 while(p1) {
 p = p1;
@@ -1404,9 +1404,9 @@
 }

 retry:
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 mput(m);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 runtime_notesleep(&m->park);
 runtime_noteclear(&m->park);
 if(m->helpgc) {
@@ -1433,18 +1433,18 @@
 M *mp;
 void (*fn)(void);

- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 if(p == nil) {
 p = pidleget();
 if(p == nil) {
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 if(spinning)
 runtime_xadd(&runtime_sched.nmspinning, -1);
 return;
 }
 }
 mp = mget();
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 if(mp == nil) {
 fn = nil;
 if(spinning)
@@ -1477,28 +1477,28 @@
 startm(p, true);
 return;
 }
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 if(runtime_sched.gcwaiting) {
 p->status = Pgcstop;
 if(--runtime_sched.stopwait == 0)
 runtime_notewakeup(&runtime_sched.stopnote);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 return;
 }
 if(runtime_sched.runqsize) {
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 startm(p, false);
 return;
 }
 // If this is the last running P and nobody is polling network,
 // need to wakeup another M to poll network.
 if(runtime_sched.npidle == (uint32)runtime_gomaxprocs-1 && runtime_atomicload64(&runtime_sched.lastpoll) != 0) {
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 startm(p, false);
 return;
 }
 pidleput(p);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 }

 // Tries to add one more P to execute G's.
@@ -1570,11 +1570,11 @@
 runtime_xadd(&runtime_sched.nmspinning, -1);
 }
 p = releasep();
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 p->status = Pgcstop;
 if(--runtime_sched.stopwait == 0)
 runtime_notewakeup(&runtime_sched.stopnote);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 stopm();
 }

@@ -1625,9 +1625,9 @@
 return gp;
 // global runq
 if(runtime_sched.runqsize) {
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 gp = globrunqget(m->p, 0);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 if(gp)
 return gp;
 }
@@ -1661,19 +1661,19 @@
 }
 stop:
 // return P and block
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 if(runtime_sched.gcwaiting) {
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 goto top;
 }
 if(runtime_sched.runqsize) {
 gp = globrunqget(m->p, 0);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 return gp;
 }
 p = releasep();
 pidleput(p);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 if(m->spinning) {
 m->spinning = false;
 runtime_xadd(&runtime_sched.nmspinning, -1);
@@ -1682,9 +1682,9 @@
 for(i = 0; i < runtime_gomaxprocs; i++) {
 p = runtime_allp[i];
 if(p && p->runqhead != p->runqtail) {
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 p = pidleget();
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 if(p) {
 acquirep(p);
 goto top;
@@ -1701,9 +1701,9 @@
 gp = runtime_netpoll(true); // block until new work is available
 runtime_atomicstore64(&runtime_sched.lastpoll, runtime_nanotime());
 if(gp) {
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 p = pidleget();
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 if(p) {
 acquirep(p);
 injectglist(gp->schedlink);
@@ -1746,14 +1746,14 @@

 if(glist == nil)
 return;
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 for(n = 0; glist; n++) {
 gp = glist;
 glist = gp->schedlink;
 gp->status = Grunnable;
 globrunqput(gp);
 }
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);

 for(; n && runtime_sched.npidle; n--)
 startm(nil, false);
@@ -1784,9 +1784,9 @@
 // This is a fancy way to say tick%61==0,
 // it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
 if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime_sched.runqsize > 0) {
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 gp = globrunqget(m->p, 1);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 if(gp)
 resetspinning();
 }
@@ -1880,9 +1880,9 @@
 gp->status = Grunnable;
 gp->m = nil;
 m->curg = nil;
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 globrunqput(gp);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 if(m->lockedg) {
 stoplockedm();
 execute(gp); // Never returns.
@@ -1985,24 +1985,24 @@
 g->status = Gsyscall;

 if(runtime_atomicload(&runtime_sched.sysmonwait)) { // TODO: fast atomic
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 if(runtime_atomicload(&runtime_sched.sysmonwait)) {
 runtime_atomicstore(&runtime_sched.sysmonwait, 0);
 runtime_notewakeup(&runtime_sched.sysmonnote);
 }
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 }

 m->mcache = nil;
 m->p->m = nil;
 runtime_atomicstore(&m->p->status, Psyscall);
 if(runtime_sched.gcwaiting) {
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 if (runtime_sched.stopwait > 0 && runtime_cas(&m->p->status, Psyscall, Pgcstop)) {
 if(--runtime_sched.stopwait == 0)
 runtime_notewakeup(&runtime_sched.stopnote);
 }
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 }

 m->locks--;
@@ -2113,13 +2113,13 @@
 // Try to get any other idle P.
 m->p = nil;
 if(runtime_sched.pidle) {
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 p = pidleget();
 if(p && runtime_atomicload(&runtime_sched.sysmonwait)) {
 runtime_atomicstore(&runtime_sched.sysmonwait, 0);
 runtime_notewakeup(&runtime_sched.sysmonnote);
 }
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 if(p) {
 acquirep(p);
 return true;
@@ -2138,7 +2138,7 @@
 gp->status = Grunnable;
 gp->m = nil;
 m->curg = nil;
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 p = pidleget();
 if(p == nil)
 globrunqput(gp);
@@ -2146,7 +2146,7 @@
 runtime_atomicstore(&runtime_sched.sysmonwait, 0);
 runtime_notewakeup(&runtime_sched.sysmonnote);
 }
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 if(p) {
 acquirep(p);
 execute(gp); // Never returns.
@@ -2425,13 +2425,13 @@

 if(n > MaxGomaxprocs)
 n = MaxGomaxprocs;
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 ret = runtime_gomaxprocs;
 if(n <= 0 || n == ret) {
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 return ret;
 }
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);

 runtime_semacquire(&runtime_worldsema, false);
 m->gcing = 1;
@@ -2536,7 +2536,7 @@
 }

 static struct {
- Lock;
+ Lock lock;
 void (*fn)(uintptr*, int32);
 int32 hz;
 uintptr pcbuf[TracebackMaxFrames];
@@ -2568,9 +2568,9 @@
 if(mp->mcache == nil)
 traceback = false;

- runtime_lock(&prof);
+ runtime_lock(&prof.lock);
 if(prof.fn == nil) {
- runtime_unlock(&prof);
+ runtime_unlock(&prof.lock);
 mp->mallocing--;
 return;
 }
@@ -2598,7 +2598,7 @@
 prof.pcbuf[1] = (uintptr)System;
 }
 prof.fn(prof.pcbuf, n);
- runtime_unlock(&prof);
+ runtime_unlock(&prof.lock);
 mp->mallocing--;
 }

@@ -2623,13 +2623,13 @@
 // it would deadlock.
 runtime_resetcpuprofiler(0);

- runtime_lock(&prof);
+ runtime_lock(&prof.lock);
 prof.fn = fn;
 prof.hz = hz;
- runtime_unlock(&prof);
- runtime_lock(&runtime_sched);
+ runtime_unlock(&prof.lock);
+ runtime_lock(&runtime_sched.lock);
 runtime_sched.profilehz = hz;
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);

 if(hz != 0)
 runtime_resetcpuprofiler(hz);
@@ -2767,11 +2767,11 @@
 static void
 incidlelocked(int32 v)
 {
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 runtime_sched.nmidlelocked += v;
 if(v > 0)
 checkdead();
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 }

 // Check for deadlock situation.
@@ -2840,16 +2840,16 @@
 runtime_usleep(delay);
 if(runtime_debug.schedtrace <= 0 &&
 (runtime_sched.gcwaiting || runtime_atomicload(&runtime_sched.npidle) == (uint32)runtime_gomaxprocs)) { // TODO: fast atomic
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 if(runtime_atomicload(&runtime_sched.gcwaiting) || runtime_atomicload(&runtime_sched.npidle) == (uint32)runtime_gomaxprocs) {
 runtime_atomicstore(&runtime_sched.sysmonwait, 1);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 runtime_notesleep(&runtime_sched.sysmonnote);
 runtime_noteclear(&runtime_sched.sysmonnote);
 idle = 0;
 delay = 20;
 } else
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 }
 // poll network if not polled for more than 10ms
 lastpoll = runtime_atomicload64(&runtime_sched.lastpoll);
@@ -2978,7 +2978,7 @@
 if(starttime == 0)
 starttime = now;

- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 runtime_printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d idlethreads=%d runqueue=%d",
 (now-starttime)/1000000, runtime_gomaxprocs, runtime_sched.npidle, runtime_sched.mcount,
 runtime_sched.nmidle, runtime_sched.runqsize);
@@ -3014,7 +3014,7 @@
 }
 }
 if(!detailed) {
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 return;
 }
 for(mp = runtime_allm; mp; mp = mp->alllink) {
@@ -3046,7 +3046,7 @@
 lockedm ? lockedm->id : -1);
 }
 runtime_unlock(&allglock);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 }

 // Put mp on midle list.
@@ -3202,9 +3202,9 @@
 for(i=0; i<n; i++)
 batch[i]->schedlink = batch[i+1];
 // Now put the batch on global queue.
- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 globrunqputbatch(batch[0], batch[n], n+1);
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 return true;
 }

@@ -3356,11 +3356,11 @@
 {
 int32 out;

- runtime_lock(&runtime_sched);
+ runtime_lock(&runtime_sched.lock);
 out = runtime_sched.maxmcount;
 runtime_sched.maxmcount = in;
 checkmcount();
- runtime_unlock(&runtime_sched);
+ runtime_unlock(&runtime_sched.lock);
 return out;
 }

diff -r 225a208260a6 libgo/runtime/runtime.h
--- a/libgo/runtime/runtime.h Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/runtime.h Tue Sep 23 15:59:57 2014 -0700
@@ -286,7 +286,7 @@

 struct P
 {
- Lock;
+ Lock lock;

 int32 id;
 uint32 status; // one of Pidle/Prunning/...
@@ -384,7 +384,7 @@

 struct Timers
 {
- Lock;
+ Lock lock;
 G *timerproc;
 bool sleeping;
 bool rescheduling;
diff -r 225a208260a6 libgo/runtime/sema.goc
--- a/libgo/runtime/sema.goc Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/sema.goc Tue Sep 23 15:59:57 2014 -0700
@@ -35,7 +35,7 @@
 typedef struct SemaRoot SemaRoot;
 struct SemaRoot
 {
- Lock;
+ Lock lock;
 SemaWaiter* head;
 SemaWaiter* tail;
 // Number of waiters. Read w/o the lock.
@@ -47,7 +47,7 @@

 struct semtable
 {
- SemaRoot;
+ SemaRoot root;
 uint8 pad[CacheLineSize-sizeof(SemaRoot)];
 };
 static struct semtable semtable[SEMTABLESZ];
@@ -55,7 +55,7 @@
 static SemaRoot*
 semroot(uint32 volatile *addr)
 {
- return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
+ return &semtable[((uintptr)addr >> 3) % SEMTABLESZ].root;
 }

 static void
@@ -124,19 +124,19 @@
 }
 for(;;) {

- runtime_lock(root);
+ runtime_lock(&root->lock);
 // Add ourselves to nwait to disable "easy case" in semrelease.
 runtime_xadd(&root->nwait, 1);
 // Check cansemacquire to avoid missed wakeup.
 if(cansemacquire(addr)) {
 runtime_xadd(&root->nwait, -1);
- runtime_unlock(root);
+ runtime_unlock(&root->lock);
 return;
 }
 // Any semrelease after the cansemacquire knows we're waiting
 // (we set nwait above), so go to sleep.
 semqueue(root, addr, &s);
- runtime_parkunlock(root, "semacquire");
+ runtime_parkunlock(&root->lock, "semacquire");
 if(cansemacquire(addr)) {
 if(t0)
 runtime_blockevent(s.releasetime - t0, 3);
@@ -161,11 +161,11 @@
 return;

 // Harder case: search for a waiter and wake it.
- runtime_lock(root);
+ runtime_lock(&root->lock);
 if(runtime_atomicload(&root->nwait) == 0) {
 // The count is already consumed by another goroutine,
 // so no need to wake up another goroutine.
- runtime_unlock(root);
+ runtime_unlock(&root->lock);
 return;
 }
 for(s = root->head; s; s = s->next) {
@@ -175,7 +175,7 @@
 break;
 }
 }
- runtime_unlock(root);
+ runtime_unlock(&root->lock);
 if(s) {
 if(s->releasetime)
 s->releasetime = runtime_cputicks();
@@ -211,7 +211,7 @@
 typedef struct SyncSema SyncSema;
 struct SyncSema
 {
- Lock;
+ Lock lock;
 SemaWaiter* head;
 SemaWaiter* tail;
 };
@@ -238,7 +238,7 @@
 w.releasetime = -1;
 }

- runtime_lock(s);
+ runtime_lock(&s->lock);
 if(s->head && s->head->nrelease > 0) {
 // have pending release, consume it
 wake = nil;
@@ -249,7 +249,7 @@
 if(s->head == nil)
 s->tail = nil;
 }
- runtime_unlock(s);
+ runtime_unlock(&s->lock);
 if(wake)
 runtime_ready(wake->g);
 } else {
@@ -259,7 +259,7 @@
 else
 s->tail->next = &w;
 s->tail = &w;
- runtime_parkunlock(s, "semacquire");
+ runtime_parkunlock(&s->lock, "semacquire");
 if(t0)
 runtime_blockevent(w.releasetime - t0, 2);
 }
@@ -274,7 +274,7 @@
 w.next = nil;
 w.releasetime = 0;

- runtime_lock(s);
+ runtime_lock(&s->lock);
 while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
 // have pending acquire, satisfy it
 wake = s->head;
@@ -293,7 +293,7 @@
 else
 s->tail->next = &w;
 s->tail = &w;
- runtime_parkunlock(s, "semarelease");
+ runtime_parkunlock(&s->lock, "semarelease");
 } else
- runtime_unlock(s);
+ runtime_unlock(&s->lock);
 }
diff -r 225a208260a6 libgo/runtime/sigqueue.goc
--- a/libgo/runtime/sigqueue.goc Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/sigqueue.goc Tue Sep 23 15:59:57 2014 -0700
@@ -32,7 +32,7 @@
 #include "defs.h"

 static struct {
- Note;
+ Note note;
 uint32 mask[(NSIG+31)/32];
 uint32 wanted[(NSIG+31)/32];
 uint32 state;
@@ -70,7 +70,7 @@
 new = HASSIGNAL;
 if(runtime_cas(&sig.state, old, new)) {
 if (old == HASWAITER)
- runtime_notewakeup(&sig);
+ runtime_notewakeup(&sig.note);
 break;
 }
 }
@@ -107,8 +107,8 @@
 new = HASWAITER;
 if(runtime_cas(&sig.state, old, new)) {
 if (new == HASWAITER) {
- runtime_notetsleepg(&sig, -1);
- runtime_noteclear(&sig);
+ runtime_notetsleepg(&sig.note, -1);
+ runtime_noteclear(&sig.note);
 }
 break;
 }
@@ -138,7 +138,7 @@
 // to use for initialization. It does not pass
 // signal information in m.
 sig.inuse = true; // enable reception of signals; cannot disable
- runtime_noteclear(&sig);
+ runtime_noteclear(&sig.note);
 return;
 }

diff -r 225a208260a6 libgo/runtime/time.goc
--- a/libgo/runtime/time.goc Mon Sep 22 14:14:24 2014 -0700
+++ b/libgo/runtime/time.goc Tue Sep 23 15:59:57 2014 -0700
@@ -94,17 +94,17 @@
 t.period = 0;
 t.fv = &readyv;
 t.arg.__object = g;
- runtime_lock(&timers);
+ runtime_lock(&timers.lock);
 addtimer(&t);
- runtime_parkunlock(&timers, reason);
+ runtime_parkunlock(&timers.lock, reason);
 }

 void
 runtime_addtimer(Timer *t)
 {
- runtime_lock(&timers);
+ runtime_lock(&timers.lock);
 addtimer(t);
- runtime_unlock(&timers);
+ runtime_unlock(&timers.lock);
 }

 // Add a timer to the heap and start or kick the timer proc
@@ -169,14 +169,14 @@
 i = t->i;
 gi = i;

- runtime_lock(&timers);
+ runtime_lock(&timers.lock);

 // t may not be registered anymore and may have
 // a bogus i (typically 0, if generated by Go).
 // Verify it before proceeding.
 i = t->i;
 if(i < 0 || i >= timers.len || timers.t[i] != t) {
- runtime_unlock(&timers);
+ runtime_unlock(&timers.lock);
 return false;
 }

@@ -192,7 +192,7 @@
 }
 if(debug)
 dumptimers("deltimer");
- runtime_unlock(&timers);
+ runtime_unlock(&timers.lock);
 return true;
 }

@@ -210,7 +210,7 @@
 Eface arg;

 for(;;) {
- runtime_lock(&timers);
+ runtime_lock(&timers.lock);
 timers.sleeping = false;
 now = runtime_nanotime();
 for(;;) {
@@ -236,7 +236,7 @@
 fv = t->fv;
 f = (void*)t->fv->fn;
 arg = t->arg;
- runtime_unlock(&timers);
+ runtime_unlock(&timers.lock);
 if(raceenabled)
 runtime_raceacquire(t);
 __go_set_closure(fv);
@@ -249,20 +249,20 @@
 arg.__object = nil;
 USED(&arg);

- runtime_lock(&timers);
+ runtime_lock(&timers.lock);
 }
 if(delta < 0) {
 // No timers left - put goroutine to sleep.
 timers.rescheduling = true;
 runtime_g()->isbackground = true;
- runtime_parkunlock(&timers, "timer goroutine (idle)");
+ runtime_parkunlock(&timers.lock, "timer goroutine (idle)");
 runtime_g()->isbackground = false;
 continue;
 }
 // At least one timer pending. Sleep until then.
 timers.sleeping = true;
 runtime_noteclear(&timers.waitnote);
- runtime_unlock(&timers);
+ runtime_unlock(&timers.lock);
 runtime_notetsleepg(&timers.waitnote, delta);
 }
 }