/*
 * runtime.c
 *
 * Copyright 2008-2009 Apple, Inc. Permission is hereby granted, free of charge,
 * to any person obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to permit
 * persons to whom the Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#if !TARGET_OS_WIN32
#include <libkern/OSAtomic.h>
#else
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
    // fixme barrier is overkill -- see objc-os.h
    int original = InterlockedCompareExchange(dst, newi, oldi);
    return (original == oldi);
}
#endif


/***********************
Globals
************************/

static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
static int _Byref_flag_initial_value = BLOCK_NEEDS_FREE | 2;

static const int WANTS_ONE = (1 << 16);

static bool isGC = false;

/*******************************************************************************
Internal Utilities
********************************************************************************/

#if 0
static unsigned long int latching_incr_long(unsigned long int *where) {
    while (1) {
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value+1, (volatile long int *)where)) {
            return old_value+1;
        }
    }
}
#endif

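// Note on the "latching" helpers below: the low BLOCK_REFCOUNT_MASK bits of the
// flags word act as a saturating reference count.  Once the count reaches
// BLOCK_REFCOUNT_MASK it latches there and is never incremented or decremented
// again, so a Block or byref whose count has saturated is simply never freed.
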
static int latching_incr_int(int *where) {
    while (1) {
        int old_value = *(volatile int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+1, (volatile int *)where)) {
            return old_value+1;
        }
    }
}

#if 0
static int latching_decr_long(unsigned long int *where) {
    while (1) {
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return 0;
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value-1, (volatile long int *)where)) {
            return old_value-1;
        }
    }
}
#endif

static int latching_decr_int(int *where) {
    while (1) {
        int old_value = *(volatile int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return 0;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value-1, (volatile int *)where)) {
            return old_value-1;
        }
    }
}


/***********************
GC support stub routines
************************/
#if !TARGET_OS_WIN32
#pragma mark GC Support Routines
#endif



static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
    return malloc(size);
}

static void _Block_assign_default(void *value, void **destptr) {
    *destptr = value;
}

static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}

static void _Block_do_nothing(const void *aBlock) { }

static void _Block_retain_object_default(const void *ptr) {
    if (!ptr) return;
}

static void _Block_release_object_default(const void *ptr) {
    if (!ptr) return;
}

static void _Block_assign_weak_default(const void *ptr, void *dest) {
#if !TARGET_OS_WIN32
    *(long *)dest = (long)ptr;
#else
    *(void **)dest = (void *)ptr;
#endif
}

static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    memmove(dst, src, (size_t)size);
}

static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}

/**************************************************************************
GC support callout functions - initially set to stub routines
***************************************************************************/

static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *ptr, void *dest) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;


/**************************************************************************
GC support SPI functions - called from ObjC runtime and CoreFoundation
***************************************************************************/

// Public SPI
// Called from objc-auto to turn on GC.
// version 3, 4 arg, but changed 1st arg
void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *),
                    void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    // blocks with ctors & dtors need to have the dtor run from a class with a finalizer
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_IS_GC;   // no refcount
    _Block_retain_object = _Block_do_nothing;
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}

// transitional
void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *)) {
    // until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}


// Called from objc-auto to alternatively turn on retain/release.
// Prior to this the only "object" support we can provide is for those
// super special objects that live in libSystem, namely dispatch queues.
// Blocks and Block_byrefs have their own special entry points.
void _Block_use_RR( void (*retain)(const void *),
                    void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
}

/*******************************************************************************
Internal Support routines for copying
********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump refcount, of a block.  If really copying, call the copy helper if present.
static void *_Block_copy_internal(const void *arg, const int flags) {
    struct Block_layout *aBlock;
    const bool wantsOne = (WANTS_ONE & flags) == WANTS_ONE;

    //printf("_Block_copy_internal(%p, %x)\n", arg, flags);
    if (!arg) return NULL;


    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 1)) {
            // Tell collector to hang on to this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }

    // It's a stack block.  Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 1;
        result->isa = _NSConcreteMallocBlock;
        if (result->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper %p(%p, %p)...\n", aBlock->descriptor->copy, result, aBlock);
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne
        // This allows the copy helper routines to make non-refcounted block copies under GC
        unsigned long int flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK);   // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 1;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        if (flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper...\n");
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}




// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
// Otherwise we need to copy it and update the stack forwarding pointer.
// XXX We need to account for weak/nonretained read-write barriers.
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    //printf("_Block_byref_assign_copy called, byref destp %p, src %p, flags %x\n", destp, src, flags);
    //printf("src dump: %s\n", _Block_byref_dump(src));
    if (src->forwarding->flags & BLOCK_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("making copy\n");
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if it's weak ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            copy->byref_keep = src->byref_keep;
            copy->byref_destroy = src->byref_destroy;
            (*src->byref_keep)(copy, src);
        }
        else {
            // just bits.  Blast 'em using _Block_memmove in case they're __strong
            _Block_memmove(
                (void *)&copy->byref_keep,
                (void *)&src->byref_keep,
                src->size - sizeof(struct Block_byref_header));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_NEEDS_FREE) == BLOCK_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}

// Old compiler SPI
static void _Block_byref_release(const void *arg) {
    struct Block_byref *shared_struct = (struct Block_byref *)arg;
    int refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    shared_struct = shared_struct->forwarding;

    //printf("_Block_byref_release %p called, flags are %x\n", shared_struct, shared_struct->flags);
    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((shared_struct->flags & BLOCK_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = shared_struct->flags & BLOCK_REFCOUNT_MASK;
    if (refcount <= 0) {
        printf("_Block_byref_release: Block byref data structure at %p underflowed\n", arg);
    }
    else if ((latching_decr_int(&shared_struct->flags) & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("disposing of heap based byref block\n");
        if (shared_struct->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling out to helper\n");
            (*shared_struct->byref_destroy)(shared_struct);
        }
        _Block_deallocator((struct Block_layout *)shared_struct);
    }
}


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif

void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, WANTS_ONE);
}


// API entry point to release a copied Block
void _Block_release(void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    int32_t newCount;
    if (!aBlock) return;
    newCount = latching_decr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK;
    if (newCount > 0) return;
    // Hit zero
    if (aBlock->flags & BLOCK_IS_GC) {
        // Tell GC we no longer have our own refcounts.  GC will decr its refcount
        // and unless someone has done a CFRetain or marked it uncollectable it will
        // now be subject to GC reclamation.
        _Block_setHasRefcount(aBlock, false);
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) (*aBlock->descriptor->dispose)(aBlock);
        _Block_deallocator(aBlock);
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        ;
    }
    else {
        printf("Block_release called upon a stack Block: %p, ignored\n", aBlock);
    }
}
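
// A minimal usage sketch, not compiled into the runtime: client code normally
// reaches _Block_copy/_Block_release through the Block_copy()/Block_release()
// macros in <Block.h>.  The handler names below are purely illustrative.
#if 0
#include <Block.h>

static void (^saved_handler)(void);   // hypothetical client-side global

static void remember_handler(void (^handler)(void)) {
    saved_handler = Block_copy(handler);    // copies a stack Block to the heap (or bumps its refcount)
}

static void forget_handler(void) {
    Block_release(saved_handler);           // balances the Block_copy above
    saved_handler = NULL;
}
#endif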



// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}



/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

// SPI, also internal.  Called from NSAutoBlock only under GC
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, 0);
}


// SPI
unsigned long int Block_size(void *arg) {
    return ((struct Block_layout *)arg)->descriptor->size;
}


#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif

/*******************************************************

Entry points used by the compiler - the real API!


A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers.  The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign.  The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
    * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
    * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
    * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions.  Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor.  And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities:
    __block id                   128+3
    __weak __block id            128+3+16
    __block (^Block)             128+7
    __weak __block (^Block)      128+7+16

The implementation of the two routines would be improved by switch statements enumerating the eight cases.

********************************************************/
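
// A conceptual sketch, not compiled into the runtime, of the copy/dispose helpers
// the compiler might synthesize for a Block that captures one Objective-C object
// and one __block variable.  The struct layout and names here are illustrative
// only; real compiler-generated code differs in detail.
#if 0
struct my_block_literal {
    struct Block_layout base;
    void *capturedObject;               // retained Objective-C object reference
    struct Block_byref *capturedByref;  // __block variable, accessed via its forwarding pointer
};

static void my_block_copy_helper(void *dst, void *src) {
    struct my_block_literal *d = (struct my_block_literal *)dst;
    struct my_block_literal *s = (struct my_block_literal *)src;
    _Block_object_assign(&d->capturedObject, s->capturedObject, BLOCK_FIELD_IS_OBJECT); // flags == 3
    _Block_object_assign(&d->capturedByref, s->capturedByref, BLOCK_FIELD_IS_BYREF);    // flags == 8
}

static void my_block_dispose_helper(void *src) {
    struct my_block_literal *s = (struct my_block_literal *)src;
    _Block_object_dispose(s->capturedObject, BLOCK_FIELD_IS_OBJECT);
    _Block_object_dispose(s->capturedByref, BLOCK_FIELD_IS_BYREF);
}
#endif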

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    //printf("_Block_object_assign(*%p, %p, %x)\n", destAddr, object, flags);
    if ((flags & BLOCK_BYREF_CALLER) == BLOCK_BYREF_CALLER) {
        if ((flags & BLOCK_FIELD_IS_WEAK) == BLOCK_FIELD_IS_WEAK) {
            _Block_assign_weak(object, destAddr);
        }
        else {
            // do *not* retain or *copy* __block variables whatever they are
            _Block_assign((void *)object, destAddr);
        }
    }
    else if ((flags & BLOCK_FIELD_IS_BYREF) == BLOCK_FIELD_IS_BYREF) {
        // copying a __block reference from the stack Block to the heap
        // flags will indicate if it holds a __weak reference and needs a special isa
        _Block_byref_assign_copy(destAddr, object, flags);
    }
    // (this test must be before next one)
    else if ((flags & BLOCK_FIELD_IS_BLOCK) == BLOCK_FIELD_IS_BLOCK) {
        // copying a Block declared variable from the stack Block to the heap
        _Block_assign(_Block_copy_internal(object, flags), destAddr);
    }
    // (this test must be after previous one)
    else if ((flags & BLOCK_FIELD_IS_OBJECT) == BLOCK_FIELD_IS_OBJECT) {
        //printf("retaining object at %p\n", object);
        _Block_retain_object(object);
        //printf("done retaining object at %p\n", object);
        _Block_assign((void *)object, destAddr);
    }
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
// Used initially only for __attribute__((NSObject)) marked pointers.
void _Block_object_dispose(const void *object, const int flags) {
    //printf("_Block_object_dispose(%p, %x)\n", object, flags);
    if (flags & BLOCK_FIELD_IS_BYREF) {
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
    }
    else if ((flags & (BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_BLOCK) {
        // get rid of a referenced Block held by this Block
        // (ignore __block Block variables, compiler doesn't need to call us)
        _Block_destroy(object);
    }
    else if ((flags & (BLOCK_FIELD_IS_WEAK|BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_OBJECT) {
        // get rid of a referenced object held by this Block
        // (ignore __block object variables, compiler doesn't need to call us)
        _Block_release_object(object);
    }
}


/*******************
Debugging support
********************/
#if !TARGET_OS_WIN32
#pragma mark Debugging
#endif


const char *_Block_dump(const void *block) {
    struct Block_layout *closure = (struct Block_layout *)block;
    static char buffer[512];
    char *cp = buffer;
    if (closure == NULL) {
        sprintf(cp, "NULL passed to _Block_dump\n");
        return buffer;
    }
    if (! (closure->flags & BLOCK_HAS_DESCRIPTOR)) {
        printf("Block compiled by obsolete compiler, please recompile source for this Block\n");
        exit(1);
    }
    cp += sprintf(cp, "^%p (new layout) =\n", closure);
    if (closure->isa == NULL) {
        cp += sprintf(cp, "isa: NULL\n");
    }
    else if (closure->isa == _NSConcreteStackBlock) {
        cp += sprintf(cp, "isa: stack Block\n");
    }
    else if (closure->isa == _NSConcreteMallocBlock) {
        cp += sprintf(cp, "isa: malloc heap Block\n");
    }
    else if (closure->isa == _NSConcreteAutoBlock) {
        cp += sprintf(cp, "isa: GC heap Block\n");
    }
    else if (closure->isa == _NSConcreteGlobalBlock) {
        cp += sprintf(cp, "isa: global Block\n");
    }
    else if (closure->isa == _NSConcreteFinalizingBlock) {
        cp += sprintf(cp, "isa: finalizing Block\n");
    }
    else {
        cp += sprintf(cp, "isa?: %p\n", closure->isa);
    }
    cp += sprintf(cp, "flags:");
    if (closure->flags & BLOCK_HAS_DESCRIPTOR) {
        cp += sprintf(cp, " HASDESCRIPTOR");
    }
    if (closure->flags & BLOCK_NEEDS_FREE) {
        cp += sprintf(cp, " FREEME");
    }
    if (closure->flags & BLOCK_IS_GC) {
        cp += sprintf(cp, " ISGC");
    }
    if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
        cp += sprintf(cp, " HASHELP");
    }
    if (closure->flags & BLOCK_HAS_CTOR) {
        cp += sprintf(cp, " HASCTOR");
    }
    cp += sprintf(cp, "\nrefcount: %u\n", closure->flags & BLOCK_REFCOUNT_MASK);
    cp += sprintf(cp, "invoke: %p\n", closure->invoke);
    {
        struct Block_descriptor *dp = closure->descriptor;
        cp += sprintf(cp, "descriptor: %p\n", dp);
        cp += sprintf(cp, "descriptor->reserved: %lu\n", dp->reserved);
        cp += sprintf(cp, "descriptor->size: %lu\n", dp->size);

        if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
            cp += sprintf(cp, "descriptor->copy helper: %p\n", dp->copy);
            cp += sprintf(cp, "descriptor->dispose helper: %p\n", dp->dispose);
        }
    }
    return buffer;
}


const char *_Block_byref_dump(struct Block_byref *src) {
    static char buffer[256];
    char *cp = buffer;
    cp += sprintf(cp, "byref data block %p contents:\n", src);
    cp += sprintf(cp, "  forwarding: %p\n", src->forwarding);
    cp += sprintf(cp, "  flags: 0x%x\n", src->flags);
    cp += sprintf(cp, "  size: %d\n", src->size);
    if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
        cp += sprintf(cp, "  copy helper: %p\n", src->byref_keep);
        cp += sprintf(cp, "  dispose helper: %p\n", src->byref_destroy);
    }
    return buffer;
}