/*
 * runtime.c
 *
 * Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
 * to any person obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to permit
 * persons to whom the Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#include "config.h"

#ifdef HAVE_AVAILABILITY_MACROS_H
#include <AvailabilityMacros.h>
#endif /* HAVE_AVAILABILITY_MACROS_H */

#ifdef HAVE_TARGET_CONDITIONALS_H
#include <TargetConditionals.h>
#endif /* HAVE_TARGET_CONDITIONALS_H */

#if defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_INT) && defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG)

#ifdef HAVE_LIBKERN_OSATOMIC_H
#include <libkern/OSAtomic.h>
#endif /* HAVE_LIBKERN_OSATOMIC_H */

#elif defined(__WIN32__) || defined(_WIN32)
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>

static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
    /* fixme barrier is overkill -- see objc-os.h */
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
    /* fixme barrier is overkill -- see objc-os.h */
    int original = InterlockedCompareExchange(dst, newi, oldi);
    return (original == oldi);
}

/*
 * Check to see if the GCC atomic built-ins are available.  If we're on
 * a 64-bit system, make sure we have an 8-byte atomic function
 * available.
 *
 */

#elif defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_INT) && defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_LONG)

static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
  return __sync_bool_compare_and_swap(dst, oldl, newl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
  return __sync_bool_compare_and_swap(dst, oldi, newi);
}

#else
#error unknown atomic compare-and-swap primitive
#endif /* HAVE_OSATOMIC_COMPARE_AND_SWAP_INT && HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG */
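
/*
 * Illustrative alternative (an assumption, not part of this runtime): on a
 * C11 toolchain the same compare-and-swap shim could be expressed with
 * <stdatomic.h>.  Kept under #if 0, matching this file's convention for
 * non-compiled material.
 */
#if 0
#include <stdatomic.h>

static __inline bool C11CompareAndSwapInt(int oldi, int newi, _Atomic int *dst) {
    /* atomic_compare_exchange_strong rewrites `expected` on failure,
       so pass a local copy rather than the caller's value. */
    int expected = oldi;
    return atomic_compare_exchange_strong(dst, &expected, newi);
}
#endif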


/*
 * Globals:
 */

static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
static int _Byref_flag_initial_value = BLOCK_NEEDS_FREE | 2;

static const int WANTS_ONE = (1 << 16);

static bool isGC = false;

/*
 * Internal Utilities:
 */

#if 0
static unsigned long int latching_incr_long(unsigned long int *where) {
    while (1) {
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value+1, (volatile long int *)where)) {
            return old_value+1;
        }
    }
}
#endif /* if 0 */

static int latching_incr_int(int *where) {
    while (1) {
        int old_value = *(volatile int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+1, (volatile int *)where)) {
            return old_value+1;
        }
    }
}

#if 0
static int latching_decr_long(unsigned long int *where) {
    while (1) {
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return 0;
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value-1, (volatile long int *)where)) {
            return old_value-1;
        }
    }
}
#endif /* if 0 */

static int latching_decr_int(int *where) {
    while (1) {
        int old_value = *(volatile int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return 0;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value-1, (volatile int *)where)) {
            return old_value-1;
        }
    }
}
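
/*
 * Note on the latching scheme above: once the refcount field saturates at
 * BLOCK_REFCOUNT_MASK it stays latched -- both latching_incr_int and
 * latching_decr_int return the mask without modifying the field -- so an
 * over-retained Block is leaked rather than ever being over-released.
 */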


/*
 * GC support stub routines:
 */
#if 0
#pragma mark GC Support Routines
#endif /* if 0 */


static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
    return malloc(size);
}

static void _Block_assign_default(void *value, void **destptr) {
    *destptr = value;
}

static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}

static void _Block_do_nothing(const void *aBlock) { }

static void _Block_retain_object_default(const void *ptr) {
    if (!ptr) return;
}

static void _Block_release_object_default(const void *ptr) {
    if (!ptr) return;
}

static void _Block_assign_weak_default(const void *ptr, void *dest) {
    *(void **)dest = (void *)ptr;
}

static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    memmove(dst, src, (size_t)size);
}

static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}

/*
 * GC support callout functions - initially set to stub routines:
 */

static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *dest, void *ptr) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;


/*
 * GC support SPI functions - called from ObjC runtime and CoreFoundation:
 */

/* Public SPI
 * Called from objc-auto to turn on GC.
 * version 3, 4 arg, but changed 1st arg
 */
void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *),
                    void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    /* blocks with ctors & dtors need to have the dtor run from a class with a finalizer */
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_IS_GC;   // no refcount
    _Block_retain_object = _Block_do_nothing;
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}

/* transitional */
void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *)) {
    /* until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then */
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}


/*
 * Called from objc-auto to alternatively turn on retain/release.
 * Prior to this the only "object" support we can provide is for those
 * super special objects that live in libSystem, namely dispatch queues.
 * Blocks and Block_byrefs have their own special entry points.
 *
 */
void _Block_use_RR( void (*retain)(const void *),
                    void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
}

/*
 * Internal Support routines for copying:
 */

#if 0
#pragma mark Copy/Release support
#endif /* if 0 */

/* Copy, or bump refcount, of a block.  If really copying, call the copy helper if present. */
static void *_Block_copy_internal(const void *arg, const int flags) {
    struct Block_layout *aBlock;
    const bool wantsOne = (WANTS_ONE & flags) == WANTS_ONE;

    //printf("_Block_copy_internal(%p, %x)\n", arg, flags);
    if (!arg) return NULL;


    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 1)) {
            // Tell collector to hang on to this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }

    // It's a stack block.  Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 1;
        result->isa = _NSConcreteMallocBlock;
        if (result->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper %p(%p, %p)...\n", aBlock->descriptor->copy, result, aBlock);
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        return result;
    }
    else {
        // Under GC we want an allocation with refcount 1, so we ask for "true" if wantsOne.
        // This allows the copy helper routines to make non-refcounted block copies under GC.
        unsigned long int flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK);   // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 1;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        if (flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper...\n");
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}


/*
 * Runtime entry points for maintaining the sharing knowledge of byref data blocks.
 *
 * A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
 * Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
 * We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
 * Otherwise we need to copy it and update the stack forwarding pointer.
 * XXX We need to account for weak/nonretained read-write barriers.
 */

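/*
 * Illustrative sketch (assumed layout and names, for exposition only): for
 * `__block int i;` the compiler emits a structure shaped roughly like
 *
 *     struct _byref_i {
 *         void *isa;
 *         struct _byref_i *forwarding;
 *         int flags;
 *         int size;
 *         int i;
 *     };
 *
 * and rewrites every access of `i` as `byref->forwarding->i`.  Once
 * _Block_byref_assign_copy below moves the variable to the heap and patches
 * both forwarding pointers, the stack copy and all heap references observe
 * the same storage.
 */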
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    //printf("_Block_byref_assign_copy called, byref destp %p, src %p, flags %x\n", destp, src, flags);
    //printf("src dump: %s\n", _Block_byref_dump(src));
    if (src->forwarding->flags & BLOCK_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("making copy\n");
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if it's weak, ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            copy->byref_keep = src->byref_keep;
            copy->byref_destroy = src->byref_destroy;
            (*src->byref_keep)(copy, src);
        }
        else {
            // just bits.  Blast 'em using _Block_memmove in case they're __strong
            _Block_memmove(
                (void *)&copy->byref_keep,
                (void *)&src->byref_keep,
                src->size - sizeof(struct Block_byref_header));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_NEEDS_FREE) == BLOCK_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}

// Old compiler SPI
static void _Block_byref_release(const void *arg) {
    struct Block_byref *shared_struct = (struct Block_byref *)arg;
    int refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    shared_struct = shared_struct->forwarding;

    //printf("_Block_byref_release %p called, flags are %x\n", shared_struct, shared_struct->flags);
    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((shared_struct->flags & BLOCK_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = shared_struct->flags & BLOCK_REFCOUNT_MASK;
    if (refcount <= 0) {
        printf("_Block_byref_release: Block byref data structure at %p underflowed\n", arg);
    }
    else if ((latching_decr_int(&shared_struct->flags) & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("disposing of heap based byref block\n");
        if (shared_struct->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling out to helper\n");
            (*shared_struct->byref_destroy)(shared_struct);
        }
        _Block_deallocator((struct Block_layout *)shared_struct);
    }
}


/*
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 */

#if 0
#pragma mark SPI/API
#endif /* if 0 */

void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, WANTS_ONE);
}


// API entry point to release a copied Block
void _Block_release(void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    int32_t newCount;
    if (!aBlock) return;
    newCount = latching_decr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK;
    if (newCount > 0) return;
    // Hit zero
    if (aBlock->flags & BLOCK_IS_GC) {
        // Tell GC we no longer have our own refcounts.  GC will decr its refcount
        // and unless someone has done a CFRetain or marked it uncollectable it will
        // now be subject to GC reclamation.
        _Block_setHasRefcount(aBlock, false);
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)
            (*aBlock->descriptor->dispose)(aBlock);
        _Block_deallocator(aBlock);
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        ;
    }
    else {
        printf("Block_release called upon a stack Block: %p, ignored\n", (void *)aBlock);
    }
}
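
/*
 * Usage sketch (illustrative, not compiled here): copying a stack Block to
 * the heap via the Block.h wrappers, which land in _Block_copy and
 * _Block_release above.  The function and variable names are assumptions
 * made for exposition.
 */
#if 0
#include <Block.h>

int (^global_block)(void);

void example(int captured) {
    int (^stack_block)(void) = ^{ return captured; };
    global_block = Block_copy(stack_block);   /* heap copy with refcount 1 */
    /* ... invoke global_block() as needed ... */
    Block_release(global_block);              /* deallocates once the refcount hits zero */
}
#endif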



// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}



/*
 *
 * SPI used by other layers
 *
 */

// SPI, also internal.  Called from NSAutoBlock only under GC
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, 0);
}


// SPI
unsigned long int Block_size(void *arg) {
    return ((struct Block_layout *)arg)->descriptor->size;
}


#if 0
#pragma mark Compiler SPI entry points
#endif /* if 0 */


/*******************************************************

Entry points used by the compiler - the real API!


A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers.  The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign.  The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
	* BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
	* BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
	* BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions.  Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor.  And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities:
	__block id                   128+3
	__weak __block id            128+3+16
	__block (^Block)             128+7
	__weak __block (^Block)      128+7+16

The implementation of the two routines would be improved by switch statements enumerating the eight cases.

********************************************************/
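
/*
 * Sketch of what a compiler-synthesized copy/dispose helper pair might look
 * like for a Block that imports a single Objective-C object reference.  The
 * struct layout and names below are assumptions for exposition, not code any
 * particular compiler emits verbatim.
 */
#if 0
struct example_block_literal {
    void *isa;
    int flags;
    int reserved;
    void (*invoke)(struct example_block_literal *);
    struct Block_descriptor *descriptor;
    void *captured_object;   /* the imported Objective-C object reference */
};

static void example_copy_helper(struct example_block_literal *dst,
                                struct example_block_literal *src) {
    /* BLOCK_FIELD_IS_OBJECT (3), per the table above */
    _Block_object_assign(&dst->captured_object, src->captured_object,
                         BLOCK_FIELD_IS_OBJECT);
}

static void example_dispose_helper(struct example_block_literal *src) {
    _Block_object_dispose(src->captured_object, BLOCK_FIELD_IS_OBJECT);
}
#endif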

/*
 * When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
 * to do the assignment.
 */
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    //printf("_Block_object_assign(*%p, %p, %x)\n", destAddr, object, flags);
    if ((flags & BLOCK_BYREF_CALLER) == BLOCK_BYREF_CALLER) {
        if ((flags & BLOCK_FIELD_IS_WEAK) == BLOCK_FIELD_IS_WEAK) {
            _Block_assign_weak(object, destAddr);
        }
        else {
            // do *not* retain or *copy* __block variables whatever they are
            _Block_assign((void *)object, destAddr);
        }
    }
    else if ((flags & BLOCK_FIELD_IS_BYREF) == BLOCK_FIELD_IS_BYREF)  {
        // copying a __block reference from the stack Block to the heap
        // flags will indicate if it holds a __weak reference and needs a special isa
        _Block_byref_assign_copy(destAddr, object, flags);
    }
    // (this test must be before next one)
    else if ((flags & BLOCK_FIELD_IS_BLOCK) == BLOCK_FIELD_IS_BLOCK) {
        // copying a Block declared variable from the stack Block to the heap
        _Block_assign(_Block_copy_internal(object, flags), destAddr);
    }
    // (this test must be after previous one)
    else if ((flags & BLOCK_FIELD_IS_OBJECT) == BLOCK_FIELD_IS_OBJECT) {
        //printf("retaining object at %p\n", object);
        _Block_retain_object(object);
        //printf("done retaining object at %p\n", object);
        _Block_assign((void *)object, destAddr);
    }
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
// Used initially only for __attribute__((NSObject)) marked pointers.
void _Block_object_dispose(const void *object, const int flags) {
    //printf("_Block_object_dispose(%p, %x)\n", object, flags);
    if (flags & BLOCK_FIELD_IS_BYREF)  {
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
    }
    else if ((flags & (BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_BLOCK) {
        // get rid of a referenced Block held by this Block
        // (ignore __block Block variables, compiler doesn't need to call us)
        _Block_destroy(object);
    }
    else if ((flags & (BLOCK_FIELD_IS_WEAK|BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_OBJECT) {
        // get rid of a referenced object held by this Block
        // (ignore __block object variables, compiler doesn't need to call us)
        _Block_release_object(object);
    }
}


/*
 * Debugging support:
 */
#if 0
#pragma mark Debugging
#endif /* if 0 */


const char *_Block_dump(const void *block) {
    struct Block_layout *closure = (struct Block_layout *)block;
    static char buffer[512];
    char *cp = buffer;
    if (closure == NULL) {
        sprintf(cp, "NULL passed to _Block_dump\n");
        return buffer;
    }
    if (! (closure->flags & BLOCK_HAS_DESCRIPTOR)) {
        printf("Block compiled by obsolete compiler, please recompile source for this Block\n");
        exit(1);
    }
    cp += sprintf(cp, "^%p (new layout) =\n", (void *)closure);
    if (closure->isa == NULL) {
        cp += sprintf(cp, "isa: NULL\n");
    }
    else if (closure->isa == _NSConcreteStackBlock) {
        cp += sprintf(cp, "isa: stack Block\n");
    }
    else if (closure->isa == _NSConcreteMallocBlock) {
        cp += sprintf(cp, "isa: malloc heap Block\n");
    }
    else if (closure->isa == _NSConcreteAutoBlock) {
        cp += sprintf(cp, "isa: GC heap Block\n");
    }
    else if (closure->isa == _NSConcreteGlobalBlock) {
        cp += sprintf(cp, "isa: global Block\n");
    }
    else if (closure->isa == _NSConcreteFinalizingBlock) {
        cp += sprintf(cp, "isa: finalizing Block\n");
    }
    else {
        cp += sprintf(cp, "isa?: %p\n", (void *)closure->isa);
    }
    cp += sprintf(cp, "flags:");
    if (closure->flags & BLOCK_HAS_DESCRIPTOR) {
        cp += sprintf(cp, " HASDESCRIPTOR");
    }
    if (closure->flags & BLOCK_NEEDS_FREE) {
        cp += sprintf(cp, " FREEME");
    }
    if (closure->flags & BLOCK_IS_GC) {
        cp += sprintf(cp, " ISGC");
    }
    if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
        cp += sprintf(cp, " HASHELP");
    }
    if (closure->flags & BLOCK_HAS_CTOR) {
        cp += sprintf(cp, " HASCTOR");
    }
    cp += sprintf(cp, "\nrefcount: %u\n", closure->flags & BLOCK_REFCOUNT_MASK);
    cp += sprintf(cp, "invoke: %p\n", (void *)(uintptr_t)closure->invoke);
    {
        struct Block_descriptor *dp = closure->descriptor;
        cp += sprintf(cp, "descriptor: %p\n", (void *)dp);
        cp += sprintf(cp, "descriptor->reserved: %lu\n", dp->reserved);
        cp += sprintf(cp, "descriptor->size: %lu\n", dp->size);

        if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
            cp += sprintf(cp, "descriptor->copy helper: %p\n", (void *)(uintptr_t)dp->copy);
            cp += sprintf(cp, "descriptor->dispose helper: %p\n", (void *)(uintptr_t)dp->dispose);
        }
    }
    return buffer;
}


const char *_Block_byref_dump(struct Block_byref *src) {
    static char buffer[256];
    char *cp = buffer;
    cp += sprintf(cp, "byref data block %p contents:\n", (void *)src);
    cp += sprintf(cp, "  forwarding: %p\n", (void *)src->forwarding);
    cp += sprintf(cp, "  flags: 0x%x\n", src->flags);
    cp += sprintf(cp, "  size: %d\n", src->size);
    if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
        cp += sprintf(cp, "  copy helper: %p\n", (void *)(uintptr_t)src->byref_keep);
        cp += sprintf(cp, "  dispose helper: %p\n", (void *)(uintptr_t)src->byref_destroy);
    }
    return buffer;
}