/*
 * runtime.c
 * libclosure
 *
 * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LLVM_LICENSE_HEADER@
 */


#ifndef KERNEL

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <os/assumes.h>
#include <TargetConditionals.h>

#else /* !KERNEL */
#define TARGET_OS_WIN32 0

#include <libkern/Block_private.h>
__BEGIN_DECLS
#include <kern/kalloc.h>
__END_DECLS

/* void * is a bit of a lie, but that will have to do */
KALLOC_TYPE_VAR_DEFINE(KT_BLOCK_LAYOUT, struct Block_layout, void *, KT_DEFAULT);
KALLOC_TYPE_VAR_DEFINE(KT_BLOCK_BYREF, struct Block_byref, void *, KT_DEFAULT);

static inline struct Block_layout *
block_layout_alloc(size_t size)
{
	return (struct Block_layout *)kalloc_type_var_impl(KT_BLOCK_LAYOUT,
	    size, Z_WAITOK_ZERO_NOFAIL, NULL);
}

static inline void
block_layout_free(Block_layout *ptr, size_t size)
{
	kfree_type_var_impl(KT_BLOCK_LAYOUT, ptr, size);
}

static inline struct Block_byref *
block_byref_alloc(size_t size)
{
	return (struct Block_byref *)kalloc_type_var_impl(KT_BLOCK_BYREF,
	    size, Z_WAITOK_ZERO_NOFAIL, NULL);
}

static inline void
block_byref_free(Block_byref *ptr, size_t size)
{
	kfree_type_var_impl(KT_BLOCK_BYREF, ptr, size);
}

#endif /* KERNEL */

#include <machine/atomic.h>
#include <string.h>
#include <stdint.h>
#ifndef os_assumes
#define os_assumes(_x) (_x)
#endif
#ifndef os_assert
#define os_assert(_x) assert(_x)
#endif

#if TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool
OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
	// fixme barrier is overkill -- see objc-os.h
	long original = InterlockedCompareExchange(dst, newl, oldl);
	return original == oldl;
}

static __inline bool
OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
	// fixme barrier is overkill -- see objc-os.h
	int original = InterlockedCompareExchange(dst, newi, oldi);
	return original == oldi;
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) os_atomic_cmpxchg(_Ptr, _Old, _New, relaxed)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) os_atomic_cmpxchg(_Ptr, _Old, _New, relaxed)
#endif


/*******************************************************************************
 * Internal Utilities
 ********************************************************************************/

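/*
 * A Block's reference count lives in the BLOCK_REFCOUNT_MASK bits of its
 * flags word and is counted in units of 2, which keeps the low
 * BLOCK_DEALLOCATING bit clear during ordinary retain/release traffic.
 * Once the count reaches BLOCK_REFCOUNT_MASK it "latches": further
 * increments and decrements are ignored and the object is deliberately
 * leaked rather than allowed to overflow.
 */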
static int32_t
latching_incr_int(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			return BLOCK_REFCOUNT_MASK;
		}
		if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) {
			return old_value + 2;
		}
	}
}

static bool
latching_incr_int_not_deallocating(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if (old_value & BLOCK_DEALLOCATING) {
			// if deallocating we can't do this
			return false;
		}
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			// if latched, we're leaking this block, and we succeed
			return true;
		}
		if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) {
			// otherwise, we must store a new retained value without the deallocating bit set
			return true;
		}
	}
}


// Decrements the refcount; returns true if the caller should deallocate.
static bool
latching_decr_int_should_deallocate(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			return false; // latched high
		}
		if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
			return false; // underflow, latch low
		}
		int32_t new_value = old_value - 2;
		bool result = false;
		if ((old_value & (BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING)) == 2) {
			new_value = old_value - 1;
			result = true;
		}
		if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
			return result;
		}
	}
}


/**************************************************************************
 * Framework callback functions and their default implementations.
 ***************************************************************************/
#if !TARGET_OS_WIN32
#pragma mark Framework Callback Routines
#endif
#if KERNEL
static inline void
_Block_retain_object(const void *ptr __unused)
{
}

static inline void
_Block_release_object(const void *ptr __unused)
{
}

static inline void
_Block_destructInstance(const void *aBlock __unused)
{
}

#else

static void
_Block_retain_object_default(const void *ptr __unused)
{
}

static void
_Block_release_object_default(const void *ptr __unused)
{
}

static void
_Block_destructInstance_default(const void *aBlock __unused)
{
}

static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructInstance_default;


/**************************************************************************
 * Callback registration from ObjC runtime and CoreFoundation
 ***************************************************************************/

void
_Block_use_RR2(const Block_callbacks_RR *callbacks)
{
	_Block_retain_object = callbacks->retain;
	_Block_release_object = callbacks->release;
	_Block_destructInstance = callbacks->destructInstance;
}
#endif // !KERNEL

/****************************************************************************
 * Accessors for block descriptor fields
 *****************************************************************************/

#if BLOCK_SMALL_DESCRIPTOR_SUPPORTED
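// Small descriptors encode what would otherwise be pointer fields as signed
// 32-bit offsets relative to the field's own address; this helper turns such
// an offset back into an absolute pointer (or nullptr for a zero offset).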
template <class T>
static T *
unwrap_relative_pointer(int32_t &offset)
{
	if (offset == 0) {
		return nullptr;
	}

	uintptr_t base = (uintptr_t)&offset;
	uintptr_t extendedOffset = (uintptr_t)(intptr_t)offset;
	uintptr_t pointer = base + extendedOffset;
	return (T *)pointer;
}
#endif

#if 0
static struct Block_descriptor_2 *
_Block_descriptor_2(struct Block_layout *aBlock)
{
	uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock);
	desc += sizeof(struct Block_descriptor_1);
	return __IGNORE_WCASTALIGN((struct Block_descriptor_2 *)desc);
}
#endif

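// The full descriptor is a Block_descriptor_1, optionally followed by a
// Block_descriptor_2 (copy/dispose helpers, present when
// BLOCK_HAS_COPY_DISPOSE is set) and a Block_descriptor_3 (signature and
// layout strings).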
static struct Block_descriptor_3 *
_Block_descriptor_3(struct Block_layout *aBlock)
{
	uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock);
	desc += sizeof(struct Block_descriptor_1);
	if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
		desc += sizeof(struct Block_descriptor_2);
	}
	return __IGNORE_WCASTALIGN((struct Block_descriptor_3 *)desc);
}

static void
_Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
	if (auto *pFn = _Block_get_copy_function(aBlock)) {
		pFn(result, aBlock);
	}
}

static void
_Block_call_dispose_helper(struct Block_layout *aBlock)
{
	if (auto *pFn = _Block_get_dispose_function(aBlock)) {
		pFn(aBlock);
	}
}

/*******************************************************************************
 * Internal Support routines for copying
 ********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump refcount, of a block. If really copying, call the copy helper if present.
void *
_Block_copy(const void *arg)
{
	struct Block_layout *aBlock;

	if (!arg) {
		return NULL;
	}

	// The following would be better done as a switch statement
	aBlock = (struct Block_layout *)arg;
	if (aBlock->flags & BLOCK_NEEDS_FREE) {
		// latches on high
		latching_incr_int(&aBlock->flags);
		return aBlock;
	} else if (aBlock->flags & BLOCK_IS_GLOBAL) {
		return aBlock;
	} else {
		// It's a stack block. Make a copy.
		size_t size = Block_size(aBlock);
		struct Block_layout *result = block_layout_alloc(size);
		memmove(result, aBlock, size); // bitcopy first
#if __has_feature(ptrauth_calls)
		// Resign the invoke pointer as it uses address authentication.
		result->invoke = aBlock->invoke;

#if __has_feature(ptrauth_signed_block_descriptors)
		uintptr_t oldDesc =
		    ptrauth_blend_discriminator(
			&aBlock->descriptor, _Block_descriptor_ptrauth_discriminator);
		uintptr_t newDesc =
		    ptrauth_blend_discriminator(
			&result->descriptor, _Block_descriptor_ptrauth_discriminator);

		result->descriptor =
		    ptrauth_auth_and_resign(aBlock->descriptor, ptrauth_key_asda, oldDesc,
		    ptrauth_key_asda, newDesc);
#endif
#endif

		// reset refcount
		result->flags &= ~(BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING); // XXX not needed
		result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1
		_Block_call_copy_helper(result, aBlock);
		// Set isa last so memory analysis tools see a fully-initialized object.
		result->isa = _NSConcreteMallocBlock;
		return result;
	}
}


// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding pointer.
// We ask whether the byref pointer we know about has already been copied to the heap; if so, we increment its refcount and return it.
// Otherwise we copy it to the heap and update the stack forwarding pointer to point there.
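// A __block variable is laid out as a Block_byref header, optionally followed
// by a Block_byref_2 (keep/destroy helpers, when BLOCK_BYREF_HAS_COPY_DISPOSE
// is set) and a Block_byref_3 (extended layout, when BLOCK_BYREF_LAYOUT_EXTENDED
// is set), with the variable's storage after that.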
static struct Block_byref *
_Block_byref_copy(const void *arg)
{
	struct Block_byref *src = (struct Block_byref *)arg;

	if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
		// src points to stack
		struct Block_byref *copy = block_byref_alloc(src->size);
		copy->isa = NULL;
		// byref value 4 is logical refcount of 2: one for caller, one for stack
		copy->flags = src->flags | BLOCK_BYREF_NEEDS_FREE | 4;
		copy->forwarding = copy; // patch heap copy to point to itself
		src->forwarding = copy; // patch stack to point to heap copy
		copy->size = src->size;

		if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
			// Trust copy helper to copy everything of interest
			// If more than one field shows up in a byref block this is wrong XXX
			struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src + 1);
			struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy + 1);
			copy2->byref_keep = src2->byref_keep;
			copy2->byref_destroy = src2->byref_destroy;

			if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
				struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2 + 1);
				struct Block_byref_3 *copy3 = (struct Block_byref_3 *)(copy2 + 1);
				copy3->layout = src3->layout;
			}

			(*src2->byref_keep)(copy, src);
		} else {
			// Bitwise copy.
			// This copy includes Block_byref_3, if any.
			memmove(copy + 1, src + 1, src->size - sizeof(*src));
		}
	}
	// already copied to heap
	else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
		latching_incr_int(&src->forwarding->flags);
	}

	return src->forwarding;
}

static void
_Block_byref_release(const void *arg)
{
	struct Block_byref *byref = (struct Block_byref *)arg;

	// dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
	byref = byref->forwarding;

	if (byref->flags & BLOCK_BYREF_NEEDS_FREE) {
		__assert_only int32_t refcount = byref->flags & BLOCK_REFCOUNT_MASK;
		os_assert(refcount);
		if (latching_decr_int_should_deallocate(&byref->flags)) {
			if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
				struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref + 1);
				(*byref2->byref_destroy)(byref);
			}
			block_byref_free(byref, byref->size);
		}
	}
}


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif


// API entry point to release a copied Block
void
_Block_release(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	if (!aBlock) {
		return;
	}
	if (aBlock->flags & BLOCK_IS_GLOBAL) {
		return;
	}
	if (!(aBlock->flags & BLOCK_NEEDS_FREE)) {
		return;
	}

	if (latching_decr_int_should_deallocate(&aBlock->flags)) {
		_Block_call_dispose_helper(aBlock);
		_Block_destructInstance(aBlock);
		block_layout_free(aBlock, Block_size(aBlock));
	}
}

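// For illustration only (not part of the runtime): a hypothetical caller of
// the copy/release entry points above, via the Block_copy()/Block_release()
// convenience macros from <Block.h>. A stack block outlives its frame only
// after being copied to the heap:
/*******
 *  int x = 10;
 *  void (^stackBlock)(void) = ^{ printf("%d\n", x); };  // stack allocated
 *  void (^heapBlock)(void) = Block_copy(stackBlock);    // -> _Block_copy
 *  heapBlock();
 *  Block_release(heapBlock);                            // -> _Block_release
 ********/
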
bool
_Block_tryRetain(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	return latching_incr_int_not_deallocating(&aBlock->flags);
}

bool
_Block_isDeallocating(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}


/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

size_t
Block_size(void *aBlock)
{
	auto *layout = (Block_layout *)aBlock;
	void *desc = _Block_get_descriptor(layout);
#if BLOCK_SMALL_DESCRIPTOR_SUPPORTED
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		return ((Block_descriptor_small *)desc)->size;
	}
#endif
	return ((Block_descriptor_1 *)desc)->size;
}

bool
_Block_use_stret(void *aBlock)
{
	struct Block_layout *layout = (struct Block_layout *)aBlock;

	int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
	return (layout->flags & requiredFlags) == requiredFlags;
}

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool
_Block_has_signature(void *aBlock)
{
	return _Block_signature(aBlock) ? true : false;
}

const char *
_Block_signature(void *aBlock)
{
	struct Block_layout *layout = (struct Block_layout *)aBlock;
	if (!(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

#if BLOCK_SMALL_DESCRIPTOR_SUPPORTED
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		return unwrap_relative_pointer<const char>(bds->signature);
	}
#endif

	struct Block_descriptor_3 *desc3 = _Block_descriptor_3(layout);
	return desc3->signature;
}

const char *
_Block_layout(void *aBlock)
{
	// Don't return extended layout to callers expecting old GC layout
	Block_layout *layout = (Block_layout *)aBlock;
	if ((layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) ||
	    !(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

#if BLOCK_SMALL_DESCRIPTOR_SUPPORTED
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		return unwrap_relative_pointer<const char>(bds->layout);
	}
#endif

	Block_descriptor_3 *desc = _Block_descriptor_3(layout);
	return desc->layout;
}

const char *
_Block_extended_layout(void *aBlock)
{
	// Don't return old GC layout to callers expecting extended layout
	Block_layout *layout = (Block_layout *)aBlock;
	if (!(layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) ||
	    !(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

	const char *extLayout;
#if BLOCK_SMALL_DESCRIPTOR_SUPPORTED
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		if (layout->flags & BLOCK_INLINE_LAYOUT_STRING) {
			extLayout = (const char *)(uintptr_t)bds->layout;
		} else {
			extLayout = unwrap_relative_pointer<const char>(bds->layout);
		}
	} else
#endif
	{
		Block_descriptor_3 *desc3 = _Block_descriptor_3(layout);
		extLayout = desc3->layout;
	}

	// Return empty string (all non-object bytes) instead of NULL
	// so callers can distinguish "empty layout" from "no layout".
	if (!extLayout) {
		extLayout = "";
	}
	return extLayout;
}

#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif


/*******************************************************
 *
 * Entry points used by the compiler - the real API!
 *
 *
 * A Block can reference four different kinds of things that require help when the Block is copied to the heap.
 * 1) C++ stack based objects
 * 2) References to Objective-C objects
 * 3) Other Blocks
 * 4) __block variables
 *
 * In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.
 *
 * The flags parameter of _Block_object_assign and _Block_object_dispose is set to
 *	BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
 *	BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
 *	BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
 * If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16)
 *
 * So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
 *
 * When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.
 *
 * So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities:
 *	__block id                   128+3       (0x83)
 *	__block (^Block)             128+7       (0x87)
 *	__weak __block id            128+3+16    (0x93)
 *	__weak __block (^Block)      128+7+16    (0x97)
 *
 *
 ********************************************************/
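// For illustration only (not part of the runtime): for a block that captures
// an Objective-C object and a __block variable, the compiler-synthesized
// helpers described above are conceptually shaped like this (struct and field
// names are hypothetical):
/*******
 *  static void __block_copy_helper(struct my_block *dst, struct my_block *src) {
 *	_Block_object_assign(&dst->captured_obj, src->captured_obj, BLOCK_FIELD_IS_OBJECT);     // 3
 *	_Block_object_assign(&dst->captured_byref, src->captured_byref, BLOCK_FIELD_IS_BYREF);  // 8
 *  }
 *  static void __block_dispose_helper(struct my_block *src) {
 *	_Block_object_dispose(src->captured_obj, BLOCK_FIELD_IS_OBJECT);
 *	_Block_object_dispose(src->captured_byref, BLOCK_FIELD_IS_BYREF);
 *  }
 ********/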

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
void
_Block_object_assign(void *destArg, const void *object, const int flags)
{
	const void **dest = (const void **)destArg;
	switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
	case BLOCK_FIELD_IS_OBJECT:
		/*******
		 *  id object = ...;
		 *  [^{ object; } copy];
		 ********/

		_Block_retain_object(object);
		*dest = object;
		break;

	case BLOCK_FIELD_IS_BLOCK:
		/*******
		 *  void (^object)(void) = ...;
		 *  [^{ object; } copy];
		 ********/

		*dest = _Block_copy(object);
		break;

	case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
	case BLOCK_FIELD_IS_BYREF:
		/*******
		 *  // copy the onstack __block container to the heap
		 *  // Note this __weak is old GC-weak/MRC-unretained.
		 *  // ARC-style __weak is handled by the copy helper directly.
		 *  __block ... x;
		 *  __weak __block ... x;
		 *  [^{ x; } copy];
		 ********/

		*dest = _Block_byref_copy(object);
		break;

	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
		/*******
		 *  // copy the actual field held in the __block container
		 *  // Note this is MRC unretained __block only.
		 *  // ARC retained __block is handled by the copy helper directly.
		 *  __block id object;
		 *  __block void (^object)(void);
		 *  [^{ object; } copy];
		 ********/

		*dest = object;
		break;

	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
		/*******
		 *  // copy the actual field held in the __block container
		 *  // Note this __weak is old GC-weak/MRC-unretained.
		 *  // ARC-style __weak is handled by the copy helper directly.
		 *  __weak __block id object;
		 *  __weak __block void (^object)(void);
		 *  [^{ object; } copy];
		 ********/

		*dest = object;
		break;

	default:
		break;
	}
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
void
_Block_object_dispose(const void *object, const int flags)
{
	switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
	case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
	case BLOCK_FIELD_IS_BYREF:
		// get rid of the __block data structure held in a Block
		_Block_byref_release(object);
		break;
	case BLOCK_FIELD_IS_BLOCK:
		_Block_release(object);
		break;
	case BLOCK_FIELD_IS_OBJECT:
		_Block_release_object(object);
		break;
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
		break;
	default:
		break;
	}
}


// Workaround for <rdar://26015603> dylib with no __DATA segment fails to rebase
__attribute__((used))
static int let_there_be_data = 42;
