xref: /xnu-11215/libkern/libclosure/runtime.cpp (revision e7776783)
/*
 * runtime.c
 * libclosure
 *
 * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LLVM_LICENSE_HEADER@
 */


#ifndef KERNEL

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <os/assumes.h>
#include <TargetConditionals.h>

#else /* !KERNEL */
#define TARGET_OS_WIN32 0

#include <libkern/Block_private.h>
__BEGIN_DECLS
#include <kern/kalloc.h>
__END_DECLS

static inline void *
malloc(size_t size)
{
	if (size == 0) {
		return NULL;
	}
	return kheap_alloc(KHEAP_DEFAULT, size,
	           Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_LIBKERN));
}

static inline void
free(void *addr, size_t size)
{
	kheap_free(KHEAP_DEFAULT, addr, size);
}

#endif /* KERNEL */

#include <machine/atomic.h>
#include <string.h>
#include <stdint.h>
#ifndef os_assumes
#define os_assumes(_x) (_x)
#endif
#ifndef os_assert
#define os_assert(_x) assert(_x)
#endif

#if TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool
OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
	// fixme barrier is overkill -- see objc-os.h
	long original = InterlockedCompareExchange(dst, newl, oldl);
	return original == oldl;
}

static __inline bool
OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
	// fixme barrier is overkill -- see objc-os.h
	int original = InterlockedCompareExchange(dst, newi, oldi);
	return original == oldi;
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) os_atomic_cmpxchg(_Ptr, _Old, _New, relaxed)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) os_atomic_cmpxchg(_Ptr, _Old, _New, relaxed)
#endif


/*******************************************************************************
 *  Internal Utilities
 ********************************************************************************/

static int32_t
latching_incr_int(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			return BLOCK_REFCOUNT_MASK;
		}
		if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) {
			return old_value + 2;
		}
	}
}

static bool
latching_incr_int_not_deallocating(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if (old_value & BLOCK_DEALLOCATING) {
			// if deallocating we can't do this
			return false;
		}
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			// if latched, we're leaking this block, and we succeed
			return true;
		}
		if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) {
			// otherwise, we must store a new retained value without the deallocating bit set
			return true;
		}
	}
}


// return should_deallocate?
static bool
latching_decr_int_should_deallocate(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			return false; // latched high
		}
		if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
			return false; // underflow, latch low
		}
		int32_t new_value = old_value - 2;
		bool result = false;
		if ((old_value & (BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING)) == 2) {
			new_value = old_value - 1;
			result = true;
		}
		if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
			return result;
		}
	}
}
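
// Added commentary (not in the original source): a worked example of the
// latching refcount scheme above. The refcount lives in the low bits of
// `flags` next to BLOCK_DEALLOCATING (the low bit), so one logical reference
// is +2, and a counter that saturates at BLOCK_REFCOUNT_MASK latches there
// (the block is then deliberately leaked rather than over-released).
#if 0 // illustrative sketch only, never compiled
static void
latching_refcount_example(void)
{
	volatile int32_t flags = BLOCK_NEEDS_FREE | 2;   // fresh heap copy, logical refcount 1
	latching_incr_int(&flags);                       // refcount bits now 4 (logical 2)
	latching_decr_int_should_deallocate(&flags);     // back to 2, returns false
	// Final reference: refcount bits == 2 with BLOCK_DEALLOCATING clear, so
	// the decrement stores old_value - 1 (setting BLOCK_DEALLOCATING) and
	// returns true, telling the caller to run dispose helpers and free.
	bool should_free = latching_decr_int_should_deallocate(&flags);
	(void)should_free; // true
}
#endif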


/**************************************************************************
 *  Framework callback functions and their default implementations.
 ***************************************************************************/
#if !TARGET_OS_WIN32
#pragma mark Framework Callback Routines
#endif
#if KERNEL
static inline void
_Block_retain_object(const void *ptr __unused)
{
}

static inline void
_Block_release_object(const void *ptr __unused)
{
}

static inline void
_Block_destructInstance(const void *aBlock __unused)
{
}

#else

static void
_Block_retain_object_default(const void *ptr __unused)
{
}

static void
_Block_release_object_default(const void *ptr __unused)
{
}

static void
_Block_destructInstance_default(const void *aBlock __unused)
{
}

static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_destructInstance)(const void *aBlock) = _Block_destructInstance_default;


/**************************************************************************
 *  Callback registration from ObjC runtime and CoreFoundation
 ***************************************************************************/

void
_Block_use_RR2(const Block_callbacks_RR *callbacks)
{
	_Block_retain_object = callbacks->retain;
	_Block_release_object = callbacks->release;
	_Block_destructInstance = callbacks->destructInstance;
}
#endif // !KERNEL
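
// Added illustration (not in the original source): how a higher layer such as
// the Objective-C runtime might hand its retain/release/destruct callbacks to
// this library. Only the fields read by _Block_use_RR2 above are shown; the
// real Block_callbacks_RR may carry additional members (e.g. a size field).
#if 0 // illustrative sketch only, never compiled
static void example_retain(const void *obj)      { /* e.g. retain the ObjC object */ }
static void example_release(const void *obj)     { /* e.g. release the ObjC object */ }
static void example_destruct(const void *aBlock) { /* tear down per-instance state */ }

static void
example_register_callbacks(void)
{
	Block_callbacks_RR callbacks = {};
	callbacks.retain = example_retain;
	callbacks.release = example_release;
	callbacks.destructInstance = example_destruct;
	_Block_use_RR2(&callbacks);
}
#endif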

/****************************************************************************
 *  Accessors for block descriptor fields
 *****************************************************************************/

template <class T>
static T *
unwrap_relative_pointer(int32_t &offset)
{
	if (offset == 0) {
		return nullptr;
	}

	uintptr_t base = (uintptr_t)&offset;
	uintptr_t extendedOffset = (uintptr_t)(intptr_t)offset;
	uintptr_t pointer = base + extendedOffset;
	return (T *)pointer;
}
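
// Added commentary (not in the original source): small descriptors store
// self-relative 32-bit offsets rather than absolute pointers, so the target
// is simply "address of the offset field plus the sign-extended offset".
#if 0 // illustrative sketch only, never compiled
static void
relative_pointer_example(Block_descriptor_small *bds)
{
	// If bds->signature itself lives at address 0x1000 and holds -16, the
	// signature string is at 0x1000 - 16 = 0x0FF0; an offset of 0 means
	// "no signature" and unwraps to nullptr.
	const char *sig = unwrap_relative_pointer<const char>(bds->signature);
	(void)sig;
}
#endif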

#if 0
static struct Block_descriptor_2 *
_Block_descriptor_2(struct Block_layout *aBlock)
{
	uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock);
	desc += sizeof(struct Block_descriptor_1);
	return __IGNORE_WCASTALIGN((struct Block_descriptor_2 *)desc);
}
#endif

static struct Block_descriptor_3 *
_Block_descriptor_3(struct Block_layout *aBlock)
{
	uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock);
	desc += sizeof(struct Block_descriptor_1);
	if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
		desc += sizeof(struct Block_descriptor_2);
	}
	return __IGNORE_WCASTALIGN((struct Block_descriptor_3 *)desc);
}
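
// Added commentary (not in the original source): the descriptor is a packed
// sequence of optional pieces, which is why _Block_descriptor_3 has to step
// over the earlier ones:
//
//   Block_descriptor_1   always present (includes the block's size)
//   Block_descriptor_2   present only with BLOCK_HAS_COPY_DISPOSE (copy/dispose helpers)
//   Block_descriptor_3   present only with BLOCK_HAS_SIGNATURE (signature, layout)
//
// Blocks compiled with BLOCK_SMALL_DESCRIPTOR use Block_descriptor_small
// instead, with self-relative 32-bit fields (see unwrap_relative_pointer).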

static void
_Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
	if (auto *pFn = _Block_get_copy_function(aBlock)) {
		pFn(result, aBlock);
	}
}

static void
_Block_call_dispose_helper(struct Block_layout *aBlock)
{
	if (auto *pFn = _Block_get_dispose_function(aBlock)) {
		pFn(aBlock);
	}
}

/*******************************************************************************
 *  Internal Support routines for copying
 ********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump refcount, of a block.  If really copying, call the copy helper if present.
void *
_Block_copy(const void *arg)
{
	struct Block_layout *aBlock;

	if (!arg) {
		return NULL;
	}

	// The following would be better done as a switch statement
	aBlock = (struct Block_layout *)arg;
	if (aBlock->flags & BLOCK_NEEDS_FREE) {
		// latches on high
		latching_incr_int(&aBlock->flags);
		return aBlock;
	} else if (aBlock->flags & BLOCK_IS_GLOBAL) {
		return aBlock;
	} else {
		// It's a stack block.  Make a copy.
		size_t size = Block_size(aBlock);
		struct Block_layout *result = (struct Block_layout *)malloc(size);
		if (!result) {
			return NULL;
		}
		memmove(result, aBlock, size); // bitcopy first
#if __has_feature(ptrauth_calls)
		// Resign the invoke pointer as it uses address authentication.
		result->invoke = aBlock->invoke;

#if __has_feature(ptrauth_signed_block_descriptors)
		uintptr_t oldDesc =
		    ptrauth_blend_discriminator(
			&aBlock->descriptor, _Block_descriptor_ptrauth_discriminator);
		uintptr_t newDesc =
		    ptrauth_blend_discriminator(
			&result->descriptor, _Block_descriptor_ptrauth_discriminator);

		result->descriptor =
		    ptrauth_auth_and_resign(aBlock->descriptor, ptrauth_key_asda, oldDesc,
		    ptrauth_key_asda, newDesc);
#endif
#endif

		// reset refcount
		result->flags &= ~(BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING); // XXX not needed
		result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1
		_Block_call_copy_helper(result, aBlock);
		// Set isa last so memory analysis tools see a fully-initialized object.
		result->isa = _NSConcreteMallocBlock;
		return result;
	}
}
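
// Added illustration (not in the original source): the three branches above
// from a caller's point of view, using the public Block_copy/Block_release
// macros from <Block.h> that wrap _Block_copy/_Block_release.
#if 0 // illustrative sketch only (needs -fblocks); not part of the runtime
static void
block_copy_example(void)
{
	int captured = 7;
	int (^stackBlock)(void) = ^{ return captured; }; // stack block
	int (^heapBlock)(void) = Block_copy(stackBlock); // third branch: real copy, runs the copy helper
	int (^again)(void) = Block_copy(heapBlock);      // first branch: just bumps the latched refcount
	Block_release(again);
	Block_release(heapBlock); // last release runs the dispose helper and free()
}
#endif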


// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment and return it.
// Otherwise we need to copy it and update the stack forwarding pointer
static struct Block_byref *
_Block_byref_copy(const void *arg)
{
	struct Block_byref *src = (struct Block_byref *)arg;

	if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
		// src points to stack
		struct Block_byref *copy = (struct Block_byref *)malloc(src->size);
		copy->isa = NULL;
		// byref value 4 is logical refcount of 2: one for caller, one for stack
		copy->flags = src->flags | BLOCK_BYREF_NEEDS_FREE | 4;
		copy->forwarding = copy; // patch heap copy to point to itself
		src->forwarding = copy; // patch stack to point to heap copy
		copy->size = src->size;

		if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
			// Trust copy helper to copy everything of interest
			// If more than one field shows up in a byref block this is wrong XXX
			struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src + 1);
			struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy + 1);
			copy2->byref_keep = src2->byref_keep;
			copy2->byref_destroy = src2->byref_destroy;

			if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
				struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2 + 1);
				struct Block_byref_3 *copy3 = (struct Block_byref_3 *)(copy2 + 1);
				copy3->layout = src3->layout;
			}

			(*src2->byref_keep)(copy, src);
		} else {
			// Bitwise copy.
			// This copy includes Block_byref_3, if any.
			memmove(copy + 1, src + 1, src->size - sizeof(*src));
		}
	}
	// already copied to heap
	else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
		latching_incr_int(&src->forwarding->flags);
	}

	return src->forwarding;
}
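
// Added illustration (not in the original source): what _Block_byref_copy
// means at the source level. A __block variable is emitted as a Block_byref
// on the stack; copying any block that captures it promotes the byref to the
// heap once and repoints the stack copy's forwarding pointer there, so reads
// and writes from every copy keep hitting the same storage.
#if 0 // illustrative sketch only (needs -fblocks); not part of the runtime
static void
byref_promotion_example(void)
{
	__block int counter = 0;                   // backed by a stack Block_byref
	void (^bump)(void) = ^{ counter++; };
	void (^heapBump)(void) = Block_copy(bump); // byref promoted to the heap here
	heapBump();
	counter++;                                 // same heap storage as the block sees
	Block_release(heapBump);                   // drops the heap block's reference; the byref
	                                           // is freed once the stack frame's reference
	                                           // goes away at end of scope as well
}
#endif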

static void
_Block_byref_release(const void *arg)
{
	struct Block_byref *byref = (struct Block_byref *)arg;

	// dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
	byref = byref->forwarding;

	if (byref->flags & BLOCK_BYREF_NEEDS_FREE) {
		__assert_only int32_t refcount = byref->flags & BLOCK_REFCOUNT_MASK;
		os_assert(refcount);
		if (latching_decr_int_should_deallocate(&byref->flags)) {
			if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
				struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref + 1);
				(*byref2->byref_destroy)(byref);
			}
			free(byref, byref->size);
		}
	}
}


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif


// API entry point to release a copied Block
void
_Block_release(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	if (!aBlock) {
		return;
	}
	if (aBlock->flags & BLOCK_IS_GLOBAL) {
		return;
	}
	if (!(aBlock->flags & BLOCK_NEEDS_FREE)) {
		return;
	}

	if (latching_decr_int_should_deallocate(&aBlock->flags)) {
		_Block_call_dispose_helper(aBlock);
		_Block_destructInstance(aBlock);
		free(aBlock, Block_size(aBlock));
	}
}

bool
_Block_tryRetain(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	return latching_incr_int_not_deallocating(&aBlock->flags);
}

bool
_Block_isDeallocating(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}


/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

size_t
Block_size(void *aBlock)
{
	auto *layout = (Block_layout *)aBlock;
	void *desc = _Block_get_descriptor(layout);
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		return ((Block_descriptor_small *)desc)->size;
	}
	return ((Block_descriptor_1 *)desc)->size;
}

bool
_Block_use_stret(void *aBlock)
{
	struct Block_layout *layout = (struct Block_layout *)aBlock;

	int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
	return (layout->flags & requiredFlags) == requiredFlags;
}

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool
_Block_has_signature(void *aBlock)
{
	return _Block_signature(aBlock) ? true : false;
}

const char *
_Block_signature(void *aBlock)
{
	struct Block_layout *layout = (struct Block_layout *)aBlock;
	if (!(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		return unwrap_relative_pointer<const char>(bds->signature);
	}

	struct Block_descriptor_3 *desc3 = _Block_descriptor_3(layout);
	return desc3->signature;
}

const char *
_Block_layout(void *aBlock)
{
	// Don't return extended layout to callers expecting old GC layout
	Block_layout *layout = (Block_layout *)aBlock;
	if ((layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) ||
	    !(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		return unwrap_relative_pointer<const char>(bds->layout);
	}

	Block_descriptor_3 *desc = _Block_descriptor_3(layout);
	return desc->layout;
}

const char *
_Block_extended_layout(void *aBlock)
{
	// Don't return old GC layout to callers expecting extended layout
	Block_layout *layout = (Block_layout *)aBlock;
	if (!(layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) ||
	    !(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

	const char *extLayout;
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		if (layout->flags & BLOCK_INLINE_LAYOUT_STRING) {
			extLayout = (const char *)(uintptr_t)bds->layout;
		} else {
			extLayout = unwrap_relative_pointer<const char>(bds->layout);
		}
	} else {
		Block_descriptor_3 *desc3 = _Block_descriptor_3(layout);
		extLayout = desc3->layout;
	}

	// Return empty string (all non-object bytes) instead of NULL
	// so callers can distinguish "empty layout" from "no layout".
	if (!extLayout) {
		extLayout = "";
	}
	return extLayout;
}

#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif


/*******************************************************
 *
 *  Entry points used by the compiler - the real API!
 *
 *
 *  A Block can reference four different kinds of things that require help when the Block is copied to the heap.
 *  1) C++ stack based objects
 *  2) References to Objective-C objects
 *  3) Other Blocks
 *  4) __block variables
 *
 *  In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers.  The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign.  The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.
 *
 *  The flags parameter of _Block_object_assign and _Block_object_dispose is set to
 * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
 * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
 * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
 *  If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16)
 *
 *  So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
 *
 *  When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions.  Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor.  And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.
 *
 *  So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities:
 *   __block id                   128+3       (0x83)
 *   __block (^Block)             128+7       (0x87)
 *   __weak __block id            128+3+16    (0x93)
 *   __weak __block (^Block)      128+7+16    (0x97)
 *
 *
 ********************************************************/
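
// Added illustration (not in the original source): roughly what the
// compiler-synthesized copy/dispose helpers described above look like for a
// block that captures an Objective-C object, another block, and a __block
// variable. The struct and helper names here are made up; the flag values
// are the ones listed in the comment above.
#if 0 // illustrative sketch only, never compiled
struct example_block_literal {
	void *isa;
	int flags;
	int reserved;
	void (*invoke)(struct example_block_literal *);
	void *descriptor;
	// captured state:
	void *captured_object;              // id
	void *captured_block;               // another Block
	struct Block_byref *captured_byref; // __block variable
};

static void
example_copy_helper(struct example_block_literal *dst, struct example_block_literal *src)
{
	_Block_object_assign(&dst->captured_object, src->captured_object, BLOCK_FIELD_IS_OBJECT); // 3
	_Block_object_assign(&dst->captured_block, src->captured_block, BLOCK_FIELD_IS_BLOCK);    // 7
	_Block_object_assign(&dst->captured_byref, src->captured_byref, BLOCK_FIELD_IS_BYREF);    // 8
}

static void
example_dispose_helper(struct example_block_literal *src)
{
	_Block_object_dispose(src->captured_object, BLOCK_FIELD_IS_OBJECT);
	_Block_object_dispose(src->captured_block, BLOCK_FIELD_IS_BLOCK);
	_Block_object_dispose(src->captured_byref, BLOCK_FIELD_IS_BYREF);
}
#endif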

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
void
_Block_object_assign(void *destArg, const void *object, const int flags)
{
	const void **dest = (const void **)destArg;
	switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
	case BLOCK_FIELD_IS_OBJECT:
		/*******
		 *  id object = ...;
		 *  [^{ object; } copy];
		 ********/

		_Block_retain_object(object);
		*dest = object;
		break;

	case BLOCK_FIELD_IS_BLOCK:
		/*******
		 *  void (^object)(void) = ...;
		 *  [^{ object; } copy];
		 ********/

		*dest = _Block_copy(object);
		break;

	case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
	case BLOCK_FIELD_IS_BYREF:
		/*******
		 *  // copy the onstack __block container to the heap
		 *  // Note this __weak is old GC-weak/MRC-unretained.
		 *  // ARC-style __weak is handled by the copy helper directly.
		 *  __block ... x;
		 *  __weak __block ... x;
		 *  [^{ x; } copy];
		 ********/

		*dest = _Block_byref_copy(object);
		break;

	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
		/*******
		 *  // copy the actual field held in the __block container
		 *  // Note this is MRC unretained __block only.
		 *  // ARC retained __block is handled by the copy helper directly.
		 *  __block id object;
		 *  __block void (^object)(void);
		 *  [^{ object; } copy];
		 ********/

		*dest = object;
		break;

	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK  | BLOCK_FIELD_IS_WEAK:
		/*******
		 *  // copy the actual field held in the __block container
		 *  // Note this __weak is old GC-weak/MRC-unretained.
		 *  // ARC-style __weak is handled by the copy helper directly.
		 *  __weak __block id object;
		 *  __weak __block void (^object)(void);
		 *  [^{ object; } copy];
		 ********/

		*dest = object;
		break;

	default:
		break;
	}
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
void
_Block_object_dispose(const void *object, const int flags)
{
	switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
	case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
	case BLOCK_FIELD_IS_BYREF:
		// get rid of the __block data structure held in a Block
		_Block_byref_release(object);
		break;
	case BLOCK_FIELD_IS_BLOCK:
		_Block_release(object);
		break;
	case BLOCK_FIELD_IS_OBJECT:
		_Block_release_object(object);
		break;
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK  | BLOCK_FIELD_IS_WEAK:
		break;
	default:
		break;
	}
}


// Workaround for <rdar://26015603> dylib with no __DATA segment fails to rebase
__attribute__((used))
static int let_there_be_data = 42;