//===--- CGBlocks.cpp - Emit LLVM Code for blocks -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit blocks.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
#include <algorithm>

using namespace clang;
using namespace CodeGen;

llvm::Constant *CodeGenFunction::
BuildDescriptorBlockDecl(bool BlockHasCopyDispose, CharUnits Size,
                         const llvm::StructType* Ty,
                         std::vector<HelperInfo> *NoteForHelper) {
  const llvm::Type *UnsignedLongTy
    = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
  llvm::Constant *C;
  std::vector<llvm::Constant*> Elts;

  // reserved
  C = llvm::ConstantInt::get(UnsignedLongTy, 0);
  Elts.push_back(C);

  // Size
  // FIXME: What is the right way to say this doesn't fit? We should give
  // a user diagnostic in that case. Better fix would be to change the
  // API to size_t.
  C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity());
  Elts.push_back(C);

  if (BlockHasCopyDispose) {
    // copy_func_helper_decl
    Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));

    // destroy_func_decl
    Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
  }

  C = llvm::ConstantStruct::get(VMContext, Elts, false);

  C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage,
                               C, "__block_descriptor_tmp");
  return C;
}

llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
  if (NSConcreteGlobalBlock == 0)
    NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                      "_NSConcreteGlobalBlock");
  return NSConcreteGlobalBlock;
}

llvm::Constant *BlockModule::getNSConcreteStackBlock() {
  if (NSConcreteStackBlock == 0)
    NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                     "_NSConcreteStackBlock");
  return NSConcreteStackBlock;
}

static void CollectBlockDeclRefInfo(
    const Stmt *S, CodeGenFunction::BlockInfo &Info,
    llvm::SmallSet<const DeclContext *, 16> &InnerContexts) {
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (*I)
      CollectBlockDeclRefInfo(*I, Info, InnerContexts);

  // We want to ensure we walk down into block literals so we can find
  // all nested BlockDeclRefExprs.
  if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
    InnerContexts.insert(cast<DeclContext>(BE->getBlockDecl()));
    CollectBlockDeclRefInfo(BE->getBody(), Info, InnerContexts);
  }

  if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
    // FIXME: Handle enums.
    if (isa<FunctionDecl>(BDRE->getDecl()))
      return;

    // Only Decls that escape are added.
    if (!InnerContexts.count(BDRE->getDecl()->getDeclContext()))
      Info.DeclRefs.push_back(BDRE);
  }
}

/// CanBlockBeGlobal - Given a BlockInfo struct, determines if a block can be
/// declared as a global variable instead of on the stack.
static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) {
  return Info.DeclRefs.empty();
}

/// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to
/// ensure we can generate the debug information for the parameter for the
/// block invoke function.
static void AllocateAllBlockDeclRefs(const CodeGenFunction::BlockInfo &Info,
                                     CodeGenFunction *CGF) {
  // Always allocate self, as it is often handy in the debugger, even if there
  // is no codegen in the block that uses it.  Doing so unconditionally also
  // saves us from having to track down every use of the self pointer,
  // including implicit uses.
  if (const ObjCMethodDecl *OMD
        = dyn_cast_or_null<ObjCMethodDecl>(CGF->CurFuncDecl)) {
    ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
    BlockDeclRefExpr *BDRE = new (CGF->getContext())
      BlockDeclRefExpr(SelfDecl,
                       SelfDecl->getType(), SourceLocation(), false);
    CGF->AllocateBlockDecl(BDRE);
  }

  // FIXME: Also always forward the this pointer in C++ as well.

  for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
    CGF->AllocateBlockDecl(Info.DeclRefs[i]);
}

// FIXME: Push most into CGM, passing down a few bits, like current function
// name.
llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {

  std::string Name = CurFn->getName();
  CodeGenFunction::BlockInfo Info(0, Name.c_str());
  llvm::SmallSet<const DeclContext *, 16> InnerContexts;
  InnerContexts.insert(BE->getBlockDecl());
  CollectBlockDeclRefInfo(BE->getBody(), Info, InnerContexts);

  // Check if the block can be global.
  // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
  // to just have one code path. We should move this function into CGM and pass
  // CGF, then we can just check to see if CGF is 0.
  if (0 && CanBlockBeGlobal(Info))
    return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());

  size_t BlockFields = 5;

  bool hasIntrospection = CGM.getContext().getLangOptions().BlockIntrospection;

  if (hasIntrospection) {
    BlockFields++;
  }
  std::vector<llvm::Constant*> Elts(BlockFields);

  if (hasIntrospection) {
    std::string BlockTypeEncoding;
    CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);

    Elts[5] = llvm::ConstantExpr::getBitCast(
        CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);
  }

  llvm::Constant *C;
  llvm::Value *V;

  {
    // C = BuildBlockStructInitlist();
    unsigned int flags = BLOCK_HAS_DESCRIPTOR;

    if (hasIntrospection)
      flags |= BLOCK_HAS_OBJC_TYPE;
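
    // Note: the literal filled in below follows the generic layout that the
    // blocks runtime expects (see getGenericBlockLiteralType()); any captured
    // variables are appended after these fixed fields:
    //   Elts[0]  void *__isa;
    //   Elts[1]  int __flags;
    //   Elts[2]  int __reserved;
    //   Elts[3]  void (*__invoke)(void *);
    //   Elts[4]  struct __block_descriptor *__descriptor;
    //   Elts[5]  const char *__types;   // only with BlockIntrospection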

    // We run this first so that we set BlockHasCopyDispose from the entire
    // block literal.
    // __invoke
    CharUnits subBlockSize;
    CharUnits subBlockAlign;
    llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
    bool subBlockHasCopyDispose = false;
    llvm::Function *Fn
      = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
                                                   LocalDeclMap,
                                                   subBlockSize,
                                                   subBlockAlign,
                                                   subBlockDeclRefDecls,
                                                   subBlockHasCopyDispose);
    BlockHasCopyDispose |= subBlockHasCopyDispose;
    Elts[3] = Fn;

    // FIXME: Don't use BlockHasCopyDispose, it is set more often than
    // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
    if (subBlockHasCopyDispose)
      flags |= BLOCK_HAS_COPY_DISPOSE;

    // __isa
    C = CGM.getNSConcreteStackBlock();
    C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
    Elts[0] = C;

    // __flags
    const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
      CGM.getTypes().ConvertType(CGM.getContext().IntTy));
    C = llvm::ConstantInt::get(IntTy, flags);
    Elts[1] = C;

    // __reserved
    C = llvm::ConstantInt::get(IntTy, 0);
    Elts[2] = C;

    if (subBlockDeclRefDecls.size() == 0) {
      // __descriptor
      Elts[4] = BuildDescriptorBlockDecl(subBlockHasCopyDispose, subBlockSize,
                                         0, 0);

      // Optimize to being a global block.
      Elts[0] = CGM.getNSConcreteGlobalBlock();
      Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);

      C = llvm::ConstantStruct::get(VMContext, Elts, false);

      C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
                                   llvm::GlobalValue::InternalLinkage, C,
                                   "__block_holder_tmp_" +
                                   llvm::Twine(CGM.getGlobalUniqueCount()));
      QualType BPT = BE->getType();
      C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
      return C;
    }

    std::vector<const llvm::Type *> Types(BlockFields +
                                          subBlockDeclRefDecls.size());
    for (int i=0; i<4; ++i)
      Types[i] = Elts[i]->getType();
    Types[4] = PtrToInt8Ty;
    if (hasIntrospection)
      Types[5] = PtrToInt8Ty;

    for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) {
      const Expr *E = subBlockDeclRefDecls[i];
      const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
      QualType Ty = E->getType();
      if (BDRE && BDRE->isByRef()) {
        Types[i+BlockFields] =
          llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
      } else
        Types[i+BlockFields] = ConvertType(Ty);
    }

    llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);

    llvm::AllocaInst *A = CreateTempAlloca(Ty);
    A->setAlignment(subBlockAlign.getQuantity());
    V = A;

    std::vector<HelperInfo> NoteForHelper(subBlockDeclRefDecls.size());
    int helpersize = 0;

    for (unsigned i=0; i<4; ++i)
      Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
    if (hasIntrospection)
      Builder.CreateStore(Elts[5], Builder.CreateStructGEP(V, 5, "block.tmp"));

    for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i)
      {
        // FIXME: Push const down.
        Expr *E = const_cast<Expr*>(subBlockDeclRefDecls[i]);
        DeclRefExpr *DR;
        ValueDecl *VD;

        DR = dyn_cast<DeclRefExpr>(E);
        // Skip padding.
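        // Padding entries are plain DeclRefExprs referring to the char-array
        // pad decls synthesized in getBlockOffset(); they only reserve space
        // in the literal, so nothing is stored for them and they are not
        // recorded in NoteForHelper.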
        if (DR) continue;

        BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
        VD = BDRE->getDecl();

        llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
        NoteForHelper[helpersize].index = i+5;
        NoteForHelper[helpersize].RequiresCopying
          = BlockRequiresCopying(VD->getType());
        NoteForHelper[helpersize].flag
          = (VD->getType()->isBlockPointerType()
             ? BLOCK_FIELD_IS_BLOCK
             : BLOCK_FIELD_IS_OBJECT);

        if (LocalDeclMap[VD]) {
          if (BDRE->isByRef()) {
            NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
              // FIXME: Someone double check this.
              (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
            llvm::Value *Loc = LocalDeclMap[VD];
            Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
            Loc = Builder.CreateLoad(Loc);
            Builder.CreateStore(Loc, Addr);
            ++helpersize;
            continue;
          } else
            E = new (getContext()) DeclRefExpr(VD,
                                               VD->getType(),
                                               SourceLocation());
        }
        if (BDRE->isByRef()) {
          NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
            // FIXME: Someone double check this.
            (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
          E = new (getContext())
            UnaryOperator(E, UnaryOperator::AddrOf,
                          getContext().getPointerType(E->getType()),
                          SourceLocation());
        }
        ++helpersize;

        RValue r = EmitAnyExpr(E, Addr, false);
        if (r.isScalar()) {
          llvm::Value *Loc = r.getScalarVal();
          const llvm::Type *Ty = Types[i+BlockFields];
          if (BDRE->isByRef()) {
            // E is now the address of the value field; instead, we want the
            // address of the actual ByRef struct.  We optimize this slightly
            // compared to gcc by not grabbing the forwarding slot as this must
            // be done during Block_copy for us, and we can postpone the work
            // until then.
            CharUnits offset = BlockDecls[BDRE->getDecl()];

            llvm::Value *BlockLiteral = LoadBlockStruct();

            Loc = Builder.CreateGEP(BlockLiteral,
                      llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                                             offset.getQuantity()),
                                    "block.literal");
            Ty = llvm::PointerType::get(Ty, 0);
            Loc = Builder.CreateBitCast(Loc, Ty);
            Loc = Builder.CreateLoad(Loc);
            // Loc = Builder.CreateBitCast(Loc, Ty);
          }
          Builder.CreateStore(Loc, Addr);
        } else if (r.isComplex())
          // FIXME: implement
          ErrorUnsupported(BE, "complex in block literal");
        else if (r.isAggregate())
          ; // Already created into the destination
        else
          assert(0 && "bad block variable");
        // FIXME: Ensure that the offset created by the backend for
        // the struct matches the previously computed offset in BlockDecls.
      }
    NoteForHelper.resize(helpersize);

    // __descriptor
    llvm::Value *Descriptor = BuildDescriptorBlockDecl(subBlockHasCopyDispose,
                                                       subBlockSize, Ty,
                                                       &NoteForHelper);
    Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
    Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
  }

  QualType BPT = BE->getType();
  return Builder.CreateBitCast(V, ConvertType(BPT));
}


const llvm::Type *BlockModule::getBlockDescriptorType() {
  if (BlockDescriptorType)
    return BlockDescriptorType;

  const llvm::Type *UnsignedLongTy =
    getTypes().ConvertType(getContext().UnsignedLongTy);

  // struct __block_descriptor {
  //   unsigned long reserved;
  //   unsigned long block_size;
  // };
  BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
                                              UnsignedLongTy,
                                              UnsignedLongTy,
                                              NULL);

  getModule().addTypeName("struct.__block_descriptor",
                          BlockDescriptorType);

  return BlockDescriptorType;
}

const llvm::Type *BlockModule::getGenericBlockLiteralType() {
  if (GenericBlockLiteralType)
    return GenericBlockLiteralType;

  const llvm::Type *BlockDescPtrTy =
    llvm::PointerType::getUnqual(getBlockDescriptorType());

  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  // struct __block_literal_generic {
  //   void *__isa;
  //   int __flags;
  //   int __reserved;
  //   void (*__invoke)(void *);
  //   struct __block_descriptor *__descriptor;
  //   // GNU runtime only:
  //   const char *types;
  // };
  if (CGM.getContext().getLangOptions().BlockIntrospection)
    GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
                                                    PtrToInt8Ty,
                                                    IntTy,
                                                    IntTy,
                                                    PtrToInt8Ty,
                                                    BlockDescPtrTy,
                                                    PtrToInt8Ty,
                                                    NULL);
  else
    GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
                                                    PtrToInt8Ty,
                                                    IntTy,
                                                    IntTy,
                                                    PtrToInt8Ty,
                                                    BlockDescPtrTy,
                                                    NULL);

  getModule().addTypeName("struct.__block_literal_generic",
                          GenericBlockLiteralType);

  return GenericBlockLiteralType;
}

const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
  if (GenericExtendedBlockLiteralType)
    return GenericExtendedBlockLiteralType;

  const llvm::Type *BlockDescPtrTy =
    llvm::PointerType::getUnqual(getBlockDescriptorType());

  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  // struct __block_literal_extended_generic {
  //   void *__isa;
  //   int __flags;
  //   int __reserved;
  //   void (*__invoke)(void *);
  //   struct __block_descriptor *__descriptor;
  //   void *__copy_func_helper_decl;
  //   void *__destroy_func_decl;
  // };
  GenericExtendedBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
                                                          PtrToInt8Ty,
                                                          IntTy,
                                                          IntTy,
                                                          PtrToInt8Ty,
                                                          BlockDescPtrTy,
                                                          PtrToInt8Ty,
                                                          PtrToInt8Ty,
                                                          NULL);

  getModule().addTypeName("struct.__block_literal_extended_generic",
                          GenericExtendedBlockLiteralType);

  return GenericExtendedBlockLiteralType;
}

RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
                                          ReturnValueSlot ReturnValue) {
  const BlockPointerType *BPT =
    E->getCallee()->getType()->getAs<BlockPointerType>();

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
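
  // Calling a block follows the layout above: treat the callee as a pointer
  // to the generic block literal, load the function pointer out of the
  // __invoke slot (field 3), and pass the literal itself as the implicit
  // first argument, followed by the user-supplied arguments.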

  // Get a pointer to the generic block literal.
  const llvm::Type *BlockLiteralTy =
    llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());

  // Bitcast the callee to a block literal.
  llvm::Value *BlockLiteral =
    Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");

  // Get the function pointer from the literal.
  llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");

  BlockLiteral =
    Builder.CreateBitCast(BlockLiteral,
                          llvm::Type::getInt8PtrTy(VMContext),
                          "tmp");

  // Add the block literal.
  QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
  CallArgList Args;
  Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));

  QualType FnType = BPT->getPointeeType();

  // And the rest of the arguments.
  EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
               E->arg_begin(), E->arg_end());

  // Load the function.
  llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp");

  QualType ResultType = FnType->getAs<FunctionType>()->getResultType();

  const CGFunctionInfo &FnInfo =
    CGM.getTypes().getFunctionInfo(ResultType, Args);

  // Cast the function pointer to the right type.
  const llvm::Type *BlockFTy =
    CGM.getTypes().GetFunctionType(FnInfo, false);

  const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
  Func = Builder.CreateBitCast(Func, BlockFTyPtr);

  // And call the block.
  return EmitCall(FnInfo, Func, ReturnValue, Args);
}

CharUnits CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
  const ValueDecl *VD = E->getDecl();
  CharUnits &offset = BlockDecls[VD];

  // See if we have already allocated an offset for this variable.
  if (offset.isPositive())
    return offset;

  // Don't run the expensive check, unless we have to.
  if (!BlockHasCopyDispose)
    if (E->isByRef()
        || BlockRequiresCopying(E->getType()))
      BlockHasCopyDispose = true;

  // if not, allocate one now.
  offset = getBlockOffset(E);

  return offset;
}

llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
  const ValueDecl *VD = E->getDecl();
  CharUnits offset = AllocateBlockDecl(E);

  llvm::Value *BlockLiteral = LoadBlockStruct();
  llvm::Value *V = Builder.CreateGEP(BlockLiteral,
                      llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                                             offset.getQuantity()),
                                     "block.literal");
  if (E->isByRef()) {
    const llvm::Type *PtrStructTy
      = llvm::PointerType::get(BuildByRefType(VD), 0);
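    // A __block variable lives in a __Block_byref structure (built by
    // BuildByRefType); what the block literal captures is a pointer to that
    // structure.  Field 1 is its "forwarding" pointer, which is chased below
    // so accesses always see the current (possibly heap-copied) copy of the
    // variable; the variable's own slot is getByRefValueLLVMField(VD).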
    // The block literal will need a copy/destroy helper.
    BlockHasCopyDispose = true;

    const llvm::Type *Ty = PtrStructTy;
    Ty = llvm::PointerType::get(Ty, 0);
    V = Builder.CreateBitCast(V, Ty);
    V = Builder.CreateLoad(V);
    V = Builder.CreateStructGEP(V, 1, "forwarding");
    V = Builder.CreateLoad(V);
    V = Builder.CreateBitCast(V, PtrStructTy);
    V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
                                VD->getNameAsString());
  } else {
    const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());

    Ty = llvm::PointerType::get(Ty, 0);
    V = Builder.CreateBitCast(V, Ty);
  }
  return V;
}

void CodeGenFunction::BlockForwardSelf() {
  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
  ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
  llvm::Value *&DMEntry = LocalDeclMap[SelfDecl];
  if (DMEntry)
    return;
  // FIXME - Eliminate BlockDeclRefExprs, clients don't need/want to care.
  BlockDeclRefExpr *BDRE = new (getContext())
    BlockDeclRefExpr(SelfDecl,
                     SelfDecl->getType(), SourceLocation(), false);
  DMEntry = GetAddrOfBlockDecl(BDRE);
}

llvm::Constant *
BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
  // Generate the block descriptor.
  const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  llvm::Constant *DescriptorFields[2];

  // Reserved
  DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);

  // Block literal size. For global blocks we just use the size of the generic
  // block literal struct.
  CharUnits BlockLiteralSize =
    CGM.GetTargetTypeStoreSize(getGenericBlockLiteralType());
  DescriptorFields[1] =
    llvm::ConstantInt::get(UnsignedLongTy, BlockLiteralSize.getQuantity());

  llvm::Constant *DescriptorStruct =
    llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 2, false);

  llvm::GlobalVariable *Descriptor =
    new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
                             llvm::GlobalVariable::InternalLinkage,
                             DescriptorStruct, "__block_descriptor_global");

  int FieldCount = 5;
  // Generate the constants for the block literal.
  if (CGM.getContext().getLangOptions().BlockIntrospection)
    FieldCount = 6;

  std::vector<llvm::Constant*> LiteralFields(FieldCount);

  CodeGenFunction::BlockInfo Info(0, n);
  CharUnits subBlockSize;
  CharUnits subBlockAlign;
  llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
  bool subBlockHasCopyDispose = false;
  llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
  llvm::Function *Fn
    = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap,
                                                 subBlockSize,
                                                 subBlockAlign,
                                                 subBlockDeclRefDecls,
                                                 subBlockHasCopyDispose);
  assert(subBlockSize == BlockLiteralSize
         && "no imports allowed for global block");

  // isa
  LiteralFields[0] = getNSConcreteGlobalBlock();

  // Flags
  LiteralFields[1] = CGM.getContext().getLangOptions().BlockIntrospection ?
    llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR |
                           BLOCK_HAS_OBJC_TYPE) :
    llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR);

  // Reserved
  LiteralFields[2] = llvm::Constant::getNullValue(IntTy);

  // Function
  LiteralFields[3] = Fn;

  // Descriptor
  LiteralFields[4] = Descriptor;

  // Type encoding
  if (CGM.getContext().getLangOptions().BlockIntrospection) {
    std::string BlockTypeEncoding;
    CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);

    LiteralFields[5] = CGM.GetAddrOfConstantCString(BlockTypeEncoding);
  }

  llvm::Constant *BlockLiteralStruct =
    llvm::ConstantStruct::get(VMContext, LiteralFields, false);

  llvm::GlobalVariable *BlockLiteral =
    new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
                             llvm::GlobalVariable::InternalLinkage,
                             BlockLiteralStruct, "__block_literal_global");

  return BlockLiteral;
}

llvm::Value *CodeGenFunction::LoadBlockStruct() {
  llvm::Value *V = Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()],
                                      "self");
  // For now, we codegen based upon byte offsets.
  return Builder.CreateBitCast(V, PtrToInt8Ty);
}

llvm::Function *
CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
                                       const BlockInfo& Info,
                                       const Decl *OuterFuncDecl,
                                       llvm::DenseMap<const Decl*, llvm::Value*> ldm,
                                       CharUnits &Size,
                                       CharUnits &Align,
                                       llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
                                       bool &subBlockHasCopyDispose) {

  // Check if we should generate debug info for this block.
  if (CGM.getDebugInfo())
    DebugInfo = CGM.getDebugInfo();

  // Arrange for local static and local extern declarations to appear
  // to be local to this function as well, as they are directly referenced
  // in a block.
  for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
       i != ldm.end();
       ++i) {
    const VarDecl *VD = dyn_cast<VarDecl>(i->first);

    if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
      LocalDeclMap[VD] = i->second;
  }

  BlockOffset =
    CGM.GetTargetTypeStoreSize(CGM.getGenericBlockLiteralType());
  BlockAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);

  const FunctionType *BlockFunctionType = BExpr->getFunctionType();
  QualType ResultType;
  bool IsVariadic;
  if (const FunctionProtoType *FTy =
        dyn_cast<FunctionProtoType>(BlockFunctionType)) {
    ResultType = FTy->getResultType();
    IsVariadic = FTy->isVariadic();
  } else {
    // K&R style block.
    ResultType = BlockFunctionType->getResultType();
    IsVariadic = false;
  }

  FunctionArgList Args;

  CurFuncDecl = OuterFuncDecl;

  const BlockDecl *BD = BExpr->getBlockDecl();

  IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");

  // Allocate all BlockDeclRefDecls, so we can calculate the right ParmTy
  // below.
  AllocateAllBlockDeclRefs(Info, this);

  QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
                                                  BlockDeclRefDecls);
  // FIXME: This leaks
  ImplicitParamDecl *SelfDecl =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), II,
                              ParmTy);

  Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
  BlockStructDecl = SelfDecl;

  for (BlockDecl::param_const_iterator i = BD->param_begin(),
       e = BD->param_end(); i != e; ++i)
    Args.push_back(std::make_pair(*i, (*i)->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(ResultType, Args);

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           llvm::Twine("__") + Info.Name + "_block_invoke_",
                           &CGM.getModule());

  CGM.SetInternalFunctionAttributes(BD, Fn, FI);

  StartFunction(BD, ResultType, Fn, Args,
                BExpr->getBody()->getLocEnd());

  CurFuncDecl = OuterFuncDecl;
  CurCodeDecl = BD;

  // Save a spot to insert the debug information for all the BlockDeclRefDecls.
  llvm::BasicBlock *entry = Builder.GetInsertBlock();
  llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
  --entry_ptr;

  EmitStmt(BExpr->getBody());

  // Remember where we were...
  llvm::BasicBlock *resume = Builder.GetInsertBlock();

  // Go back to the entry.
  ++entry_ptr;
  Builder.SetInsertPoint(entry, entry_ptr);

  if (CGDebugInfo *DI = getDebugInfo()) {
    // Emit debug information for all the BlockDeclRefDecls.
    for (unsigned i = 0, e = BlockDeclRefDecls.size(); i != e; ++i) {
      if (const BlockDeclRefExpr *BDRE =
            dyn_cast<BlockDeclRefExpr>(BlockDeclRefDecls[i])) {
        const ValueDecl *D = BDRE->getDecl();
        DI->setLocation(D->getLocation());
        DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
                                          LocalDeclMap[getBlockStructDecl()],
                                              Builder, this);
      }
    }
  }
  // And resume where we left off.
  if (resume == 0)
    Builder.ClearInsertionPoint();
  else
    Builder.SetInsertPoint(resume);

  FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());

  // The runtime needs a minimum alignment of a void *.
  CharUnits MinAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
  BlockOffset = CharUnits::fromQuantity(
      llvm::RoundUpToAlignment(BlockOffset.getQuantity(),
                               MinAlign.getQuantity()));

  Size = BlockOffset;
  Align = BlockAlign;
  subBlockDeclRefDecls = BlockDeclRefDecls;
  subBlockHasCopyDispose |= BlockHasCopyDispose;
  return Fn;
}

CharUnits BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
  const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl());

  CharUnits Size = getContext().getTypeSizeInChars(D->getType());
  CharUnits Align = getContext().getDeclAlign(D);

  if (BDRE->isByRef()) {
    Size = getContext().getTypeSizeInChars(getContext().VoidPtrTy);
    Align = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
  }

  assert(Align.isPositive() && "alignment must be 1 byte or more");

  CharUnits OldOffset = BlockOffset;

  // Ensure proper alignment, even if it means we have to have a gap.
  BlockOffset = CharUnits::fromQuantity(
      llvm::RoundUpToAlignment(BlockOffset.getQuantity(), Align.getQuantity()));
  BlockAlign = std::max(Align, BlockAlign);

  CharUnits Pad = BlockOffset - OldOffset;
  if (Pad.isPositive()) {
    llvm::ArrayType::get(llvm::Type::getInt8Ty(VMContext), Pad.getQuantity());
    QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
                                                       llvm::APInt(32,
                                                         Pad.getQuantity()),
                                                       ArrayType::Normal, 0);
    ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
                                         0, QualType(PadTy), 0, VarDecl::None);
    Expr *E;
    E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
                                       SourceLocation());
    BlockDeclRefDecls.push_back(E);
  }
  BlockDeclRefDecls.push_back(BDRE);

  BlockOffset += Size;
  return BlockOffset - Size;
}

llvm::Constant *BlockFunction::
GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
                           std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  // FIXME: We'd like to put these into a mergable-by-content section, with
  // internal linkage.
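
  // The copy helper generated here has the shape
  //   void __copy_helper_block_(void *dst, void *src);
  // For each captured field recorded in NoteForHelper that is __block or
  // otherwise requires copying, it hands the destination slot and the source
  // value to _Block_object_assign along with the field's flag.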
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__copy_helper_block_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__copy_helper_block_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
  llvm::Type *PtrPtrT;

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
    DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
    DstObj = Builder.CreateLoad(DstObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
        Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);

        llvm::Value *N = llvm::ConstantInt::get(
            llvm::Type::getInt32Ty(T->getContext()), flag);
        llvm::Value *F = getBlockObjectAssign();
        Builder.CreateCall3(F, Dstv, Srcv, N);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *BlockFunction::
GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
                              const llvm::StructType* T,
                              std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  // FIXME: We'd like to put these into a mergable-by-content section, with
  // internal linkage.
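
  // The destroy helper mirrors the copy helper: for each captured field in
  // NoteForHelper that is __block or requires copying, it loads the field
  // from the block literal and releases it via _Block_object_dispose
  // (through BuildBlockRelease below).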
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__destroy_helper_block_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__destroy_helper_block_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        BuildBlockRelease(Srcv, flag);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
                                       std::vector<HelperInfo> *NoteForHelper) {
  return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
                                                         T, NoteForHelper);
}

llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
                                      std::vector<HelperInfo> *NoteForHelperp) {
  return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
                                                            T, NoteForHelperp);
}

llvm::Constant *BlockFunction::
GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));

  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergable-by-content section, with
  // internal linkage.
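
  // Note: this helper is attached to a __Block_byref structure whose stored
  // object itself needs help when the structure is moved to the heap.  Both
  // parameters are pointers to __Block_byref structures; the object in the
  // variable slot (field 6, "x", in the layout assumed here) is handed to
  // _Block_object_assign with BLOCK_BYREF_CALLER added to the flags.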
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__Block_byref_id_object_copy_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  // dst->x
  llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);

  // src->x
  V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateLoad(V);
  V = Builder.CreateBitCast(V, T);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  llvm::Value *SrcObj = Builder.CreateLoad(V);

  flag |= BLOCK_BYREF_CALLER;

  llvm::Value *N = llvm::ConstantInt::get(
      llvm::Type::getInt32Ty(T->getContext()), flag);
  llvm::Value *F = getBlockObjectAssign();
  Builder.CreateCall3(F, DstObj, SrcObj, N);

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *
BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
                                                  int flag) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergable-by-content section, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__Block_byref_id_object_dispose_",
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  V = Builder.CreateLoad(V);

  flag |= BLOCK_BYREF_CALLER;
  BuildBlockRelease(V, flag);
  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
                                                    int Flag, unsigned Align) {
  // All alignments below that of pointer alignment collapse down to just
  // pointer alignment, as we always have at least that much alignment to
  // begin with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);

  // As an optimization, we only generate a single function of each kind we
  // might need.  We need a different one for each alignment and for each
  // setting of flags.  We mix Align and flag to get the kind.
  uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
  llvm::Constant *&Entry = CGM.AssignCache[Kind];
  if (Entry)
    return Entry;
  return Entry = CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, Flag);
}

llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
                                                       int Flag,
                                                       unsigned Align) {
  // All alignments below that of pointer alignment collapse down to just
  // pointer alignment, as we always have at least that much alignment to
  // begin with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);

  // As an optimization, we only generate a single function of each kind we
  // might need.  We need a different one for each alignment and for each
  // setting of flags.  We mix Align and flag to get the kind.
  uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
  llvm::Constant *&Entry = CGM.DestroyCache[Kind];
  if (Entry)
    return Entry;
  return Entry = CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
}

llvm::Value *BlockFunction::getBlockObjectDispose() {
  if (CGM.BlockObjectDispose == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
    CGM.BlockObjectDispose
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
  }
  return CGM.BlockObjectDispose;
}

llvm::Value *BlockFunction::getBlockObjectAssign() {
  if (CGM.BlockObjectAssign == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
    CGM.BlockObjectAssign
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
  }
  return CGM.BlockObjectAssign;
}

void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
  llvm::Value *F = getBlockObjectDispose();
  llvm::Value *N;
  V = Builder.CreateBitCast(V, PtrToInt8Ty);
  N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag);
  Builder.CreateCall2(F, V, N);
}

ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }

BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
                             CGBuilderTy &B)
  : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
  PtrToInt8Ty = llvm::PointerType::getUnqual(
            llvm::Type::getInt8Ty(VMContext));

  BlockHasCopyDispose = false;
}