//===--- CGBlocks.cpp - Emit LLVM Code for blocks -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit blocks.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Target/TargetData.h"
#include <algorithm>

using namespace clang;
using namespace CodeGen;

/// CGBlockInfo - Information to generate a block literal.
class clang::CodeGen::CGBlockInfo {
public:
  /// Name - The name of the block, kind of.
  const char *Name;

  /// DeclRefs - Variables from parent scopes that have been
  /// imported into this block.
  llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;

  /// InnerBlocks - This block and the blocks it encloses.
  llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks;

  /// CXXThisRef - Non-null if 'this' was required somewhere, in
  /// which case this is that expression.
  const CXXThisExpr *CXXThisRef;

  /// NeedsObjCSelf - True if something in this block has an implicit
  /// reference to 'self'.
  bool NeedsObjCSelf;

  /// These are initialized by GenerateBlockFunction.
  bool BlockHasCopyDispose;
  CharUnits BlockSize;
  CharUnits BlockAlign;
  llvm::SmallVector<const Expr*, 8> BlockLayout;

  CGBlockInfo(const char *Name);
};

CGBlockInfo::CGBlockInfo(const char *N)
  : Name(N), CXXThisRef(0), NeedsObjCSelf(false) {

  // Skip asm prefix, if any.
  if (Name && Name[0] == '\01')
    ++Name;
}


llvm::Constant *CodeGenFunction::
BuildDescriptorBlockDecl(const BlockExpr *BE, bool BlockHasCopyDispose,
                         CharUnits Size, const llvm::StructType* Ty,
                         std::vector<HelperInfo> *NoteForHelper) {
  const llvm::Type *UnsignedLongTy
    = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
  llvm::Constant *C;
  std::vector<llvm::Constant*> Elts;

  // reserved
  C = llvm::ConstantInt::get(UnsignedLongTy, 0);
  Elts.push_back(C);

  // Size
  // FIXME: What is the right way to say this doesn't fit? We should give
  // a user diagnostic in that case. Better fix would be to change the
  // API to size_t.
  C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity());
  Elts.push_back(C);

  // optional copy/dispose helpers
  if (BlockHasCopyDispose) {
    // copy_func_helper_decl
    Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));

    // destroy_func_decl
    Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
  }

  // Signature. non-optional ObjC-style method descriptor @encode sequence
  std::string BlockTypeEncoding;
  CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);

  Elts.push_back(llvm::ConstantExpr::getBitCast(
          CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty));

  // Layout.
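  // (The layout string is still reserved -- see the struct sketch in
  // getBlockDescriptorType() -- so only a placeholder 0 is emitted here.)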
  C = llvm::ConstantInt::get(UnsignedLongTy, 0);
  Elts.push_back(C);

  C = llvm::ConstantStruct::get(VMContext, Elts, false);

  C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage,
                               C, "__block_descriptor_tmp");
  return C;
}

llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
  if (NSConcreteGlobalBlock == 0)
    NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                      "_NSConcreteGlobalBlock");
  return NSConcreteGlobalBlock;
}

llvm::Constant *BlockModule::getNSConcreteStackBlock() {
  if (NSConcreteStackBlock == 0)
    NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                     "_NSConcreteStackBlock");
  return NSConcreteStackBlock;
}

static void CollectBlockDeclRefInfo(const Stmt *S, CGBlockInfo &Info) {
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (*I)
      CollectBlockDeclRefInfo(*I, Info);

  // We want to ensure we walk down into block literals so we can find
  // all nested BlockDeclRefExprs.
  if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
    Info.InnerBlocks.insert(BE->getBlockDecl());
    CollectBlockDeclRefInfo(BE->getBody(), Info);
  }

  else if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
    const ValueDecl *D = BDRE->getDecl();
    // FIXME: Handle enums.
    if (isa<FunctionDecl>(D))
      return;

    if (isa<ImplicitParamDecl>(D) &&
        isa<ObjCMethodDecl>(D->getDeclContext()) &&
        cast<ObjCMethodDecl>(D->getDeclContext())->getSelfDecl() == D) {
      Info.NeedsObjCSelf = true;
      return;
    }

    // Only Decls that escape are added.
    if (!Info.InnerBlocks.count(D->getDeclContext()))
      Info.DeclRefs.push_back(BDRE);
  }

  // Make sure to capture implicit 'self' references due to super calls.
  else if (const ObjCMessageExpr *E = dyn_cast<ObjCMessageExpr>(S)) {
    if (E->getReceiverKind() == ObjCMessageExpr::SuperClass ||
        E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
      Info.NeedsObjCSelf = true;
  }

  // Getter/setter uses may also cause implicit super references,
  // which we can check for with:
  else if (isa<ObjCSuperExpr>(S))
    Info.NeedsObjCSelf = true;

  else if (isa<CXXThisExpr>(S))
    Info.CXXThisRef = cast<CXXThisExpr>(S);
}

/// CanBlockBeGlobal - Given a CGBlockInfo struct, determines if a block can be
/// declared as a global variable instead of on the stack.
static bool CanBlockBeGlobal(const CGBlockInfo &Info) {
  return Info.DeclRefs.empty();
}

/// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to
/// ensure we can generate the debug information for the parameter for the
/// block invoke function.
static void AllocateAllBlockDeclRefs(CodeGenFunction &CGF, CGBlockInfo &Info) {
  if (Info.CXXThisRef)
    CGF.AllocateBlockCXXThisPointer(Info.CXXThisRef);

  for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
    CGF.AllocateBlockDecl(Info.DeclRefs[i]);

  if (Info.NeedsObjCSelf) {
    ValueDecl *Self = cast<ObjCMethodDecl>(CGF.CurFuncDecl)->getSelfDecl();
    BlockDeclRefExpr *BDRE =
      new (CGF.getContext()) BlockDeclRefExpr(Self, Self->getType(),
                                              SourceLocation(), false);
    Info.DeclRefs.push_back(BDRE);
    CGF.AllocateBlockDecl(BDRE);
  }
}

// FIXME: Push most into CGM, passing down a few bits, like current function
// name.
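// BuildBlockLiteralTmp - Emit a block literal for BE on the stack.  A rough
// sketch of the layout it builds (the struct tag below is illustrative only;
// the first five fields match getGenericBlockLiteralType(), and captured
// variables are appended after them at the offsets recorded in BlockDecls):
//
//   struct __block_literal {
//     void *__isa;                              // _NSConcreteStackBlock
//     int __flags;
//     int __reserved;
//     void (*__invoke)(void *);                 // the generated block function
//     struct __block_descriptor *__descriptor;
//     // ... captured variables (byref captures as pointers to their
//     //     byref structs) ...
//   };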
llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
  std::string Name = CurFn->getName();
  CGBlockInfo Info(Name.c_str());
  Info.InnerBlocks.insert(BE->getBlockDecl());
  CollectBlockDeclRefInfo(BE->getBody(), Info);

  // Check if the block can be global.
  // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
  // to just have one code path. We should move this function into CGM and pass
  // CGF, then we can just check to see if CGF is 0.
  if (0 && CanBlockBeGlobal(Info))
    return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());

  size_t BlockFields = 5;

  std::vector<llvm::Constant*> Elts(BlockFields);

  llvm::Constant *C;
  llvm::Value *V;

  {
    // C = BuildBlockStructInitlist();
    unsigned int flags = BLOCK_HAS_SIGNATURE;

    // We run this first so that we set BlockHasCopyDispose from the entire
    // block literal.
    // __invoke
    llvm::Function *Fn
      = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
                                                   LocalDeclMap);
    BlockHasCopyDispose |= Info.BlockHasCopyDispose;
    Elts[3] = Fn;

    // FIXME: Don't use BlockHasCopyDispose, it is set more often than
    // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
    if (Info.BlockHasCopyDispose)
      flags |= BLOCK_HAS_COPY_DISPOSE;

    // __isa
    C = CGM.getNSConcreteStackBlock();
    C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
    Elts[0] = C;

    // __flags
    {
      QualType BPT = BE->getType();
      const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
      QualType ResultType = ftype->getResultType();

      CallArgList Args;
      CodeGenTypes &Types = CGM.getTypes();
      const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
                                                      FunctionType::ExtInfo());
      if (CGM.ReturnTypeUsesSret(FnInfo))
        flags |= BLOCK_USE_STRET;
    }
    const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
      CGM.getTypes().ConvertType(CGM.getContext().IntTy));
    C = llvm::ConstantInt::get(IntTy, flags);
    Elts[1] = C;

    // __reserved
    C = llvm::ConstantInt::get(IntTy, 0);
    Elts[2] = C;

    if (Info.BlockLayout.empty()) {
      // __descriptor
      Elts[4] = BuildDescriptorBlockDecl(BE, Info.BlockHasCopyDispose,
                                         Info.BlockSize, 0, 0);

      // Optimize to being a global block.
      Elts[0] = CGM.getNSConcreteGlobalBlock();

      Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);

      C = llvm::ConstantStruct::get(VMContext, Elts, false);

      C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
                                   llvm::GlobalValue::InternalLinkage, C,
                                   "__block_holder_tmp_" +
                                   llvm::Twine(CGM.getGlobalUniqueCount()));
      QualType BPT = BE->getType();
      C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
      return C;
    }

    std::vector<const llvm::Type *> Types(BlockFields+Info.BlockLayout.size());
    for (int i=0; i<4; ++i)
      Types[i] = Elts[i]->getType();
    Types[4] = PtrToInt8Ty;

    for (unsigned i = 0, n = Info.BlockLayout.size(); i != n; ++i) {
      const Expr *E = Info.BlockLayout[i];
      const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
      QualType Ty = E->getType();
      if (BDRE && BDRE->isByRef()) {
        Types[i+BlockFields] =
          llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
      } else if (BDRE && BDRE->getDecl()->getType()->isReferenceType()) {
        Types[i+BlockFields] = llvm::PointerType::get(ConvertType(Ty), 0);
      } else
        Types[i+BlockFields] = ConvertType(Ty);
    }

    llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);

    llvm::AllocaInst *A = CreateTempAlloca(Ty);
    A->setAlignment(Info.BlockAlign.getQuantity());
    V = A;

    // Build layout / cleanup information for all the data entries in the
    // layout, and write the enclosing fields into the type.
    std::vector<HelperInfo> NoteForHelper(Info.BlockLayout.size());
    unsigned NumHelpers = 0;

    for (unsigned i=0; i<4; ++i)
      Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));

    for (unsigned i=0; i < Info.BlockLayout.size(); ++i) {
      const Expr *E = Info.BlockLayout[i];

      // Skip padding.
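      // (Padding entries in BlockLayout are the bare DeclRefExprs that
      // getBlockOffset() pushes for its char-array pad decls; they occupy
      // space in the struct but nothing needs to be stored into them.)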
      if (isa<DeclRefExpr>(E)) continue;

      llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
      HelperInfo &Note = NoteForHelper[NumHelpers++];

      Note.index = i+5;

      if (isa<CXXThisExpr>(E)) {
        Note.RequiresCopying = false;
        Note.flag = BLOCK_FIELD_IS_OBJECT;

        Builder.CreateStore(LoadCXXThis(), Addr);
        continue;
      }

      const BlockDeclRefExpr *BDRE = cast<BlockDeclRefExpr>(E);
      const ValueDecl *VD = BDRE->getDecl();
      QualType T = VD->getType();

      Note.RequiresCopying = BlockRequiresCopying(T);

      if (BDRE->isByRef()) {
        Note.flag = BLOCK_FIELD_IS_BYREF;
        if (T.isObjCGCWeak())
          Note.flag |= BLOCK_FIELD_IS_WEAK;
      } else if (T->isBlockPointerType()) {
        Note.flag = BLOCK_FIELD_IS_BLOCK;
      } else {
        Note.flag = BLOCK_FIELD_IS_OBJECT;
      }

      if (LocalDeclMap[VD]) {
        if (BDRE->isByRef()) {
          llvm::Value *Loc = LocalDeclMap[VD];
          Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
          Loc = Builder.CreateLoad(Loc);
          Builder.CreateStore(Loc, Addr);
          continue;
        } else {
          if (BDRE->getCopyConstructorExpr())
            E = BDRE->getCopyConstructorExpr();
          else {
            E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
                                          VD->getType().getNonReferenceType(),
                                               SourceLocation());
            if (VD->getType()->isReferenceType()) {
              E = new (getContext())
                UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
                              getContext().getPointerType(E->getType()),
                              SourceLocation());
            }
          }
        }
      }

      if (BDRE->isByRef()) {
        E = new (getContext())
          UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
                        getContext().getPointerType(E->getType()),
                        SourceLocation());
      }

      RValue r = EmitAnyExpr(E, Addr, false);
      if (r.isScalar()) {
        llvm::Value *Loc = r.getScalarVal();
        const llvm::Type *Ty = Types[i+BlockFields];
        if (BDRE->isByRef()) {
          // E is now the address of the value field; instead, we want the
          // address of the actual ByRef struct.  We optimize this slightly
          // compared to gcc by not grabbing the forwarding slot as this must
          // be done during Block_copy for us, and we can postpone the work
          // until then.
          CharUnits offset = BlockDecls[BDRE->getDecl()];

          llvm::Value *BlockLiteral = LoadBlockStruct();

          Loc = Builder.CreateGEP(BlockLiteral,
                     llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                                            offset.getQuantity()),
                                  "block.literal");
          Ty = llvm::PointerType::get(Ty, 0);
          Loc = Builder.CreateBitCast(Loc, Ty);
          Loc = Builder.CreateLoad(Loc);
          // Loc = Builder.CreateBitCast(Loc, Ty);
        }
        Builder.CreateStore(Loc, Addr);
      } else if (r.isComplex())
        // FIXME: implement
        ErrorUnsupported(BE, "complex in block literal");
      else if (r.isAggregate())
        ; // Already created into the destination
      else
        assert (0 && "bad block variable");
      // FIXME: Ensure that the offset created by the backend for
      // the struct matches the previously computed offset in BlockDecls.
    }
    NoteForHelper.resize(NumHelpers);

    // __descriptor
    llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE,
                                                       Info.BlockHasCopyDispose,
                                                       Info.BlockSize, Ty,
                                                       &NoteForHelper);
    Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
    Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
  }

  QualType BPT = BE->getType();
  V = Builder.CreateBitCast(V, ConvertType(BPT));
  // See if this is a __weak block variable and we must call objc_read_weak
  // on it.
  const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
  QualType RES = ftype->getResultType();
  if (RES.isObjCGCWeak()) {
    // Must cast argument to id*
    const llvm::Type *ObjectPtrTy =
      ConvertType(CGM.getContext().getObjCIdType());
    const llvm::Type *PtrObjectPtrTy =
      llvm::PointerType::getUnqual(ObjectPtrTy);
    V = Builder.CreateBitCast(V, PtrObjectPtrTy);
    V = CGM.getObjCRuntime().EmitObjCWeakRead(*this, V);
  }
  return V;
}


const llvm::Type *BlockModule::getBlockDescriptorType() {
  if (BlockDescriptorType)
    return BlockDescriptorType;

  const llvm::Type *UnsignedLongTy =
    getTypes().ConvertType(getContext().UnsignedLongTy);

  // struct __block_descriptor {
  //   unsigned long reserved;
  //   unsigned long block_size;
  //
  //   // later, the following will be added
  //
  //   struct {
  //     void (*copyHelper)();
  //     void (*destroyHelper)();
  //   } helpers;                // !!! optional
  //
  //   const char *signature;    // the block signature
  //   const char *layout;       // reserved
  // };
  BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
                                              UnsignedLongTy,
                                              UnsignedLongTy,
                                              NULL);

  getModule().addTypeName("struct.__block_descriptor",
                          BlockDescriptorType);

  return BlockDescriptorType;
}

const llvm::Type *BlockModule::getGenericBlockLiteralType() {
  if (GenericBlockLiteralType)
    return GenericBlockLiteralType;

  const llvm::Type *BlockDescPtrTy =
    llvm::PointerType::getUnqual(getBlockDescriptorType());

  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  // struct __block_literal_generic {
  //   void *__isa;
  //   int __flags;
  //   int __reserved;
  //   void (*__invoke)(void *);
  //   struct __block_descriptor *__descriptor;
  // };
  GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
                                                  PtrToInt8Ty,
                                                  IntTy,
                                                  IntTy,
                                                  PtrToInt8Ty,
                                                  BlockDescPtrTy,
                                                  NULL);

  getModule().addTypeName("struct.__block_literal_generic",
                          GenericBlockLiteralType);

  return GenericBlockLiteralType;
}


RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
                                          ReturnValueSlot ReturnValue) {
  const BlockPointerType *BPT =
    E->getCallee()->getType()->getAs<BlockPointerType>();

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());

  // Get a pointer to the generic block literal.
  const llvm::Type *BlockLiteralTy =
    llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());

  // Bitcast the callee to a block literal.
  llvm::Value *BlockLiteral =
    Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");

  // Get the function pointer from the literal.
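  // (__invoke is field 3 of struct __block_literal_generic above.)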
  llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");

  BlockLiteral =
    Builder.CreateBitCast(BlockLiteral,
                          llvm::Type::getInt8PtrTy(VMContext),
                          "tmp");

  // Add the block literal.
  QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
  CallArgList Args;
  Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));

  QualType FnType = BPT->getPointeeType();

  // And the rest of the arguments.
  EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
               E->arg_begin(), E->arg_end());

  // Load the function.
  llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp");

  const FunctionType *FuncTy = FnType->getAs<FunctionType>();
  QualType ResultType = FuncTy->getResultType();

  const CGFunctionInfo &FnInfo =
    CGM.getTypes().getFunctionInfo(ResultType, Args,
                                   FuncTy->getExtInfo());

  // Cast the function pointer to the right type.
  const llvm::Type *BlockFTy =
    CGM.getTypes().GetFunctionType(FnInfo, false);

  const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
  Func = Builder.CreateBitCast(Func, BlockFTyPtr);

  // And call the block.
  return EmitCall(FnInfo, Func, ReturnValue, Args);
}

void CodeGenFunction::AllocateBlockCXXThisPointer(const CXXThisExpr *E) {
  assert(BlockCXXThisOffset.isZero() && "already computed 'this' pointer");

  // Figure out what the offset is.
  QualType T = E->getType();
  std::pair<CharUnits,CharUnits> TypeInfo = getContext().getTypeInfoInChars(T);
  CharUnits Offset = getBlockOffset(TypeInfo.first, TypeInfo.second);

  BlockCXXThisOffset = Offset;
  BlockLayout.push_back(E);
}

void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
  const ValueDecl *VD = E->getDecl();
  CharUnits &Offset = BlockDecls[VD];

  // See if we have already allocated an offset for this variable.
  if (!Offset.isZero())
    return;

  // Don't run the expensive check, unless we have to.
  if (!BlockHasCopyDispose)
    if (E->isByRef()
        || BlockRequiresCopying(E->getType()))
      BlockHasCopyDispose = true;

  const ValueDecl *D = cast<ValueDecl>(E->getDecl());

  CharUnits Size;
  CharUnits Align;

  if (E->isByRef()) {
    llvm::tie(Size,Align) =
      getContext().getTypeInfoInChars(getContext().VoidPtrTy);
  } else {
    Size = getContext().getTypeSizeInChars(D->getType());
    Align = getContext().getDeclAlign(D);
  }

  Offset = getBlockOffset(Size, Align);
  BlockLayout.push_back(E);
}

llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
                                                 bool IsByRef) {

  CharUnits offset = BlockDecls[VD];
  assert(!offset.isZero() && "getting address of unallocated decl");

  llvm::Value *BlockLiteral = LoadBlockStruct();
  llvm::Value *V = Builder.CreateGEP(BlockLiteral,
                     llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                                            offset.getQuantity()),
                                     "block.literal");
  if (IsByRef) {
    const llvm::Type *PtrStructTy
      = llvm::PointerType::get(BuildByRefType(VD), 0);
    // The block literal will need a copy/destroy helper.
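    // (What the loads below do: the capture slot in the block holds a pointer
    // to the variable's byref struct (built by BuildByRefType); field 1 of
    // that struct is its "forwarding" pointer, which we chase so we always
    // see the current, possibly heap-copied, struct before indexing the
    // variable's own field via getByRefValueLLVMField.)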
    BlockHasCopyDispose = true;

    const llvm::Type *Ty = PtrStructTy;
    Ty = llvm::PointerType::get(Ty, 0);
    V = Builder.CreateBitCast(V, Ty);
    V = Builder.CreateLoad(V);
    V = Builder.CreateStructGEP(V, 1, "forwarding");
    V = Builder.CreateLoad(V);
    V = Builder.CreateBitCast(V, PtrStructTy);
    V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
                                VD->getNameAsString());
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V);
  } else {
    const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
    Ty = llvm::PointerType::get(Ty, 0);
    V = Builder.CreateBitCast(V, Ty);
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V, "ref.tmp");
  }
  return V;
}

llvm::Constant *
BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
  // Generate the block descriptor.
  const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  llvm::Constant *DescriptorFields[4];

  // Reserved
  DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);

  // Block literal size. For global blocks we just use the size of the generic
  // block literal struct.
  CharUnits BlockLiteralSize =
    CGM.GetTargetTypeStoreSize(getGenericBlockLiteralType());
  DescriptorFields[1] =
    llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize.getQuantity());

  // signature. non-optional ObjC-style method descriptor @encode sequence
  std::string BlockTypeEncoding;
  CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);

  DescriptorFields[2] = llvm::ConstantExpr::getBitCast(
          CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);

  // layout
  DescriptorFields[3] =
    llvm::ConstantInt::get(UnsignedLongTy,0);

  // build the structure from the 4 elements
  llvm::Constant *DescriptorStruct =
    llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 4, false);

  llvm::GlobalVariable *Descriptor =
    new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
                             llvm::GlobalVariable::InternalLinkage,
                             DescriptorStruct, "__block_descriptor_global");

  int FieldCount = 5;
  // Generate the constants for the block literal.

  std::vector<llvm::Constant*> LiteralFields(FieldCount);

  CGBlockInfo Info(n);
  llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
  llvm::Function *Fn
    = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap);
  assert(Info.BlockSize == BlockLiteralSize
         && "no imports allowed for global block");

  // isa
  LiteralFields[0] = getNSConcreteGlobalBlock();

  // Flags
  LiteralFields[1] =
    llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE);

  // Reserved
  LiteralFields[2] = llvm::Constant::getNullValue(IntTy);

  // Function
  LiteralFields[3] = Fn;

  // Descriptor
  LiteralFields[4] = Descriptor;

  llvm::Constant *BlockLiteralStruct =
    llvm::ConstantStruct::get(VMContext, LiteralFields, false);

  llvm::GlobalVariable *BlockLiteral =
    new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
                             llvm::GlobalVariable::InternalLinkage,
                             BlockLiteralStruct, "__block_literal_global");

  return BlockLiteral;
}

llvm::Value *CodeGenFunction::LoadBlockStruct() {
  llvm::Value *V = Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()],
                                      "self");
  // For now, we codegen based upon byte offsets.
  return Builder.CreateBitCast(V, PtrToInt8Ty);
}

llvm::Function *
CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
                                       CGBlockInfo &Info,
                                       const Decl *OuterFuncDecl,
                       llvm::DenseMap<const Decl*, llvm::Value*> ldm) {

  // Check if we should generate debug info for this block.
  if (CGM.getDebugInfo())
    DebugInfo = CGM.getDebugInfo();

  // Arrange for local static and local extern declarations to appear
  // to be local to this function as well, as they are directly referenced
  // in a block.
  for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
       i != ldm.end();
       ++i) {
    const VarDecl *VD = dyn_cast<VarDecl>(i->first);

    if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
      LocalDeclMap[VD] = i->second;
  }

  BlockOffset =
    CGM.GetTargetTypeStoreSize(CGM.getGenericBlockLiteralType());
  BlockAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);

  const FunctionType *BlockFunctionType = BExpr->getFunctionType();
  QualType ResultType;
  FunctionType::ExtInfo EInfo = getFunctionExtInfo(*BlockFunctionType);
  bool IsVariadic;
  if (const FunctionProtoType *FTy =
      dyn_cast<FunctionProtoType>(BlockFunctionType)) {
    ResultType = FTy->getResultType();
    IsVariadic = FTy->isVariadic();
  } else {
    // K&R style block.
    ResultType = BlockFunctionType->getResultType();
    IsVariadic = false;
  }

  FunctionArgList Args;

  CurFuncDecl = OuterFuncDecl;

  const BlockDecl *BD = BExpr->getBlockDecl();

  IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");

  // Build the block struct now.
  AllocateAllBlockDeclRefs(*this, Info);

  QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
                                                  BlockLayout);

  // FIXME: This leaks
  ImplicitParamDecl *SelfDecl =
    ImplicitParamDecl::Create(getContext(), const_cast<BlockDecl*>(BD),
                              SourceLocation(), II,
                              ParmTy);

  Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
  BlockStructDecl = SelfDecl;

  for (BlockDecl::param_const_iterator i = BD->param_begin(),
       e = BD->param_end(); i != e; ++i)
    Args.push_back(std::make_pair(*i, (*i)->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(ResultType, Args, EInfo);

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);

  MangleBuffer Name;
  CGM.getMangledName(Name, BD);
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name.getString(), &CGM.getModule());

  CGM.SetInternalFunctionAttributes(BD, Fn, FI);

  StartFunction(BD, ResultType, Fn, Args,
                BExpr->getBody()->getLocEnd());

  CurFuncDecl = OuterFuncDecl;
  CurCodeDecl = BD;

  // If we have a C++ 'this' reference, go ahead and force it into
  // existence now.
  if (Info.CXXThisRef) {
    assert(!BlockCXXThisOffset.isZero() &&
           "haven't yet allocated 'this' reference");

    // TODO: I have a dream that one day this will be typed.
    llvm::Value *BlockLiteral = LoadBlockStruct();
    llvm::Value *ThisPtrRaw =
      Builder.CreateConstInBoundsGEP1_64(BlockLiteral,
                                         BlockCXXThisOffset.getQuantity(),
                                         "this.ptr.raw");

    const llvm::Type *Ty =
      CGM.getTypes().ConvertType(Info.CXXThisRef->getType());
    Ty = llvm::PointerType::get(Ty, 0);
    llvm::Value *ThisPtr = Builder.CreateBitCast(ThisPtrRaw, Ty, "this.ptr");

    CXXThisValue = Builder.CreateLoad(ThisPtr, "this");
  }

  // If we have an Objective C 'self' reference, go ahead and force it
  // into existence now.
  if (Info.NeedsObjCSelf) {
    ValueDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
    LocalDeclMap[Self] = GetAddrOfBlockDecl(Self, false);
  }

  // Save a spot to insert the debug information for all the BlockDeclRefDecls.
  llvm::BasicBlock *entry = Builder.GetInsertBlock();
  llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
  --entry_ptr;

  EmitStmt(BExpr->getBody());

  // Remember where we were...
  llvm::BasicBlock *resume = Builder.GetInsertBlock();

  // Go back to the entry.
  ++entry_ptr;
  Builder.SetInsertPoint(entry, entry_ptr);

  if (CGDebugInfo *DI = getDebugInfo()) {
    // Emit debug information for all the BlockDeclRefDecls.
    // FIXME: also for 'this'
    for (unsigned i = 0, e = BlockLayout.size(); i != e; ++i) {
      if (const BlockDeclRefExpr *BDRE =
            dyn_cast<BlockDeclRefExpr>(BlockLayout[i])) {
        const ValueDecl *D = BDRE->getDecl();
        DI->setLocation(D->getLocation());
        DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
                                       LocalDeclMap[getBlockStructDecl()],
                                              Builder, this);
      }
    }
  }
  // And resume where we left off.
  if (resume == 0)
    Builder.ClearInsertionPoint();
  else
    Builder.SetInsertPoint(resume);

  FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());

  // The runtime needs a minimum alignment of a void *.
  CharUnits MinAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
  BlockOffset = CharUnits::fromQuantity(
      llvm::RoundUpToAlignment(BlockOffset.getQuantity(),
                               MinAlign.getQuantity()));

  Info.BlockSize = BlockOffset;
  Info.BlockAlign = BlockAlign;
  Info.BlockLayout = BlockLayout;
  Info.BlockHasCopyDispose = BlockHasCopyDispose;
  return Fn;
}

CharUnits BlockFunction::getBlockOffset(CharUnits Size, CharUnits Align) {
  assert((Align.isPositive()) && "alignment must be 1 byte or more");

  CharUnits OldOffset = BlockOffset;

  // Ensure proper alignment, even if it means we have to have a gap
  BlockOffset = CharUnits::fromQuantity(
      llvm::RoundUpToAlignment(BlockOffset.getQuantity(), Align.getQuantity()));
  BlockAlign = std::max(Align, BlockAlign);

  CharUnits Pad = BlockOffset - OldOffset;
  if (Pad.isPositive()) {
    QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
                                                       llvm::APInt(32,
                                                         Pad.getQuantity()),
                                                       ArrayType::Normal, 0);
    ValueDecl *PadDecl = VarDecl::Create(getContext(),
                                         getContext().getTranslationUnitDecl(),
                                         SourceLocation(),
                                         0, QualType(PadTy), 0,
                                         VarDecl::None, VarDecl::None);
    Expr *E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
                                             SourceLocation());
    BlockLayout.push_back(E);
  }

  BlockOffset += Size;
  return BlockOffset - Size;
}

llvm::Constant *BlockFunction::
GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
                           std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  // FIXME: We'd like to put these into a mergeable-by-content section, with
  // internal linkage.
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__copy_helper_block_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__copy_helper_block_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                        getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
  llvm::Type *PtrPtrT;

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
    DstObj = Builder.CreateLoad(DstObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
        Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);

        llvm::Value *N = llvm::ConstantInt::get(
              llvm::Type::getInt32Ty(T->getContext()), flag);
        llvm::Value *F = getBlockObjectAssign();
        Builder.CreateCall3(F, Dstv, Srcv, N);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *BlockFunction::
GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
                              const llvm::StructType* T,
                              std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  // FIXME: We'd like to put these into a mergeable-by-content section, with
  // internal linkage.
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__destroy_helper_block_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__destroy_helper_block_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                        getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false, true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        BuildBlockRelease(Srcv, flag);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
                                       std::vector<HelperInfo> *NoteForHelper) {
  return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
                                                         T, NoteForHelper);
}

llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
                                      std::vector<HelperInfo> *NoteForHelperp) {
  return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
                                                            T, NoteForHelperp);
}

llvm::Constant *BlockFunction::
GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));

  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergeable-by-content section, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__Block_byref_id_object_copy_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                        getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false, true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  // dst->x
  llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);

  // src->x
  V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateLoad(V);
  V = Builder.CreateBitCast(V, T);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  llvm::Value *SrcObj = Builder.CreateLoad(V);

  flag |= BLOCK_BYREF_CALLER;

  llvm::Value *N = llvm::ConstantInt::get(
          llvm::Type::getInt32Ty(T->getContext()), flag);
  llvm::Value *F = getBlockObjectAssign();
  Builder.CreateCall3(F, DstObj, SrcObj, N);

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *
BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
                                                  int flag) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergeable-by-content section, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__Block_byref_id_object_dispose_",
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                        getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false, true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  V = Builder.CreateLoad(V);

  flag |= BLOCK_BYREF_CALLER;
  BuildBlockRelease(V, flag);
  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
                                                    int Flag, unsigned Align) {
  // All alignments below that of pointer alignment collapse down to just
  // pointer alignment, as we always have at least that much alignment to begin
  // with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);

  // As an optimization, we only generate a single function of each kind we
  // might need.  We need a different one for each alignment and for each
  // setting of flags.  We mix Align and flag to get the kind.
  uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
  llvm::Constant *&Entry = CGM.AssignCache[Kind];
  if (Entry)
    return Entry;
  return Entry = CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, Flag);
}

llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
                                                       int Flag,
                                                       unsigned Align) {
  // All alignments below that of pointer alignment collapse down to just
  // pointer alignment, as we always have at least that much alignment to begin
  // with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);

  // As an optimization, we only generate a single function of each kind we
  // might need.  We need a different one for each alignment and for each
  // setting of flags.  We mix Align and flag to get the kind.
  uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
  llvm::Constant *&Entry = CGM.DestroyCache[Kind];
  if (Entry)
    return Entry;
  return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
}

llvm::Value *BlockFunction::getBlockObjectDispose() {
  if (CGM.BlockObjectDispose == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
    CGM.BlockObjectDispose
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
  }
  return CGM.BlockObjectDispose;
}

llvm::Value *BlockFunction::getBlockObjectAssign() {
  if (CGM.BlockObjectAssign == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
    CGM.BlockObjectAssign
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
  }
  return CGM.BlockObjectAssign;
}

void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
  llvm::Value *F = getBlockObjectDispose();
  llvm::Value *N;
  V = Builder.CreateBitCast(V, PtrToInt8Ty);
  N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag);
  Builder.CreateCall2(F, V, N);
}

ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }

BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
                             CGBuilderTy &B)
  : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
  PtrToInt8Ty = llvm::PointerType::getUnqual(
            llvm::Type::getInt8Ty(VMContext));

  BlockHasCopyDispose = false;
}