//===--- CGBlocks.cpp - Emit LLVM Code for blocks -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit blocks.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Target/TargetData.h"
#include <algorithm>

using namespace clang;
using namespace CodeGen;

/// CGBlockInfo - Information to generate a block literal.
class clang::CodeGen::CGBlockInfo {
public:
  /// Name - The name of the block, kind of.
  const char *Name;

  /// DeclRefs - Variables from parent scopes that have been
  /// imported into this block.
  llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;

  /// InnerBlocks - This block and the blocks it encloses.
  llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks;

  /// CXXThisRef - Non-null if 'this' was required somewhere, in
  /// which case this is that expression.
  const CXXThisExpr *CXXThisRef;

  /// NeedsObjCSelf - True if something in this block has an implicit
  /// reference to 'self'.
  bool NeedsObjCSelf;

  /// These are initialized by GenerateBlockFunction.
  bool BlockHasCopyDispose;
  CharUnits BlockSize;
  CharUnits BlockAlign;
  llvm::SmallVector<const Expr*, 8> BlockLayout;

  CGBlockInfo(const char *Name);
};

CGBlockInfo::CGBlockInfo(const char *N)
  : Name(N), CXXThisRef(0), NeedsObjCSelf(false) {

  // Skip asm prefix, if any.
  if (Name && Name[0] == '\01')
    ++Name;
}


llvm::Constant *CodeGenFunction::
BuildDescriptorBlockDecl(const BlockExpr *BE, bool BlockHasCopyDispose,
                         CharUnits Size, const llvm::StructType* Ty,
                         std::vector<HelperInfo> *NoteForHelper) {
  const llvm::Type *UnsignedLongTy
    = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
  llvm::Constant *C;
  std::vector<llvm::Constant*> Elts;

  // reserved
  C = llvm::ConstantInt::get(UnsignedLongTy, 0);
  Elts.push_back(C);

  // Size
  // FIXME: What is the right way to say this doesn't fit? We should give
  // a user diagnostic in that case. A better fix would be to change the
  // API to size_t.
  C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity());
  Elts.push_back(C);

  // optional copy/dispose helpers
  if (BlockHasCopyDispose) {
    // copy_func_helper_decl
    Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));

    // destroy_func_decl
    Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
  }

  // Signature. Non-optional ObjC-style method descriptor @encode sequence.
  std::string BlockTypeEncoding;
  CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);

  Elts.push_back(llvm::ConstantExpr::getBitCast(
          CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty));

  // Layout.
  C = llvm::ConstantInt::get(UnsignedLongTy, 0);
  Elts.push_back(C);

  C = llvm::ConstantStruct::get(VMContext, Elts, false);

  C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage,
                               C, "__block_descriptor_tmp");
  return C;
}

llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
  if (NSConcreteGlobalBlock == 0)
    NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                      "_NSConcreteGlobalBlock");
  return NSConcreteGlobalBlock;
}

llvm::Constant *BlockModule::getNSConcreteStackBlock() {
  if (NSConcreteStackBlock == 0)
    NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                     "_NSConcreteStackBlock");
  return NSConcreteStackBlock;
}

static void CollectBlockDeclRefInfo(const Stmt *S, CGBlockInfo &Info) {
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (*I)
      CollectBlockDeclRefInfo(*I, Info);

  // We want to ensure we walk down into block literals so we can find
  // all nested BlockDeclRefExprs.
  if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
    Info.InnerBlocks.insert(BE->getBlockDecl());
    CollectBlockDeclRefInfo(BE->getBody(), Info);
  }

  else if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
    const ValueDecl *D = BDRE->getDecl();
    // FIXME: Handle enums.
    if (isa<FunctionDecl>(D))
      return;

    if (isa<ImplicitParamDecl>(D) &&
        isa<ObjCMethodDecl>(D->getDeclContext()) &&
        cast<ObjCMethodDecl>(D->getDeclContext())->getSelfDecl() == D) {
      Info.NeedsObjCSelf = true;
      return;
    }

    // Only Decls that escape are added.
    if (!Info.InnerBlocks.count(D->getDeclContext()))
      Info.DeclRefs.push_back(BDRE);
  }

  // Make sure to capture implicit 'self' references due to super calls.
  else if (const ObjCMessageExpr *E = dyn_cast<ObjCMessageExpr>(S)) {
    if (E->getReceiverKind() == ObjCMessageExpr::SuperClass ||
        E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
      Info.NeedsObjCSelf = true;
  }

  // Getter/setter uses may also cause implicit super references,
  // which we can check for with:
  else if (isa<ObjCSuperExpr>(S))
    Info.NeedsObjCSelf = true;

  else if (isa<CXXThisExpr>(S))
    Info.CXXThisRef = cast<CXXThisExpr>(S);
}

/// CanBlockBeGlobal - Given a CGBlockInfo struct, determines if a block can be
/// declared as a global variable instead of on the stack.
static bool CanBlockBeGlobal(const CGBlockInfo &Info) {
  return Info.DeclRefs.empty();
}

/// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to
/// ensure we can generate the debug information for the parameter for the
/// block invoke function.
static void AllocateAllBlockDeclRefs(CodeGenFunction &CGF, CGBlockInfo &Info) {
  if (Info.CXXThisRef)
    CGF.AllocateBlockCXXThisPointer(Info.CXXThisRef);

  for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
    CGF.AllocateBlockDecl(Info.DeclRefs[i]);

  if (Info.NeedsObjCSelf) {
    ValueDecl *Self = cast<ObjCMethodDecl>(CGF.CurFuncDecl)->getSelfDecl();
    BlockDeclRefExpr *BDRE =
      new (CGF.getContext()) BlockDeclRefExpr(Self, Self->getType(),
                                              SourceLocation(), false);
    Info.DeclRefs.push_back(BDRE);
    CGF.AllocateBlockDecl(BDRE);
  }
}

// FIXME: Push most into CGM, passing down a few bits, like current function
// name.
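/// BuildBlockLiteralTmp - Emit the block literal for the given block
/// expression as a temporary on the current function's stack, filling in the
/// isa/flags/invoke/descriptor header fields and the captured variables.
/// When nothing is captured, the literal is instead emitted as a constant
/// global block.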
llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
  std::string Name = CurFn->getName();
  CGBlockInfo Info(Name.c_str());
  Info.InnerBlocks.insert(BE->getBlockDecl());
  CollectBlockDeclRefInfo(BE->getBody(), Info);

  // Check if the block can be global.
  // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
  // to just have one code path. We should move this function into CGM and pass
  // CGF, then we can just check to see if CGF is 0.
  if (0 && CanBlockBeGlobal(Info))
    return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());

  size_t BlockFields = 5;

  std::vector<llvm::Constant*> Elts(BlockFields);

  llvm::Constant *C;
  llvm::Value *V;

  {
    // C = BuildBlockStructInitlist();
    unsigned int flags = BLOCK_HAS_SIGNATURE;

    // We run this first so that we set BlockHasCopyDispose from the entire
    // block literal.
    // __invoke
    llvm::Function *Fn
      = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
                                                   LocalDeclMap);
    BlockHasCopyDispose |= Info.BlockHasCopyDispose;
    Elts[3] = Fn;

    // FIXME: Don't use BlockHasCopyDispose, it is set more often than
    // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
    if (Info.BlockHasCopyDispose)
      flags |= BLOCK_HAS_COPY_DISPOSE;

    // __isa
    C = CGM.getNSConcreteStackBlock();
    C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
    Elts[0] = C;

    // __flags
    {
      QualType BPT = BE->getType();
      const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
      QualType ResultType = ftype->getResultType();

      CallArgList Args;
      CodeGenTypes &Types = CGM.getTypes();
      const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
                                                      FunctionType::ExtInfo());
      if (CGM.ReturnTypeUsesSret(FnInfo))
        flags |= BLOCK_USE_STRET;
    }
    const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
      CGM.getTypes().ConvertType(CGM.getContext().IntTy));
    C = llvm::ConstantInt::get(IntTy, flags);
    Elts[1] = C;

    // __reserved
    C = llvm::ConstantInt::get(IntTy, 0);
    Elts[2] = C;

    if (Info.BlockLayout.empty()) {
      // __descriptor
      Elts[4] = BuildDescriptorBlockDecl(BE, Info.BlockHasCopyDispose,
                                         Info.BlockSize, 0, 0);

      // Optimize to being a global block.
      Elts[0] = CGM.getNSConcreteGlobalBlock();

      Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);

      C = llvm::ConstantStruct::get(VMContext, Elts, false);

      C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
                                   llvm::GlobalValue::InternalLinkage, C,
                                   "__block_holder_tmp_" +
                                   llvm::Twine(CGM.getGlobalUniqueCount()));
      QualType BPT = BE->getType();
      C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
      return C;
    }

    std::vector<const llvm::Type *> Types(BlockFields+Info.BlockLayout.size());
    for (int i=0; i<4; ++i)
      Types[i] = Elts[i]->getType();
    Types[4] = PtrToInt8Ty;

    for (unsigned i = 0, n = Info.BlockLayout.size(); i != n; ++i) {
      const Expr *E = Info.BlockLayout[i];
      const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
      QualType Ty = E->getType();
      if (BDRE && BDRE->isByRef()) {
        Types[i+BlockFields] =
          llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
      } else if (BDRE && BDRE->getDecl()->getType()->isReferenceType()) {
        Types[i+BlockFields] = llvm::PointerType::get(ConvertType(Ty), 0);
      } else
        Types[i+BlockFields] = ConvertType(Ty);
    }

    llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);

    llvm::AllocaInst *A = CreateTempAlloca(Ty);
    A->setAlignment(Info.BlockAlign.getQuantity());
    V = A;

    // Build layout / cleanup information for all the data entries in the
    // layout, and write the enclosing fields into the type.
    std::vector<HelperInfo> NoteForHelper(Info.BlockLayout.size());
    unsigned NumHelpers = 0;

    for (unsigned i=0; i<4; ++i)
      Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));

    for (unsigned i=0; i < Info.BlockLayout.size(); ++i) {
      const Expr *E = Info.BlockLayout[i];

      // Skip padding.
      if (isa<DeclRefExpr>(E)) continue;

      llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
      HelperInfo &Note = NoteForHelper[NumHelpers++];

      Note.index = i+5;

      if (isa<CXXThisExpr>(E)) {
        Note.RequiresCopying = false;
        Note.flag = BLOCK_FIELD_IS_OBJECT;

        Builder.CreateStore(LoadCXXThis(), Addr);
        continue;
      }

      const BlockDeclRefExpr *BDRE = cast<BlockDeclRefExpr>(E);
      const ValueDecl *VD = BDRE->getDecl();
      QualType T = VD->getType();

      Note.RequiresCopying = BlockRequiresCopying(T);

      if (BDRE->isByRef()) {
        Note.flag = BLOCK_FIELD_IS_BYREF;
        if (T.isObjCGCWeak())
          Note.flag |= BLOCK_FIELD_IS_WEAK;
      } else if (T->isBlockPointerType()) {
        Note.flag = BLOCK_FIELD_IS_BLOCK;
      } else {
        Note.flag = BLOCK_FIELD_IS_OBJECT;
      }

      if (LocalDeclMap[VD]) {
        if (BDRE->isByRef()) {
          llvm::Value *Loc = LocalDeclMap[VD];
          Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
          Loc = Builder.CreateLoad(Loc);
          Builder.CreateStore(Loc, Addr);
          continue;
        } else {
          if (BDRE->getCopyConstructorExpr())
            E = BDRE->getCopyConstructorExpr();
          else {
            E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
                                          VD->getType().getNonReferenceType(),
                                               SourceLocation());
            if (VD->getType()->isReferenceType()) {
              E = new (getContext())
                UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
                              getContext().getPointerType(E->getType()),
                              SourceLocation());
            }
          }
        }
      }

      if (BDRE->isByRef()) {
        E = new (getContext())
          UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
                        getContext().getPointerType(E->getType()),
                        SourceLocation());
      }

      RValue r = EmitAnyExpr(E, Addr, false);
      if (r.isScalar()) {
        llvm::Value *Loc = r.getScalarVal();
        const llvm::Type *Ty = Types[i+BlockFields];
        if (BDRE->isByRef()) {
          // E is now the address of the value field, but we want the address
          // of the actual ByRef struct. We optimize this slightly compared to
          // gcc by not grabbing the forwarding slot, as this must be done
          // during Block_copy for us, and we can postpone the work until then.
          CharUnits offset = BlockDecls[BDRE->getDecl()];

          llvm::Value *BlockLiteral = LoadBlockStruct();

          Loc = Builder.CreateGEP(BlockLiteral,
                     llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                                            offset.getQuantity()),
                                  "block.literal");
          Ty = llvm::PointerType::get(Ty, 0);
          Loc = Builder.CreateBitCast(Loc, Ty);
          Loc = Builder.CreateLoad(Loc);
          // Loc = Builder.CreateBitCast(Loc, Ty);
        }
        Builder.CreateStore(Loc, Addr);
      } else if (r.isComplex())
        // FIXME: implement
        ErrorUnsupported(BE, "complex in block literal");
      else if (r.isAggregate())
        ; // Already created into the destination
      else
        assert(0 && "bad block variable");
      // FIXME: Ensure that the offset created by the backend for
      // the struct matches the previously computed offset in BlockDecls.
    }
    NoteForHelper.resize(NumHelpers);

    // __descriptor
    llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE,
                                                      Info.BlockHasCopyDispose,
                                                       Info.BlockSize, Ty,
                                                       &NoteForHelper);
    Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
    Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
  }

  QualType BPT = BE->getType();
  V = Builder.CreateBitCast(V, ConvertType(BPT));
  // See if this is a __weak block variable; if so, we must call
  // objc_read_weak on it.
  const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
  QualType RES = ftype->getResultType();
  if (RES.isObjCGCWeak()) {
    // Must cast argument to id*
    const llvm::Type *ObjectPtrTy =
      ConvertType(CGM.getContext().getObjCIdType());
    const llvm::Type *PtrObjectPtrTy =
      llvm::PointerType::getUnqual(ObjectPtrTy);
    V = Builder.CreateBitCast(V, PtrObjectPtrTy);
    V = CGM.getObjCRuntime().EmitObjCWeakRead(*this, V);
  }
  return V;
}


const llvm::Type *BlockModule::getBlockDescriptorType() {
  if (BlockDescriptorType)
    return BlockDescriptorType;

  const llvm::Type *UnsignedLongTy =
    getTypes().ConvertType(getContext().UnsignedLongTy);

  // struct __block_descriptor {
  //   unsigned long reserved;
  //   unsigned long block_size;
  //
  //   // later, the following will be added
  //
  //   struct {
  //     void (*copyHelper)();
  //     void (*disposeHelper)();
  //   } helpers;                // !!! optional
  //
  //   const char *signature;    // the block signature
  //   const char *layout;       // reserved
  // };
  BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
                                              UnsignedLongTy,
                                              UnsignedLongTy,
                                              NULL);

  getModule().addTypeName("struct.__block_descriptor",
                          BlockDescriptorType);

  return BlockDescriptorType;
}

const llvm::Type *BlockModule::getGenericBlockLiteralType() {
  if (GenericBlockLiteralType)
    return GenericBlockLiteralType;

  const llvm::Type *BlockDescPtrTy =
    llvm::PointerType::getUnqual(getBlockDescriptorType());

  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  // struct __block_literal_generic {
  //   void *__isa;
  //   int __flags;
  //   int __reserved;
  //   void (*__invoke)(void *);
  //   struct __block_descriptor *__descriptor;
  // };
  GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
                                                  PtrToInt8Ty,
                                                  IntTy,
                                                  IntTy,
                                                  PtrToInt8Ty,
                                                  BlockDescPtrTy,
                                                  NULL);

  getModule().addTypeName("struct.__block_literal_generic",
                          GenericBlockLiteralType);

  return GenericBlockLiteralType;
}


RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
                                          ReturnValueSlot ReturnValue) {
  const BlockPointerType *BPT =
    E->getCallee()->getType()->getAs<BlockPointerType>();

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());

  // Get a pointer to the generic block literal.
  const llvm::Type *BlockLiteralTy =
    llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());

  // Bitcast the callee to a block literal.
  llvm::Value *BlockLiteral =
    Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");

  // Get the function pointer from the literal.
  llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");

  BlockLiteral =
    Builder.CreateBitCast(BlockLiteral,
                          llvm::Type::getInt8PtrTy(VMContext),
                          "tmp");

  // Add the block literal.
  QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
  CallArgList Args;
  Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));

  QualType FnType = BPT->getPointeeType();

  // And the rest of the arguments.
  EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
               E->arg_begin(), E->arg_end());

  // Load the function.
  llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp");

  const FunctionType *FuncTy = FnType->getAs<FunctionType>();
  QualType ResultType = FuncTy->getResultType();

  const CGFunctionInfo &FnInfo =
    CGM.getTypes().getFunctionInfo(ResultType, Args,
                                   FuncTy->getExtInfo());

  // Cast the function pointer to the right type.
  const llvm::Type *BlockFTy =
    CGM.getTypes().GetFunctionType(FnInfo, false);

  const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
  Func = Builder.CreateBitCast(Func, BlockFTyPtr);

  // And call the block.
  return EmitCall(FnInfo, Func, ReturnValue, Args);
}

void CodeGenFunction::AllocateBlockCXXThisPointer(const CXXThisExpr *E) {
  assert(BlockCXXThisOffset.isZero() && "already computed 'this' pointer");

  // Figure out what the offset is.
  QualType T = E->getType();
  std::pair<CharUnits,CharUnits> TypeInfo = getContext().getTypeInfoInChars(T);
  CharUnits Offset = getBlockOffset(TypeInfo.first, TypeInfo.second);

  BlockCXXThisOffset = Offset;
  BlockLayout.push_back(E);
}

void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
  const ValueDecl *VD = E->getDecl();
  CharUnits &Offset = BlockDecls[VD];

  // See if we have already allocated an offset for this variable.
  if (!Offset.isZero())
    return;

  // Don't run the expensive check, unless we have to.
  if (!BlockHasCopyDispose)
    if (E->isByRef()
        || BlockRequiresCopying(E->getType()))
      BlockHasCopyDispose = true;

  const ValueDecl *D = cast<ValueDecl>(E->getDecl());

  CharUnits Size;
  CharUnits Align;

  if (E->isByRef()) {
    llvm::tie(Size,Align) =
      getContext().getTypeInfoInChars(getContext().VoidPtrTy);
  } else {
    Size = getContext().getTypeSizeInChars(D->getType());
    Align = getContext().getDeclAlign(D);
  }

  Offset = getBlockOffset(Size, Align);
  BlockLayout.push_back(E);
}

llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
                                                 bool IsByRef) {
  llvm::Value *&VE = BlockDeclsValue[VD];
  if (VE)
    return VE;

  CharUnits offset = BlockDecls[VD];
  assert(!offset.isZero() && "getting address of unallocated decl");

  llvm::Value *BlockLiteral = LoadBlockStruct();
  llvm::Value *V = Builder.CreateGEP(BlockLiteral,
                      llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                                             offset.getQuantity()),
                                     "block.literal");
  if (IsByRef) {
    const llvm::Type *PtrStructTy
      = llvm::PointerType::get(BuildByRefType(VD), 0);
    // The block literal will need a copy/destroy helper.
    BlockHasCopyDispose = true;

    const llvm::Type *Ty = PtrStructTy;
    Ty = llvm::PointerType::get(Ty, 0);
    V = Builder.CreateBitCast(V, Ty);
    V = Builder.CreateLoad(V);
    V = Builder.CreateStructGEP(V, 1, "forwarding");
    V = Builder.CreateLoad(V);
    V = Builder.CreateBitCast(V, PtrStructTy);
    V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
                                VD->getNameAsString());
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V);
  } else {
    const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
    Ty = llvm::PointerType::get(Ty, 0);
    V = Builder.CreateBitCast(V, Ty);
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V, "ref.tmp");
  }
  VE = V;
  return V;
}

llvm::Constant *
BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
  // Generate the block descriptor.
  const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  llvm::Constant *DescriptorFields[4];

  // Reserved
  DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);

  // Block literal size. For global blocks we just use the size of the generic
  // block literal struct.
  CharUnits BlockLiteralSize =
    CGM.GetTargetTypeStoreSize(getGenericBlockLiteralType());
  DescriptorFields[1] =
    llvm::ConstantInt::get(UnsignedLongTy, BlockLiteralSize.getQuantity());

  // Signature. Non-optional ObjC-style method descriptor @encode sequence.
  std::string BlockTypeEncoding;
  CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);

  DescriptorFields[2] = llvm::ConstantExpr::getBitCast(
          CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);

  // Layout.
  DescriptorFields[3] =
    llvm::ConstantInt::get(UnsignedLongTy, 0);

  // Build the structure from the 4 elements.
  llvm::Constant *DescriptorStruct =
    llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 4, false);

  llvm::GlobalVariable *Descriptor =
    new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
                             llvm::GlobalVariable::InternalLinkage,
                             DescriptorStruct, "__block_descriptor_global");

  int FieldCount = 5;
  // Generate the constants for the block literal.

  std::vector<llvm::Constant*> LiteralFields(FieldCount);

  CGBlockInfo Info(n);
  llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
  llvm::Function *Fn
    = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap);
  assert(Info.BlockSize == BlockLiteralSize
         && "no imports allowed for global block");

  // isa
  LiteralFields[0] = getNSConcreteGlobalBlock();

  // Flags
  LiteralFields[1] =
    llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE);

  // Reserved
  LiteralFields[2] = llvm::Constant::getNullValue(IntTy);

  // Function
  LiteralFields[3] = Fn;

  // Descriptor
  LiteralFields[4] = Descriptor;

  llvm::Constant *BlockLiteralStruct =
    llvm::ConstantStruct::get(VMContext, LiteralFields, false);

  llvm::GlobalVariable *BlockLiteral =
    new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
                             llvm::GlobalVariable::InternalLinkage,
                             BlockLiteralStruct, "__block_literal_global");

  return BlockLiteral;
}

llvm::Value *CodeGenFunction::LoadBlockStruct() {
  llvm::Value *V = Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()],
                                      "self");
  // For now, we codegen based upon byte offsets.
  return Builder.CreateBitCast(V, PtrToInt8Ty);
}

llvm::Function *
CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
                                       CGBlockInfo &Info,
                                       const Decl *OuterFuncDecl,
                              llvm::DenseMap<const Decl*, llvm::Value*> ldm) {

  // Check if we should generate debug info for this block.
  if (CGM.getDebugInfo())
    DebugInfo = CGM.getDebugInfo();

  // Arrange for local static and local extern declarations to appear
  // to be local to this function as well, as they are directly referenced
  // in a block.
  for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
       i != ldm.end();
       ++i) {
    const VarDecl *VD = dyn_cast<VarDecl>(i->first);

    if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
      LocalDeclMap[VD] = i->second;
  }

  BlockOffset =
    CGM.GetTargetTypeStoreSize(CGM.getGenericBlockLiteralType());
  BlockAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);

  const FunctionType *BlockFunctionType = BExpr->getFunctionType();
  QualType ResultType;
  FunctionType::ExtInfo EInfo = getFunctionExtInfo(*BlockFunctionType);
  bool IsVariadic;
  if (const FunctionProtoType *FTy =
        dyn_cast<FunctionProtoType>(BlockFunctionType)) {
    ResultType = FTy->getResultType();
    IsVariadic = FTy->isVariadic();
  } else {
    // K&R style block.
    ResultType = BlockFunctionType->getResultType();
    IsVariadic = false;
  }

  FunctionArgList Args;

  CurFuncDecl = OuterFuncDecl;

  const BlockDecl *BD = BExpr->getBlockDecl();

  IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");

  // Build the block struct now.
  AllocateAllBlockDeclRefs(*this, Info);

  QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
                                                  BlockLayout);

  // FIXME: This leaks
  ImplicitParamDecl *SelfDecl =
    ImplicitParamDecl::Create(getContext(), const_cast<BlockDecl*>(BD),
                              SourceLocation(), II,
                              ParmTy);

  Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
  BlockStructDecl = SelfDecl;

  for (BlockDecl::param_const_iterator i = BD->param_begin(),
       e = BD->param_end(); i != e; ++i)
    Args.push_back(std::make_pair(*i, (*i)->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(ResultType, Args, EInfo);

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);

  MangleBuffer Name;
  CGM.getMangledName(Name, BD);
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name.getString(), &CGM.getModule());

  CGM.SetInternalFunctionAttributes(BD, Fn, FI);

  StartFunction(BD, ResultType, Fn, Args,
                BExpr->getBody()->getLocEnd());

  CurFuncDecl = OuterFuncDecl;
  CurCodeDecl = BD;

  // If we have a C++ 'this' reference, go ahead and force it into
  // existence now.
  if (Info.CXXThisRef) {
    assert(!BlockCXXThisOffset.isZero() &&
           "haven't yet allocated 'this' reference");

    // TODO: I have a dream that one day this will be typed.
    llvm::Value *BlockLiteral = LoadBlockStruct();
    llvm::Value *ThisPtrRaw =
      Builder.CreateConstInBoundsGEP1_64(BlockLiteral,
                                         BlockCXXThisOffset.getQuantity(),
                                         "this.ptr.raw");

    const llvm::Type *Ty =
      CGM.getTypes().ConvertType(Info.CXXThisRef->getType());
    Ty = llvm::PointerType::get(Ty, 0);
    llvm::Value *ThisPtr = Builder.CreateBitCast(ThisPtrRaw, Ty, "this.ptr");

    CXXThisValue = Builder.CreateLoad(ThisPtr, "this");
  }

  // If we have an Objective-C 'self' reference, go ahead and force it
  // into existence now.
  if (Info.NeedsObjCSelf) {
    ValueDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
    LocalDeclMap[Self] = GetAddrOfBlockDecl(Self, false);
  }

  // Save a spot to insert the debug information for all the BlockDeclRefDecls.
  llvm::BasicBlock *entry = Builder.GetInsertBlock();
  llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
  --entry_ptr;

  EmitStmt(BExpr->getBody());

  // Remember where we were...
  llvm::BasicBlock *resume = Builder.GetInsertBlock();

  // Go back to the entry.
  ++entry_ptr;
  Builder.SetInsertPoint(entry, entry_ptr);

  if (CGDebugInfo *DI = getDebugInfo()) {
    // Emit debug information for all the BlockDeclRefDecls.
    // FIXME: also for 'this'
    for (unsigned i = 0, e = BlockLayout.size(); i != e; ++i) {
      if (const BlockDeclRefExpr *BDRE =
            dyn_cast<BlockDeclRefExpr>(BlockLayout[i])) {
        const ValueDecl *D = BDRE->getDecl();
        DI->setLocation(D->getLocation());
        DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
                                        LocalDeclMap[getBlockStructDecl()],
                                              Builder, this);
      }
    }
  }
  // And resume where we left off.
  if (resume == 0)
    Builder.ClearInsertionPoint();
  else
    Builder.SetInsertPoint(resume);

  FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());

  // The runtime needs a minimum alignment of a void *.
  CharUnits MinAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
  BlockOffset = CharUnits::fromQuantity(
      llvm::RoundUpToAlignment(BlockOffset.getQuantity(),
                               MinAlign.getQuantity()));

  Info.BlockSize = BlockOffset;
  Info.BlockAlign = BlockAlign;
  Info.BlockLayout = BlockLayout;
  Info.BlockHasCopyDispose = BlockHasCopyDispose;
  return Fn;
}

CharUnits BlockFunction::getBlockOffset(CharUnits Size, CharUnits Align) {
  assert((Align.isPositive()) && "alignment must be 1 byte or more");

  CharUnits OldOffset = BlockOffset;

  // Ensure proper alignment, even if it means we have to have a gap.
  BlockOffset = CharUnits::fromQuantity(
      llvm::RoundUpToAlignment(BlockOffset.getQuantity(), Align.getQuantity()));
  BlockAlign = std::max(Align, BlockAlign);

  CharUnits Pad = BlockOffset - OldOffset;
  if (Pad.isPositive()) {
    QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
                                                       llvm::APInt(32,
                                                         Pad.getQuantity()),
                                                       ArrayType::Normal, 0);
    ValueDecl *PadDecl = VarDecl::Create(getContext(),
                                         getContext().getTranslationUnitDecl(),
                                         SourceLocation(),
                                         0, QualType(PadTy), 0,
                                         VarDecl::None, VarDecl::None);
    Expr *E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
                                             SourceLocation());
    BlockLayout.push_back(E);
  }

  BlockOffset += Size;
  return BlockOffset - Size;
}

llvm::Constant *BlockFunction::
GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
                           std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  // FIXME: We'd like to put these into a section mergable by content, with
  // internal linkage.
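  // The copy helper takes the destination and source block literals as void*.
  // For each captured field noted in NoteForHelper that is __block or
  // otherwise requires copying, it calls _Block_object_assign with the
  // field's flag.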
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__copy_helper_block_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__copy_helper_block_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
  llvm::Type *PtrPtrT;

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
    DstObj = Builder.CreateLoad(DstObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
        Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);

        llvm::Value *N = llvm::ConstantInt::get(
              llvm::Type::getInt32Ty(T->getContext()), flag);
        llvm::Value *F = getBlockObjectAssign();
        Builder.CreateCall3(F, Dstv, Srcv, N);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *BlockFunction::
GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
                              const llvm::StructType* T,
                              std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  // FIXME: We'd like to put these into a section mergable by content, with
  // internal linkage.
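  // The destroy helper takes the block literal as a void*. For each captured
  // field noted in NoteForHelper that is __block or otherwise requires
  // copying, it releases the field via BuildBlockRelease
  // (_Block_object_dispose).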
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__destroy_helper_block_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__destroy_helper_block_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false, true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        BuildBlockRelease(Srcv, flag);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
                                      std::vector<HelperInfo> *NoteForHelper) {
  return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
                                                         T, NoteForHelper);
}

llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
                                     std::vector<HelperInfo> *NoteForHelperp) {
  return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
                                                            T, NoteForHelperp);
}

llvm::Constant *BlockFunction::
GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));

  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a section mergable by content, with
  // internal linkage.
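  // __Block_byref_id_object_copy_ copies the object stored in the __block
  // byref structure's object field (struct index 6, "x") from the source to
  // the destination copy by calling _Block_object_assign with
  // BLOCK_BYREF_CALLER set in the flags.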
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__Block_byref_id_object_copy_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false, true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  // dst->x
  llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);

  // src->x
  V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateLoad(V);
  V = Builder.CreateBitCast(V, T);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  llvm::Value *SrcObj = Builder.CreateLoad(V);

  flag |= BLOCK_BYREF_CALLER;

  llvm::Value *N = llvm::ConstantInt::get(
          llvm::Type::getInt32Ty(T->getContext()), flag);
  llvm::Value *F = getBlockObjectAssign();
  Builder.CreateCall3(F, DstObj, SrcObj, N);

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *
BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
                                                  int flag) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a section mergable by content, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__Block_byref_id_object_dispose_",
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false, true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  V = Builder.CreateLoad(V);

  flag |= BLOCK_BYREF_CALLER;
  BuildBlockRelease(V, flag);
  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
                                                    int Flag, unsigned Align) {
  // All alignments below that of pointer alignment collapse down to just
  // pointer alignment, as we always have at least that much alignment to begin
  // with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);

  // As an optimization, we only generate a single function of each kind we
  // might need. We need a different one for each alignment and for each
  // setting of flags. We mix Align and flag to get the kind.
  uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
  llvm::Constant *&Entry = CGM.AssignCache[Kind];
  if (Entry)
    return Entry;
  return Entry = CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, Flag);
}

llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
                                                       int Flag,
                                                       unsigned Align) {
  // All alignments below that of pointer alignment collapse down to just
  // pointer alignment, as we always have at least that much alignment to begin
  // with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);

  // As an optimization, we only generate a single function of each kind we
  // might need. We need a different one for each alignment and for each
  // setting of flags. We mix Align and flag to get the kind.
  uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
  llvm::Constant *&Entry = CGM.DestroyCache[Kind];
  if (Entry)
    return Entry;
  return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
}

llvm::Value *BlockFunction::getBlockObjectDispose() {
  if (CGM.BlockObjectDispose == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
    CGM.BlockObjectDispose
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
  }
  return CGM.BlockObjectDispose;
}

llvm::Value *BlockFunction::getBlockObjectAssign() {
  if (CGM.BlockObjectAssign == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
    CGM.BlockObjectAssign
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
  }
  return CGM.BlockObjectAssign;
}

void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
  llvm::Value *F = getBlockObjectDispose();
  llvm::Value *N;
  V = Builder.CreateBitCast(V, PtrToInt8Ty);
  N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag);
  Builder.CreateCall2(F, V, N);
}

ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }

BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
                             CGBuilderTy &B)
  : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
  PtrToInt8Ty = llvm::PointerType::getUnqual(
            llvm::Type::getInt8Ty(VMContext));

  BlockHasCopyDispose = false;
}