1 //===--- CGBlocks.cpp - Emit LLVM Code for declarations -------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This contains code to emit blocks. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "CGDebugInfo.h" 15 #include "CodeGenFunction.h" 16 #include "CGObjCRuntime.h" 17 #include "CodeGenModule.h" 18 #include "clang/AST/DeclObjC.h" 19 #include "llvm/Module.h" 20 #include "llvm/ADT/SmallSet.h" 21 #include "llvm/Target/TargetData.h" 22 #include <algorithm> 23 24 using namespace clang; 25 using namespace CodeGen; 26 27 /// CGBlockInfo - Information to generate a block literal. 28 class clang::CodeGen::CGBlockInfo { 29 public: 30 /// Name - The name of the block, kindof. 31 const char *Name; 32 33 /// DeclRefs - Variables from parent scopes that have been 34 /// imported into this block. 35 llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs; 36 37 /// InnerBlocks - This block and the blocks it encloses. 38 llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks; 39 40 /// CXXThisRef - Non-null if 'this' was required somewhere, in 41 /// which case this is that expression. 42 const CXXThisExpr *CXXThisRef; 43 44 /// NeedsObjCSelf - True if something in this block has an implicit 45 /// reference to 'self'. 46 bool NeedsObjCSelf; 47 48 /// These are initialized by GenerateBlockFunction. 49 bool BlockHasCopyDispose; 50 CharUnits BlockSize; 51 CharUnits BlockAlign; 52 llvm::SmallVector<const Expr*, 8> BlockLayout; 53 54 CGBlockInfo(const char *Name); 55 }; 56 57 CGBlockInfo::CGBlockInfo(const char *N) 58 : Name(N), CXXThisRef(0), NeedsObjCSelf(false) { 59 60 // Skip asm prefix, if any. 
61 if (Name && Name[0] == '\01') 62 ++Name; 63 } 64 65 66 llvm::Constant *CodeGenFunction:: 67 BuildDescriptorBlockDecl(const BlockExpr *BE, bool BlockHasCopyDispose, CharUnits Size, 68 const llvm::StructType* Ty, 69 std::vector<HelperInfo> *NoteForHelper) { 70 const llvm::Type *UnsignedLongTy 71 = CGM.getTypes().ConvertType(getContext().UnsignedLongTy); 72 llvm::Constant *C; 73 std::vector<llvm::Constant*> Elts; 74 75 // reserved 76 C = llvm::ConstantInt::get(UnsignedLongTy, 0); 77 Elts.push_back(C); 78 79 // Size 80 // FIXME: What is the right way to say this doesn't fit? We should give 81 // a user diagnostic in that case. Better fix would be to change the 82 // API to size_t. 83 C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity()); 84 Elts.push_back(C); 85 86 // optional copy/dispose helpers 87 if (BlockHasCopyDispose) { 88 // copy_func_helper_decl 89 Elts.push_back(BuildCopyHelper(Ty, NoteForHelper)); 90 91 // destroy_func_decl 92 Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper)); 93 } 94 95 // Signature. non-optional ObjC-style method descriptor @encode sequence 96 std::string BlockTypeEncoding; 97 CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding); 98 99 Elts.push_back(llvm::ConstantExpr::getBitCast( 100 CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty)); 101 102 // Layout. 
103 C = llvm::ConstantInt::get(UnsignedLongTy, 0); 104 Elts.push_back(C); 105 106 C = llvm::ConstantStruct::get(VMContext, Elts, false); 107 108 C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true, 109 llvm::GlobalValue::InternalLinkage, 110 C, "__block_descriptor_tmp"); 111 return C; 112 } 113 114 llvm::Constant *BlockModule::getNSConcreteGlobalBlock() { 115 if (NSConcreteGlobalBlock == 0) 116 NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty, 117 "_NSConcreteGlobalBlock"); 118 return NSConcreteGlobalBlock; 119 } 120 121 llvm::Constant *BlockModule::getNSConcreteStackBlock() { 122 if (NSConcreteStackBlock == 0) 123 NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty, 124 "_NSConcreteStackBlock"); 125 return NSConcreteStackBlock; 126 } 127 128 static void CollectBlockDeclRefInfo(const Stmt *S, CGBlockInfo &Info) { 129 for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end(); 130 I != E; ++I) 131 if (*I) 132 CollectBlockDeclRefInfo(*I, Info); 133 134 // We want to ensure we walk down into block literals so we can find 135 // all nested BlockDeclRefExprs. 136 if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) { 137 Info.InnerBlocks.insert(BE->getBlockDecl()); 138 CollectBlockDeclRefInfo(BE->getBody(), Info); 139 } 140 141 else if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) { 142 const ValueDecl *D = BDRE->getDecl(); 143 // FIXME: Handle enums. 144 if (isa<FunctionDecl>(D)) 145 return; 146 147 if (isa<ImplicitParamDecl>(D) && 148 isa<ObjCMethodDecl>(D->getDeclContext()) && 149 cast<ObjCMethodDecl>(D->getDeclContext())->getSelfDecl() == D) { 150 Info.NeedsObjCSelf = true; 151 return; 152 } 153 154 // Only Decls that escape are added. 155 if (!Info.InnerBlocks.count(D->getDeclContext())) 156 Info.DeclRefs.push_back(BDRE); 157 } 158 159 // Make sure to capture implicit 'self' references due to super calls. 
160 else if (const ObjCMessageExpr *E = dyn_cast<ObjCMessageExpr>(S)) { 161 if (E->getReceiverKind() == ObjCMessageExpr::SuperClass || 162 E->getReceiverKind() == ObjCMessageExpr::SuperInstance) 163 Info.NeedsObjCSelf = true; 164 } 165 166 // Getter/setter uses may also cause implicit super references, 167 // which we can check for with: 168 else if (isa<ObjCSuperExpr>(S)) 169 Info.NeedsObjCSelf = true; 170 171 else if (isa<CXXThisExpr>(S)) 172 Info.CXXThisRef = cast<CXXThisExpr>(S); 173 } 174 175 /// CanBlockBeGlobal - Given a CGBlockInfo struct, determines if a block can be 176 /// declared as a global variable instead of on the stack. 177 static bool CanBlockBeGlobal(const CGBlockInfo &Info) { 178 return Info.DeclRefs.empty(); 179 } 180 181 /// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to 182 /// ensure we can generate the debug information for the parameter for the block 183 /// invoke function. 184 static void AllocateAllBlockDeclRefs(CodeGenFunction &CGF, CGBlockInfo &Info) { 185 if (Info.CXXThisRef) 186 CGF.AllocateBlockCXXThisPointer(Info.CXXThisRef); 187 188 for (size_t i = 0; i < Info.DeclRefs.size(); ++i) 189 CGF.AllocateBlockDecl(Info.DeclRefs[i]); 190 191 if (Info.NeedsObjCSelf) { 192 ValueDecl *Self = cast<ObjCMethodDecl>(CGF.CurFuncDecl)->getSelfDecl(); 193 BlockDeclRefExpr *BDRE = 194 new (CGF.getContext()) BlockDeclRefExpr(Self, Self->getType(), 195 SourceLocation(), false); 196 Info.DeclRefs.push_back(BDRE); 197 CGF.AllocateBlockDecl(BDRE); 198 } 199 } 200 201 // FIXME: Push most into CGM, passing down a few bits, like current function 202 // name. 203 llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { 204 std::string Name = CurFn->getName(); 205 CGBlockInfo Info(Name.c_str()); 206 Info.InnerBlocks.insert(BE->getBlockDecl()); 207 CollectBlockDeclRefInfo(BE->getBody(), Info); 208 209 // Check if the block can be global. 210 // FIXME: This test doesn't work for nested blocks yet. 
Longer term, I'd like 211 // to just have one code path. We should move this function into CGM and pass 212 // CGF, then we can just check to see if CGF is 0. 213 if (0 && CanBlockBeGlobal(Info)) 214 return CGM.GetAddrOfGlobalBlock(BE, Name.c_str()); 215 216 size_t BlockFields = 5; 217 218 std::vector<llvm::Constant*> Elts(BlockFields); 219 220 llvm::Constant *C; 221 llvm::Value *V; 222 223 { 224 // C = BuildBlockStructInitlist(); 225 unsigned int flags = BLOCK_HAS_SIGNATURE; 226 227 // We run this first so that we set BlockHasCopyDispose from the entire 228 // block literal. 229 // __invoke 230 llvm::Function *Fn 231 = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, BE, Info, CurFuncDecl, 232 LocalDeclMap); 233 BlockHasCopyDispose |= Info.BlockHasCopyDispose; 234 Elts[3] = Fn; 235 236 // FIXME: Don't use BlockHasCopyDispose, it is set more often then 237 // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); } 238 if (Info.BlockHasCopyDispose) 239 flags |= BLOCK_HAS_COPY_DISPOSE; 240 241 // __isa 242 C = CGM.getNSConcreteStackBlock(); 243 C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty); 244 Elts[0] = C; 245 246 // __flags 247 { 248 QualType BPT = BE->getType(); 249 const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>(); 250 QualType ResultType = ftype->getResultType(); 251 252 CallArgList Args; 253 CodeGenTypes &Types = CGM.getTypes(); 254 const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args, 255 FunctionType::ExtInfo()); 256 if (CGM.ReturnTypeUsesSret(FnInfo)) 257 flags |= BLOCK_USE_STRET; 258 } 259 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( 260 CGM.getTypes().ConvertType(CGM.getContext().IntTy)); 261 C = llvm::ConstantInt::get(IntTy, flags); 262 Elts[1] = C; 263 264 // __reserved 265 C = llvm::ConstantInt::get(IntTy, 0); 266 Elts[2] = C; 267 268 if (Info.BlockLayout.empty()) { 269 // __descriptor 270 Elts[4] = BuildDescriptorBlockDecl(BE, Info.BlockHasCopyDispose, 271 Info.BlockSize, 0, 0); 272 
273 // Optimize to being a global block. 274 Elts[0] = CGM.getNSConcreteGlobalBlock(); 275 276 Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL); 277 278 C = llvm::ConstantStruct::get(VMContext, Elts, false); 279 280 C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true, 281 llvm::GlobalValue::InternalLinkage, C, 282 "__block_holder_tmp_" + 283 llvm::Twine(CGM.getGlobalUniqueCount())); 284 QualType BPT = BE->getType(); 285 C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT)); 286 return C; 287 } 288 289 std::vector<const llvm::Type *> Types(BlockFields+Info.BlockLayout.size()); 290 for (int i=0; i<4; ++i) 291 Types[i] = Elts[i]->getType(); 292 Types[4] = PtrToInt8Ty; 293 294 for (unsigned i = 0, n = Info.BlockLayout.size(); i != n; ++i) { 295 const Expr *E = Info.BlockLayout[i]; 296 const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E); 297 QualType Ty = E->getType(); 298 if (BDRE && BDRE->isByRef()) { 299 Types[i+BlockFields] = 300 llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0); 301 } else if (BDRE && BDRE->getDecl()->getType()->isReferenceType()) { 302 Types[i+BlockFields] = llvm::PointerType::get(ConvertType(Ty), 0); 303 } else 304 Types[i+BlockFields] = ConvertType(Ty); 305 } 306 307 llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true); 308 309 llvm::AllocaInst *A = CreateTempAlloca(Ty); 310 A->setAlignment(Info.BlockAlign.getQuantity()); 311 V = A; 312 313 // Build layout / cleanup information for all the data entries in the 314 // layout, and write the enclosing fields into the type. 315 std::vector<HelperInfo> NoteForHelper(Info.BlockLayout.size()); 316 unsigned NumHelpers = 0; 317 318 for (unsigned i=0; i<4; ++i) 319 Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp")); 320 321 for (unsigned i=0; i < Info.BlockLayout.size(); ++i) { 322 const Expr *E = Info.BlockLayout[i]; 323 324 // Skip padding. 
325 if (isa<DeclRefExpr>(E)) continue; 326 327 llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp"); 328 HelperInfo &Note = NoteForHelper[NumHelpers++]; 329 330 Note.index = i+5; 331 332 if (isa<CXXThisExpr>(E)) { 333 Note.RequiresCopying = false; 334 Note.flag = BLOCK_FIELD_IS_OBJECT; 335 336 Builder.CreateStore(LoadCXXThis(), Addr); 337 continue; 338 } 339 340 const BlockDeclRefExpr *BDRE = cast<BlockDeclRefExpr>(E); 341 const ValueDecl *VD = BDRE->getDecl(); 342 QualType T = VD->getType(); 343 344 Note.RequiresCopying = BlockRequiresCopying(T); 345 346 if (BDRE->isByRef()) { 347 Note.flag = BLOCK_FIELD_IS_BYREF; 348 if (T.isObjCGCWeak()) 349 Note.flag |= BLOCK_FIELD_IS_WEAK; 350 } else if (T->isBlockPointerType()) { 351 Note.flag = BLOCK_FIELD_IS_BLOCK; 352 } else { 353 Note.flag = BLOCK_FIELD_IS_OBJECT; 354 } 355 356 if (LocalDeclMap[VD]) { 357 if (BDRE->isByRef()) { 358 llvm::Value *Loc = LocalDeclMap[VD]; 359 Loc = Builder.CreateStructGEP(Loc, 1, "forwarding"); 360 Loc = Builder.CreateLoad(Loc); 361 Builder.CreateStore(Loc, Addr); 362 continue; 363 } else { 364 if (BDRE->getCopyConstructorExpr()) { 365 E = BDRE->getCopyConstructorExpr(); 366 // Code to destruct copy-constructed descriptor element for 367 // copied-in class object. 368 // TODO: Refactor this into common code with mostly similar 369 // CodeGenFunction::EmitLocalBlockVarDecl 370 QualType DtorTy = E->getType(); 371 if (const RecordType *RT = DtorTy->getAs<RecordType>()) 372 if (CXXRecordDecl *ClassDecl = 373 dyn_cast<CXXRecordDecl>(RT->getDecl())) { 374 if (!ClassDecl->hasTrivialDestructor()) { 375 const CXXDestructorDecl *D = 376 ClassDecl->getDestructor(getContext()); 377 assert(D && "BuildBlockLiteralTmp - destructor is nul"); 378 { 379 // Normal destruction. 380 DelayedCleanupBlock Scope(*this); 381 EmitCXXDestructorCall(D, Dtor_Complete, 382 /*ForVirtualBase=*/false, Addr); 383 // Make sure to jump to the exit block. 
384 EmitBranch(Scope.getCleanupExitBlock()); 385 } 386 if (Exceptions) { 387 EHCleanupBlock Cleanup(*this); 388 EmitCXXDestructorCall(D, Dtor_Complete, 389 /*ForVirtualBase=*/false, Addr); 390 } 391 } 392 } 393 } 394 else { 395 E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD), 396 VD->getType().getNonReferenceType(), 397 SourceLocation()); 398 if (VD->getType()->isReferenceType()) { 399 E = new (getContext()) 400 UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf, 401 getContext().getPointerType(E->getType()), 402 SourceLocation()); 403 } 404 } 405 } 406 } 407 408 if (BDRE->isByRef()) { 409 E = new (getContext()) 410 UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf, 411 getContext().getPointerType(E->getType()), 412 SourceLocation()); 413 } 414 415 RValue r = EmitAnyExpr(E, Addr, false); 416 if (r.isScalar()) { 417 llvm::Value *Loc = r.getScalarVal(); 418 const llvm::Type *Ty = Types[i+BlockFields]; 419 if (BDRE->isByRef()) { 420 // E is now the address of the value field, instead, we want the 421 // address of the actual ByRef struct. We optimize this slightly 422 // compared to gcc by not grabbing the forwarding slot as this must 423 // be done during Block_copy for us, and we can postpone the work 424 // until then. 
425 CharUnits offset = BlockDecls[BDRE->getDecl()]; 426 427 llvm::Value *BlockLiteral = LoadBlockStruct(); 428 429 Loc = Builder.CreateGEP(BlockLiteral, 430 llvm::ConstantInt::get(Int64Ty, offset.getQuantity()), 431 "block.literal"); 432 Ty = llvm::PointerType::get(Ty, 0); 433 Loc = Builder.CreateBitCast(Loc, Ty); 434 Loc = Builder.CreateLoad(Loc); 435 // Loc = Builder.CreateBitCast(Loc, Ty); 436 } 437 Builder.CreateStore(Loc, Addr); 438 } else if (r.isComplex()) 439 // FIXME: implement 440 ErrorUnsupported(BE, "complex in block literal"); 441 else if (r.isAggregate()) 442 ; // Already created into the destination 443 else 444 assert (0 && "bad block variable"); 445 // FIXME: Ensure that the offset created by the backend for 446 // the struct matches the previously computed offset in BlockDecls. 447 } 448 NoteForHelper.resize(NumHelpers); 449 450 // __descriptor 451 llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE, 452 Info.BlockHasCopyDispose, 453 Info.BlockSize, Ty, 454 &NoteForHelper); 455 Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty); 456 Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp")); 457 } 458 459 QualType BPT = BE->getType(); 460 V = Builder.CreateBitCast(V, ConvertType(BPT)); 461 // See if this is a __weak block variable and the must call objc_read_weak 462 // on it. 
463 const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>(); 464 QualType RES = ftype->getResultType(); 465 if (RES.isObjCGCWeak()) { 466 // Must cast argument to id* 467 const llvm::Type *ObjectPtrTy = 468 ConvertType(CGM.getContext().getObjCIdType()); 469 const llvm::Type *PtrObjectPtrTy = 470 llvm::PointerType::getUnqual(ObjectPtrTy); 471 V = Builder.CreateBitCast(V, PtrObjectPtrTy); 472 V = CGM.getObjCRuntime().EmitObjCWeakRead(*this, V); 473 } 474 return V; 475 } 476 477 478 const llvm::Type *BlockModule::getBlockDescriptorType() { 479 if (BlockDescriptorType) 480 return BlockDescriptorType; 481 482 const llvm::Type *UnsignedLongTy = 483 getTypes().ConvertType(getContext().UnsignedLongTy); 484 485 // struct __block_descriptor { 486 // unsigned long reserved; 487 // unsigned long block_size; 488 // 489 // // later, the following will be added 490 // 491 // struct { 492 // void (*copyHelper)(); 493 // void (*copyHelper)(); 494 // } helpers; // !!! optional 495 // 496 // const char *signature; // the block signature 497 // const char *layout; // reserved 498 // }; 499 BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(), 500 UnsignedLongTy, 501 UnsignedLongTy, 502 NULL); 503 504 getModule().addTypeName("struct.__block_descriptor", 505 BlockDescriptorType); 506 507 return BlockDescriptorType; 508 } 509 510 const llvm::Type *BlockModule::getGenericBlockLiteralType() { 511 if (GenericBlockLiteralType) 512 return GenericBlockLiteralType; 513 514 const llvm::Type *BlockDescPtrTy = 515 llvm::PointerType::getUnqual(getBlockDescriptorType()); 516 517 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( 518 getTypes().ConvertType(getContext().IntTy)); 519 520 // struct __block_literal_generic { 521 // void *__isa; 522 // int __flags; 523 // int __reserved; 524 // void (*__invoke)(void *); 525 // struct __block_descriptor *__descriptor; 526 // }; 527 GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(), 528 
PtrToInt8Ty, 529 IntTy, 530 IntTy, 531 PtrToInt8Ty, 532 BlockDescPtrTy, 533 NULL); 534 535 getModule().addTypeName("struct.__block_literal_generic", 536 GenericBlockLiteralType); 537 538 return GenericBlockLiteralType; 539 } 540 541 542 RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E, 543 ReturnValueSlot ReturnValue) { 544 const BlockPointerType *BPT = 545 E->getCallee()->getType()->getAs<BlockPointerType>(); 546 547 llvm::Value *Callee = EmitScalarExpr(E->getCallee()); 548 549 // Get a pointer to the generic block literal. 550 const llvm::Type *BlockLiteralTy = 551 llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType()); 552 553 // Bitcast the callee to a block literal. 554 llvm::Value *BlockLiteral = 555 Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal"); 556 557 // Get the function pointer from the literal. 558 llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp"); 559 560 BlockLiteral = 561 Builder.CreateBitCast(BlockLiteral, 562 llvm::Type::getInt8PtrTy(VMContext), 563 "tmp"); 564 565 // Add the block literal. 566 QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy); 567 CallArgList Args; 568 Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy)); 569 570 QualType FnType = BPT->getPointeeType(); 571 572 // And the rest of the arguments. 573 EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), 574 E->arg_begin(), E->arg_end()); 575 576 // Load the function. 577 llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp"); 578 579 const FunctionType *FuncTy = FnType->getAs<FunctionType>(); 580 QualType ResultType = FuncTy->getResultType(); 581 582 const CGFunctionInfo &FnInfo = 583 CGM.getTypes().getFunctionInfo(ResultType, Args, 584 FuncTy->getExtInfo()); 585 586 // Cast the function pointer to the right type. 
587 const llvm::Type *BlockFTy = 588 CGM.getTypes().GetFunctionType(FnInfo, false); 589 590 const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy); 591 Func = Builder.CreateBitCast(Func, BlockFTyPtr); 592 593 // And call the block. 594 return EmitCall(FnInfo, Func, ReturnValue, Args); 595 } 596 597 void CodeGenFunction::AllocateBlockCXXThisPointer(const CXXThisExpr *E) { 598 assert(BlockCXXThisOffset.isZero() && "already computed 'this' pointer"); 599 600 // Figure out what the offset is. 601 QualType T = E->getType(); 602 std::pair<CharUnits,CharUnits> TypeInfo = getContext().getTypeInfoInChars(T); 603 CharUnits Offset = getBlockOffset(TypeInfo.first, TypeInfo.second); 604 605 BlockCXXThisOffset = Offset; 606 BlockLayout.push_back(E); 607 } 608 609 void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) { 610 const ValueDecl *VD = E->getDecl(); 611 CharUnits &Offset = BlockDecls[VD]; 612 613 // See if we have already allocated an offset for this variable. 614 if (!Offset.isZero()) 615 return; 616 617 // Don't run the expensive check, unless we have to. 
618 if (!BlockHasCopyDispose) 619 if (E->isByRef() 620 || BlockRequiresCopying(E->getType())) 621 BlockHasCopyDispose = true; 622 623 const ValueDecl *D = cast<ValueDecl>(E->getDecl()); 624 625 CharUnits Size; 626 CharUnits Align; 627 628 if (E->isByRef()) { 629 llvm::tie(Size,Align) = 630 getContext().getTypeInfoInChars(getContext().VoidPtrTy); 631 } else { 632 Size = getContext().getTypeSizeInChars(D->getType()); 633 Align = getContext().getDeclAlign(D); 634 } 635 636 Offset = getBlockOffset(Size, Align); 637 BlockLayout.push_back(E); 638 } 639 640 llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD, 641 bool IsByRef) { 642 643 CharUnits offset = BlockDecls[VD]; 644 assert(!offset.isZero() && "getting address of unallocated decl"); 645 646 llvm::Value *BlockLiteral = LoadBlockStruct(); 647 llvm::Value *V = Builder.CreateGEP(BlockLiteral, 648 llvm::ConstantInt::get(Int64Ty, offset.getQuantity()), 649 "block.literal"); 650 if (IsByRef) { 651 const llvm::Type *PtrStructTy 652 = llvm::PointerType::get(BuildByRefType(VD), 0); 653 // The block literal will need a copy/destroy helper. 
654 BlockHasCopyDispose = true; 655 656 const llvm::Type *Ty = PtrStructTy; 657 Ty = llvm::PointerType::get(Ty, 0); 658 V = Builder.CreateBitCast(V, Ty); 659 V = Builder.CreateLoad(V); 660 V = Builder.CreateStructGEP(V, 1, "forwarding"); 661 V = Builder.CreateLoad(V); 662 V = Builder.CreateBitCast(V, PtrStructTy); 663 V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD), 664 VD->getNameAsString()); 665 if (VD->getType()->isReferenceType()) 666 V = Builder.CreateLoad(V); 667 } else { 668 const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType()); 669 Ty = llvm::PointerType::get(Ty, 0); 670 V = Builder.CreateBitCast(V, Ty); 671 if (VD->getType()->isReferenceType()) 672 V = Builder.CreateLoad(V, "ref.tmp"); 673 } 674 return V; 675 } 676 677 llvm::Constant * 678 BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) { 679 // Generate the block descriptor. 680 const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy); 681 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( 682 getTypes().ConvertType(getContext().IntTy)); 683 684 llvm::Constant *DescriptorFields[4]; 685 686 // Reserved 687 DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy); 688 689 // Block literal size. For global blocks we just use the size of the generic 690 // block literal struct. 691 CharUnits BlockLiteralSize = 692 CGM.GetTargetTypeStoreSize(getGenericBlockLiteralType()); 693 DescriptorFields[1] = 694 llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize.getQuantity()); 695 696 // signature. 
non-optional ObjC-style method descriptor @encode sequence 697 std::string BlockTypeEncoding; 698 CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding); 699 700 DescriptorFields[2] = llvm::ConstantExpr::getBitCast( 701 CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty); 702 703 // layout 704 DescriptorFields[3] = 705 llvm::ConstantInt::get(UnsignedLongTy,0); 706 707 // build the structure from the 4 elements 708 llvm::Constant *DescriptorStruct = 709 llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 4, false); 710 711 llvm::GlobalVariable *Descriptor = 712 new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true, 713 llvm::GlobalVariable::InternalLinkage, 714 DescriptorStruct, "__block_descriptor_global"); 715 716 int FieldCount = 5; 717 // Generate the constants for the block literal. 718 719 std::vector<llvm::Constant*> LiteralFields(FieldCount); 720 721 CGBlockInfo Info(n); 722 llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap; 723 llvm::Function *Fn 724 = CodeGenFunction(CGM).GenerateBlockFunction(GlobalDecl(), BE, Info, 0, LocalDeclMap); 725 assert(Info.BlockSize == BlockLiteralSize 726 && "no imports allowed for global block"); 727 728 // isa 729 LiteralFields[0] = getNSConcreteGlobalBlock(); 730 731 // Flags 732 LiteralFields[1] = 733 llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE); 734 735 // Reserved 736 LiteralFields[2] = llvm::Constant::getNullValue(IntTy); 737 738 // Function 739 LiteralFields[3] = Fn; 740 741 // Descriptor 742 LiteralFields[4] = Descriptor; 743 744 llvm::Constant *BlockLiteralStruct = 745 llvm::ConstantStruct::get(VMContext, LiteralFields, false); 746 747 llvm::GlobalVariable *BlockLiteral = 748 new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true, 749 llvm::GlobalVariable::InternalLinkage, 750 BlockLiteralStruct, "__block_literal_global"); 751 752 return BlockLiteral; 753 } 754 755 llvm::Value *CodeGenFunction::LoadBlockStruct() { 756 
llvm::Value *V = Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()], 757 "self"); 758 // For now, we codegen based upon byte offsets. 759 return Builder.CreateBitCast(V, PtrToInt8Ty); 760 } 761 762 llvm::Function * 763 CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr, 764 CGBlockInfo &Info, 765 const Decl *OuterFuncDecl, 766 llvm::DenseMap<const Decl*, llvm::Value*> ldm) { 767 768 // Check if we should generate debug info for this block. 769 if (CGM.getDebugInfo()) 770 DebugInfo = CGM.getDebugInfo(); 771 772 // Arrange for local static and local extern declarations to appear 773 // to be local to this function as well, as they are directly referenced 774 // in a block. 775 for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin(); 776 i != ldm.end(); 777 ++i) { 778 const VarDecl *VD = dyn_cast<VarDecl>(i->first); 779 780 if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage()) 781 LocalDeclMap[VD] = i->second; 782 } 783 784 BlockOffset = 785 CGM.GetTargetTypeStoreSize(CGM.getGenericBlockLiteralType()); 786 BlockAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy); 787 788 const FunctionType *BlockFunctionType = BExpr->getFunctionType(); 789 QualType ResultType; 790 FunctionType::ExtInfo EInfo = getFunctionExtInfo(*BlockFunctionType); 791 bool IsVariadic; 792 if (const FunctionProtoType *FTy = 793 dyn_cast<FunctionProtoType>(BlockFunctionType)) { 794 ResultType = FTy->getResultType(); 795 IsVariadic = FTy->isVariadic(); 796 } else { 797 // K&R style block. 798 ResultType = BlockFunctionType->getResultType(); 799 IsVariadic = false; 800 } 801 802 FunctionArgList Args; 803 804 CurFuncDecl = OuterFuncDecl; 805 806 const BlockDecl *BD = BExpr->getBlockDecl(); 807 808 IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor"); 809 810 // Build the block struct now. 
811 AllocateAllBlockDeclRefs(*this, Info); 812 813 QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose, 814 BlockLayout); 815 816 // FIXME: This leaks 817 ImplicitParamDecl *SelfDecl = 818 ImplicitParamDecl::Create(getContext(), const_cast<BlockDecl*>(BD), 819 SourceLocation(), II, 820 ParmTy); 821 822 Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType())); 823 BlockStructDecl = SelfDecl; 824 825 for (BlockDecl::param_const_iterator i = BD->param_begin(), 826 e = BD->param_end(); i != e; ++i) 827 Args.push_back(std::make_pair(*i, (*i)->getType())); 828 829 const CGFunctionInfo &FI = 830 CGM.getTypes().getFunctionInfo(ResultType, Args, EInfo); 831 832 CodeGenTypes &Types = CGM.getTypes(); 833 const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic); 834 835 MangleBuffer Name; 836 CGM.getMangledName(GD, Name, BD); 837 llvm::Function *Fn = 838 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, 839 Name.getString(), &CGM.getModule()); 840 841 CGM.SetInternalFunctionAttributes(BD, Fn, FI); 842 843 QualType FnType(BlockFunctionType, 0); 844 bool HasPrototype = isa<FunctionProtoType>(BlockFunctionType); 845 846 IdentifierInfo *ID = &getContext().Idents.get(Name.getString()); 847 CurCodeDecl = FunctionDecl::Create(getContext(), 848 getContext().getTranslationUnitDecl(), 849 SourceLocation(), ID, FnType, 850 0, 851 FunctionDecl::Static, 852 FunctionDecl::None, 853 false, HasPrototype); 854 855 StartFunction(BD, ResultType, Fn, Args, 856 BExpr->getBody()->getLocEnd()); 857 858 CurFuncDecl = OuterFuncDecl; 859 860 // If we have a C++ 'this' reference, go ahead and force it into 861 // existence now. 862 if (Info.CXXThisRef) { 863 assert(!BlockCXXThisOffset.isZero() && 864 "haven't yet allocated 'this' reference"); 865 866 // TODO: I have a dream that one day this will be typed. 
867 llvm::Value *BlockLiteral = LoadBlockStruct(); 868 llvm::Value *ThisPtrRaw = 869 Builder.CreateConstInBoundsGEP1_64(BlockLiteral, 870 BlockCXXThisOffset.getQuantity(), 871 "this.ptr.raw"); 872 873 const llvm::Type *Ty = 874 CGM.getTypes().ConvertType(Info.CXXThisRef->getType()); 875 Ty = llvm::PointerType::get(Ty, 0); 876 llvm::Value *ThisPtr = Builder.CreateBitCast(ThisPtrRaw, Ty, "this.ptr"); 877 878 CXXThisValue = Builder.CreateLoad(ThisPtr, "this"); 879 } 880 881 // If we have an Objective C 'self' reference, go ahead and force it 882 // into existence now. 883 if (Info.NeedsObjCSelf) { 884 ValueDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl(); 885 LocalDeclMap[Self] = GetAddrOfBlockDecl(Self, false); 886 } 887 888 // Save a spot to insert the debug information for all the BlockDeclRefDecls. 889 llvm::BasicBlock *entry = Builder.GetInsertBlock(); 890 llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint(); 891 --entry_ptr; 892 893 EmitStmt(BExpr->getBody()); 894 895 // Remember where we were... 896 llvm::BasicBlock *resume = Builder.GetInsertBlock(); 897 898 // Go back to the entry. 899 ++entry_ptr; 900 Builder.SetInsertPoint(entry, entry_ptr); 901 902 if (CGDebugInfo *DI = getDebugInfo()) { 903 // Emit debug information for all the BlockDeclRefDecls. 904 // FIXME: also for 'this' 905 for (unsigned i = 0, e = BlockLayout.size(); i != e; ++i) { 906 if (const BlockDeclRefExpr *BDRE = 907 dyn_cast<BlockDeclRefExpr>(BlockLayout[i])) { 908 const ValueDecl *D = BDRE->getDecl(); 909 DI->setLocation(D->getLocation()); 910 DI->EmitDeclareOfBlockDeclRefVariable(BDRE, 911 LocalDeclMap[getBlockStructDecl()], 912 Builder, this); 913 } 914 } 915 } 916 // And resume where we left off. 917 if (resume == 0) 918 Builder.ClearInsertionPoint(); 919 else 920 Builder.SetInsertPoint(resume); 921 922 FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc()); 923 924 // The runtime needs a minimum alignment of a void *. 
  // (tail of GenerateBlockFunction) The block runtime needs a minimum
  // alignment of a void*, so round the accumulated literal size up to that.
  CharUnits MinAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
  BlockOffset = CharUnits::fromQuantity(
      llvm::RoundUpToAlignment(BlockOffset.getQuantity(),
                               MinAlign.getQuantity()));

  // Publish the finished layout so the caller (via CGBlockInfo) can build
  // the literal and its descriptor.
  Info.BlockSize = BlockOffset;
  Info.BlockAlign = BlockAlign;
  Info.BlockLayout = BlockLayout;
  Info.BlockHasCopyDispose = BlockHasCopyDispose;
  return Fn;
}

/// getBlockOffset - Reserve Size bytes at alignment Align in the block
/// literal currently being laid out.  Returns the (aligned) offset at which
/// the field was placed and advances BlockOffset past it.  BlockAlign is
/// raised to the strongest alignment seen so far.  If padding had to be
/// inserted, a synthetic char-array VarDecl is pushed onto BlockLayout so
/// the gap is represented explicitly in the layout expression list.
CharUnits BlockFunction::getBlockOffset(CharUnits Size, CharUnits Align) {
  assert((Align.isPositive()) && "alignment must be 1 byte or more");

  CharUnits OldOffset = BlockOffset;

  // Ensure proper alignment, even if it means we have to have a gap
  BlockOffset = CharUnits::fromQuantity(
      llvm::RoundUpToAlignment(BlockOffset.getQuantity(), Align.getQuantity()));
  BlockAlign = std::max(Align, BlockAlign);

  CharUnits Pad = BlockOffset - OldOffset;
  if (Pad.isPositive()) {
    // Model the padding as an anonymous char[Pad] variable so consumers of
    // BlockLayout account for the gap.
    QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
                                                       llvm::APInt(32,
                                                         Pad.getQuantity()),
                                                       ArrayType::Normal, 0);
    ValueDecl *PadDecl = VarDecl::Create(getContext(),
                                         getContext().getTranslationUnitDecl(),
                                         SourceLocation(),
                                         0, QualType(PadTy), 0,
                                         VarDecl::None, VarDecl::None);
    Expr *E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
                                             SourceLocation());
    BlockLayout.push_back(E);
  }

  BlockOffset += Size;
  return BlockOffset - Size;  // i.e. the aligned offset this field occupies
}

/// GenerateCopyHelperFunction - Emit the block copy helper,
/// "__copy_helper_block_", with signature void(void *dst, void *src).
/// For each captured field recorded in *NoteForHelperp that is __block
/// (BLOCK_FIELD_IS_BYREF) or otherwise requires copying, the helper calls
/// _Block_object_assign with that field's flags.  Returns the helper
/// bitcast to i8*.
llvm::Constant *BlockFunction::
GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
                           std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  // Build the (void *dst, void *src) parameter list.
  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  // FIXME: We'd like to put these into a mergeable by content, with
  // internal linkage.
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__copy_helper_block_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__copy_helper_block_");

  // Synthesize a FunctionDecl so StartFunction has an AST decl to hang
  // debug info and attributes on.
  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
  llvm::Type *PtrPtrT;

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    // Load the source block pointer: cast the i8** parameter slot to
    // (T*)** and load to get the block struct pointer.
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    // Same dance for the destination block pointer.
    llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
    // NOTE(review): this declaration shadows the outer PtrPtrT above and is
    // assigned the identical type; the outer one is effectively redundant.
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
    DstObj = Builder.CreateLoad(DstObj);

    // Walk the captured fields; NoteForHelper[i].index is the struct field
    // index within the block literal T.
    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        // Load the captured value (as i8*) out of the source block...
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        // ...and hand the destination *slot address* plus the source value
        // to the runtime: _Block_object_assign(dst, src, flag).
        llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
        Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);

        llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
        llvm::Value *F = getBlockObjectAssign();
        Builder.CreateCall3(F, Dstv, Srcv, N);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// GenerateDestroyHelperFunction - Emit the block destroy helper,
/// "__destroy_helper_block_", with signature void(void *src).  For each
/// captured field that is __block or requires copying, the helper releases
/// it through _Block_object_dispose (see BuildBlockRelease).  Returns the
/// helper bitcast to i8*.
llvm::Constant *BlockFunction::
GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
                              const llvm::StructType* T,
                              std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  // Single (void *src) parameter.
  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  // FIXME: We'd like to put these into a mergeable by content, with
  // internal linkage.
  // (continuation of GenerateDestroyHelperFunction)
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__destroy_helper_block_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__destroy_helper_block_");

  // Synthesize a FunctionDecl for StartFunction, mirroring the copy helper.
  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false, true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    // Load the block struct pointer from the i8** parameter slot.
    llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    // Release every captured field that was copied/retained by the copy
    // helper (same predicate as in GenerateCopyHelperFunction).
    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        // Load the captured value (as i8*) and release it with its flags.
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        BuildBlockRelease(Srcv, flag);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// BuildCopyHelper - Emit the copy helper for the current block, using a
/// fresh CodeGenFunction so its IR does not interleave with the block body.
llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
                                       std::vector<HelperInfo> *NoteForHelper) {
  return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
                                                         T, NoteForHelper);
}

/// BuildDestroyHelper - Emit the destroy helper for the current block,
/// likewise on a fresh CodeGenFunction.
llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
                                      std::vector<HelperInfo> *NoteForHelperp) {
  return
    CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
                                                       T, NoteForHelperp);
}

/// GeneratebyrefCopyHelperFunction - Emit "__Block_byref_id_object_copy_",
/// the helper the runtime calls to copy the object slot of a __block
/// (byref) structure: void(void *dst, void *src).  It forwards field 6
/// ("x") of the byref struct to _Block_object_assign with
/// BLOCK_BYREF_CALLER or'd into flag.
llvm::Constant *BlockFunction::
GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
  QualType R = getContext().VoidTy;

  // (void *dst, void *src) parameter list.
  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));

  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergeable by content, with
  // internal linkage.
  // (continuation of GeneratebyrefCopyHelperFunction)
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__Block_byref_id_object_copy_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false, true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  // NOTE(review): both GEPs of field 6 ("x") below only type-check if T is
  // already a pointer-to-byref-struct type (the src path bitcasts a loaded
  // value directly to T) — confirm at the callers.

  // dst->x : address of the object slot in the destination byref struct,
  // as i8* (the slot address itself is passed to the runtime).
  llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);

  // src->x : the object *value* loaded out of the source byref struct.
  V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateLoad(V);
  V = Builder.CreateBitCast(V, T);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  llvm::Value *SrcObj = Builder.CreateLoad(V);

  // Tell the runtime this call comes from a byref helper.
  flag |= BLOCK_BYREF_CALLER;

  llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
  llvm::Value *F = getBlockObjectAssign();
  Builder.CreateCall3(F, DstObj, SrcObj, N);

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// GeneratebyrefDestroyHelperFunction - Emit
/// "__Block_byref_id_object_dispose_", the helper the runtime calls to
/// release the object slot of a __block (byref) structure: void(void *src).
/// It loads field 6 ("x") and releases it via _Block_object_dispose with
/// BLOCK_BYREF_CALLER or'd into flag.
llvm::Constant *
BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
                                                  int flag) {
  QualType R = getContext().VoidTy;

  // Single (void *src) parameter.
  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergeable by content, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__Block_byref_id_object_dispose_",
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          FunctionDecl::None,
                                          false, true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  // src->x, loaded as i8*.
  llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  V = Builder.CreateLoad(V);

  flag |= BLOCK_BYREF_CALLER;
  BuildBlockRelease(V, flag);
  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// BuildbyrefCopyHelper - Return the byref copy helper for the given
/// type/flags/alignment, generating it on first use and caching it in
/// CGM.AssignCache thereafter.
llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
                                                    int Flag, unsigned Align) {
  // All alignments below that of pointer alignment collapse down to just
  // pointer alignment, as we always have at least that much alignment to begin
  // with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);

  // As an optimization, we only generate a single function of each kind we
  // might need. We need a different one for each alignment and for each
  // setting of flags. We mix Align and flag to get the kind.
  // (continuation of BuildbyrefCopyHelper) Cache key mixes alignment and
  // flags; BLOCK_BYREF_CURRENT_MAX keeps the two components disjoint.
  uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
  llvm::Constant *&Entry = CGM.AssignCache[Kind];
  if (Entry)
    return Entry;
  return Entry = CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, Flag);
}

/// BuildbyrefDestroyHelper - Return the byref dispose helper for the given
/// type/flags/alignment, generating it on first use and caching it in
/// CGM.DestroyCache thereafter (mirror of BuildbyrefCopyHelper).
llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
                                                       int Flag,
                                                       unsigned Align) {
  // All alignments below that of pointer alignment collapse down to just
  // pointer alignment, as we always have at least that much alignment to begin
  // with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);

  // As an optimization, we only generate a single function of each kind we
  // might need. We need a different one for each alignment and for each
  // setting of flags. We mix Align and flag to get the kind.
  uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
  llvm::Constant *&Entry = CGM.DestroyCache[Kind];
  if (Entry)
    return Entry;
  return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
}

/// getBlockObjectDispose - Lazily create (and cache in CGM) the declaration
/// of the runtime function: void _Block_object_dispose(i8*, i32).
llvm::Value *BlockFunction::getBlockObjectDispose() {
  if (CGM.BlockObjectDispose == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(CGF.Int32Ty);
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
    CGM.BlockObjectDispose
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
  }
  return CGM.BlockObjectDispose;
}

/// getBlockObjectAssign - Lazily create (and cache in CGM) the declaration
/// of the runtime function: void _Block_object_assign(i8*, i8*, i32).
llvm::Value *BlockFunction::getBlockObjectAssign() {
  if (CGM.BlockObjectAssign == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(CGF.Int32Ty);
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
    CGM.BlockObjectAssign
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
  }
  return CGM.BlockObjectAssign;
}

/// BuildBlockRelease - Emit a call to _Block_object_dispose(V, flag),
/// bitcasting V to i8* first.
void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
  llvm::Value *F = getBlockObjectDispose();
  llvm::Value *N;
  V = Builder.CreateBitCast(V, PtrToInt8Ty);
  N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
  Builder.CreateCall2(F, V, N);
}

ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }

/// BlockFunction - Bundle the module/function/builder state shared by the
/// block-emission helpers; caches an i8* type and clears the
/// copy/dispose-needed flag.
BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
                             CGBuilderTy &B)
  : CGM(cgm), VMContext(cgm.getLLVMContext()), CGF(cgf), Builder(B) {
  PtrToInt8Ty = llvm::PointerType::getUnqual(
            llvm::Type::getInt8Ty(VMContext));

  BlockHasCopyDispose = false;
}