//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CodeGenOptions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Type.h"
using namespace clang;
using namespace CodeGen;


void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::TranslationUnit:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::FunctionTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::ObjCPropertyImpl:
  case Decl::ObjCClass:
  case Decl::ObjCForwardProtocol:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
    assert(0 && "Declaration should not be in declstmts!");
  case Decl::Function:       // void X();
  case Decl::Record:         // struct/union/class X;
  case Decl::Enum:           // enum X;
  case Decl::EnumConstant:   // enum ? { X = ? }
  case Decl::CXXRecord:      // struct/union/class X; [C++]
  case Decl::Using:          // using X; [C++]
  case Decl::UsingShadow:
  case Decl::UsingDirective: // using namespace X; [C++]
  case Decl::NamespaceAlias:
  case Decl::StaticAssert:   // static_assert(X, ""); [C++0x]
    // None of these decls require codegen support.
    return;

  case Decl::Var: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isBlockVarDecl() &&
           "Should not see file-scope variables inside a function!");
    return EmitBlockVarDecl(VD);
  }

  case Decl::Typedef: {   // typedef int X;
    const TypedefDecl &TD = cast<TypedefDecl>(D);
    QualType Ty = TD.getUnderlyingType();

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
  }
}

/// EmitBlockVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
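///
/// For example, a rough sketch of how storage classes map to the paths below:
///
///   void f() {
///     int a = 0;         // None/Auto/Register   -> EmitLocalBlockVarDecl
///     static int b = 1;  // Static               -> EmitStaticBlockVarDecl
///     extern int c;      // Extern/PrivateExtern -> emitted lazily on first use
///   }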
void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
  if (D.hasAttr<AsmLabelAttr>())
    CGM.ErrorUnsupported(&D, "__asm__");

  switch (D.getStorageClass()) {
  case VarDecl::None:
  case VarDecl::Auto:
  case VarDecl::Register:
    return EmitLocalBlockVarDecl(D);
  case VarDecl::Static: {
    llvm::GlobalValue::LinkageTypes Linkage =
      llvm::GlobalValue::InternalLinkage;

    // If the function definition has some sort of weak linkage, its
    // static variables should also be weak so that they get properly
    // uniqued.  We can't do this in C, though, because there's no
    // standard way to agree on which variables are the same (i.e.
    // there's no mangling).
    if (getContext().getLangOptions().CPlusPlus)
      if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
        Linkage = CurFn->getLinkage();

    return EmitStaticBlockVarDecl(D, Linkage);
  }
  case VarDecl::Extern:
  case VarDecl::PrivateExtern:
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;
  }

  assert(0 && "Unknown storage class");
}

static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
                                     const char *Separator) {
  CodeGenModule &CGM = CGF.CGM;
  if (CGF.getContext().getLangOptions().CPlusPlus) {
    MangleBuffer Name;
    CGM.getMangledName(Name, &D);
    return Name.getString().str();
  }

  std::string ContextName;
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
    MangleBuffer Name;
    CGM.getMangledName(Name, FD);
    ContextName = Name.getString().str();
  } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
    ContextName = CGF.CurFn->getName();
  else
    // FIXME: What about in a block??
    assert(0 && "Unknown context for block var decl");

  return ContextName + Separator + D.getNameAsString();
}

llvm::GlobalVariable *
CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
                                          const char *Separator,
                                     llvm::GlobalValue::LinkageTypes Linkage) {
  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  std::string Name = GetStaticDeclName(*this, D, Separator);

  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
  llvm::GlobalVariable *GV =
    new llvm::GlobalVariable(CGM.getModule(), LTy,
                             Ty.isConstant(getContext()), Linkage,
                             CGM.EmitNullConstant(D.getType()), Name, 0,
                             D.isThreadSpecified(), Ty.getAddressSpace());
  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
  return GV;
}

/// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it.  If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one.  Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
                                                    llvm::GlobalVariable *GV) {
  llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getContext().getLangOptions().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitStaticCXXBlockVarDeclInit(D, GV);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer.  (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                  OldGV->isConstant(),
                                  OldGV->getLinkage(), Init, "",
                                  0, D.isThreadSpecified(),
                                  D.getType().getAddressSpace());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setInitializer(Init);
  return GV;
}

void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
                                     llvm::GlobalValue::LinkageTypes Linkage) {
  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");

  llvm::GlobalVariable *GV = CreateStaticBlockVarDecl(D, ".", Linkage);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  DMEntry = GV;

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVLASize(D.getType());

  // If this value has an initializer, emit it.
  if (D.getInit())
    GV = AddInitializerToGlobalBlockVarDecl(D, GV);

  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());

  // FIXME: Merge attribute handling.
  if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
    SourceManager &SM = CGM.getContext().getSourceManager();
    llvm::Constant *Ann =
      CGM.EmitAnnotateAttr(GV, AA,
                           SM.getInstantiationLineNumber(D.getLocation()));
    CGM.AddAnnotation(Ann);
  }

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    GV->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.AddUsedGlobal(GV);

  if (getContext().getLangOptions().CPlusPlus)
    CGM.setStaticLocalDeclAddress(&D, GV);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
  const llvm::Type *LPtrTy =
    llvm::PointerType::get(LTy, D.getType().getAddressSpace());
  DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
  }
}

unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
  assert(ByRefValueInfo.count(VD) && "Did not find value!");

  return ByRefValueInfo.find(VD)->second.second;
}

/// BuildByRefType - This routine changes a __block variable declared as T x
///   into:
///
///      struct {
///        void *__isa;
///        void *__forwarding;
///        int32_t __flags;
///        int32_t __size;
///        void *__copy_helper;       // only if needed
///        void *__destroy_helper;    // only if needed
///        char padding[X];           // only if needed
///        T x;
///      } x
///
const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
  std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
  if (Info.first)
    return Info.first;

  QualType Ty = D->getType();

  std::vector<const llvm::Type *> Types;

  const llvm::PointerType *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);

  llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(VMContext);

  // void *__isa;
  Types.push_back(Int8PtrTy);

  // void *__forwarding;
  Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));

  // int32_t __flags;
  Types.push_back(llvm::Type::getInt32Ty(VMContext));

  // int32_t __size;
  Types.push_back(llvm::Type::getInt32Ty(VMContext));

  bool HasCopyAndDispose = BlockRequiresCopying(Ty);
  if (HasCopyAndDispose) {
    /// void *__copy_helper;
    Types.push_back(Int8PtrTy);

    /// void *__destroy_helper;
    Types.push_back(Int8PtrTy);
  }

  bool Packed = false;
  CharUnits Align = getContext().getDeclAlign(D);
  if (Align > CharUnits::fromQuantity(Target.getPointerAlign(0) / 8)) {
    // We have to insert padding.

    // The struct above has 2 32-bit integers.
    unsigned CurrentOffsetInBytes = 4 * 2;

    // And either 2 or 4 pointers.
    CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
      CGM.getTargetData().getTypeAllocSize(Int8PtrTy);

    // Align the offset.
    unsigned AlignedOffsetInBytes =
      llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());

    unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
    if (NumPaddingBytes > 0) {
      const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
      // FIXME: We need a sema error for alignment larger than the minimum of
      // the maximal stack alignment and the alignment of malloc on the system.
      if (NumPaddingBytes > 1)
        Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);

      Types.push_back(Ty);

      // We want a packed struct.
      Packed = true;
    }
  }

  // T x;
  Types.push_back(ConvertType(Ty));

  const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);

  cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
  CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
                              ByRefTypeHolder.get());

  Info.first = ByRefTypeHolder.get();

  Info.second = Types.size() - 1;

  return Info.first;
}

/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
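///
/// For example, roughly how different locals are lowered here:
///
///   void f(int n) {
///     int a;           // fixed size    -> alloca in the entry block
///     int vla[n];      // variable size -> stacksave + dynamic i8 alloca ("vla")
///     __block int b;   // __block       -> byref struct (see BuildByRefType)
///   }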
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  CharUnits Align = CharUnits::Zero();
  bool IsSimpleConstantInitializer = false;

  bool NRVO = false;
  llvm::Value *NRVOFlag = 0;
  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      NRVO = getContext().getLangOptions().ElideConstructors &&
             D.isNRVOVariable();
      // If this value is an array or struct, is POD, and if the initializer is
      // a statically determinable constant, try to optimize it (unless the
      // NRVO is already optimizing this).
      if (D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext()) && !NRVO) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticBlockVarDecl(D, llvm::GlobalValue::InternalLinkage);
          return;
        }

        IsSimpleConstantInitializer = true;
      }

      // A normal fixed sized variable becomes an alloca in the entry block,
      // unless it's an NRVO variable.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);

      if (NRVO) {
        // The named return value optimization: allocate this variable in the
        // return slot, so that we can elide the copy when returning this
        // variable (C++0x [class.copy]p34).
        DeclPtr = ReturnValue;

        if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
          if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
            // Create a flag that is used to indicate when the NRVO was applied
            // to this variable. Set it to zero to indicate that NRVO was not
            // applied.
            const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
            llvm::Value *Zero = llvm::ConstantInt::get(BoolTy, 0);
            NRVOFlag = CreateTempAlloca(BoolTy, "nrvo");
            Builder.CreateStore(Zero, NRVOFlag);

            // Record the NRVO flag for this variable.
            NRVOFlags[&D] = NRVOFlag;
          }
        }
      } else {
        if (isByRef)
          LTy = BuildByRefType(&D);

        llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
        Alloc->setName(D.getNameAsString());

        Align = getContext().getDeclAlign(&D);
        if (isByRef)
          Align = std::max(Align,
              CharUnits::fromQuantity(Target.getPointerAlign(0) / 8));
        Alloc->setAlignment(Align.getQuantity());
        DeclPtr = Alloc;
      }
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      {
        // Push a cleanup block and restore the stack there.
        DelayedCleanupBlock scope(*this);

        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Allocate memory for the array.
    llvm::AllocaInst *VLA =
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    VLA->setAlignment(getContext().getDeclAlign(&D).getQuantity());

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag & BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               CGM.GetTargetTypeStoreSize(V1).getQuantity());
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag,
                                               Align.getQuantity()),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align.getQuantity()),
                          destroy_helper);
    }
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef)
      Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                    D.getNameAsString());

    bool isVolatile =
      getContext().getCanonicalType(D.getType()).isVolatileQualified();

    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init =
        CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
      assert(Init != 0 && "Wasn't a simple constant init?");

      llvm::Value *AlignVal =
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               Align.getQuantity());
      const llvm::Type *IntPtr =
        llvm::IntegerType::get(VMContext, LLVMPointerWidth);
      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtr,
                               getContext().getTypeSizeInChars(Ty).getQuantity());

      const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");

      llvm::Value *NotVolatile =
        llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);

      // If the initializer is all zeros, codegen with memset.
      if (isa<llvm::ConstantAggregateZero>(Init)) {
        llvm::Value *Zero =
          llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
        Builder.CreateCall5(CGM.getMemSetFn(Loc->getType(), SizeVal->getType()),
                            Loc, Zero, SizeVal, AlignVal, NotVolatile);
      } else {
        // Otherwise, create a temporary global with the initializer then
        // memcpy from the global to the alloca.
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align.getQuantity());

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");

        Builder.CreateCall5(CGM.getMemCpyFn(Loc->getType(), SrcPtr->getType(),
                                            SizeVal->getType()),
                            Loc, SrcPtr, SizeVal, AlignVal, NotVolatile);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      EmitAggExpr(Init, Loc, isVolatile);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        // Note: We suppress the destructor call when the corresponding NRVO
        // flag has been set.
        llvm::Value *Loc = DeclPtr;
        if (isByRef)
          Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                        D.getNameAsString());

        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");

        if (const ConstantArrayType *Array =
              getContext().getAsConstantArrayType(Ty)) {
          {
            DelayedCleanupBlock Scope(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(Loc, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(Loc, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          }
        } else {
          {
            // Normal destruction.
            DelayedCleanupBlock Scope(*this);

            if (NRVO) {
              // If we exited via NRVO, we skip the destructor call.
              llvm::BasicBlock *NoNRVO = createBasicBlock("nrvo.unused");
              Builder.CreateCondBr(Builder.CreateLoad(NRVOFlag, "nrvo.val"),
                                   Scope.getCleanupExitBlock(),
                                   NoNRVO);
              EmitBlock(NoNRVO);
            }

            // We don't call the destructor along the normal edge if we're
            // applying the NRVO.
            EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
                                  Loc);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }

          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
                                  Loc);
          }
        }
      }
    }

  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant* F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    //   void f(void* arg);
    //   __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    {
      DelayedCleanupBlock scope(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                         ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, ReturnValueSlot(), Args);
    }

    if (Exceptions) {
      EHCleanupBlock Cleanup(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                         ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, ReturnValueSlot(), Args);
    }
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    {
      DelayedCleanupBlock scope(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
    // FIXME: Turn this on and audit the codegen
    if (0 && Exceptions) {
      EHCleanupBlock Cleanup(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
  }
}

/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");
  QualType Ty = D.getType();
  CanQualType CTy = getContext().getCanonicalType(Ty);

  llvm::Value *DeclPtr;
  // If this is an aggregate or variable sized value, reuse the input pointer.
  if (!Ty->isConstantSizeType() ||
      CodeGenFunction::hasAggregateLLVMType(Ty)) {
    DeclPtr = Arg;
  } else {
    // Otherwise, create a temporary to hold the value.
    DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");

    // Store the initial value into the alloca.
    EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Ty);
  }
  Arg->setName(D.getName());

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for param declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder);
  }
}