//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Type.h"
using namespace clang;
using namespace CodeGen;


void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::TranslationUnit:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::FunctionTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::ObjCPropertyImpl:
  case Decl::ObjCClass:
  case Decl::ObjCForwardProtocol:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
    assert(0 && "Declaration should not be in declstmts!");
  case Decl::Function:     // void X();
  case Decl::Record:       // struct/union/class X;
  case Decl::Enum:         // enum X;
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::CXXRecord:    // struct/union/class X; [C++]
  case Decl::Using:          // using X; [C++]
  case Decl::UsingShadow:
  case Decl::UsingDirective: // using namespace X; [C++]
  case Decl::NamespaceAlias:
  case Decl::StaticAssert:   // static_assert(X, ""); [C++0x]
    // None of these decls require codegen support.
    return;

  case Decl::Var: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isBlockVarDecl() &&
           "Should not see file-scope variables inside a function!");
    return EmitBlockVarDecl(VD);
  }

  case Decl::Typedef: {   // typedef int X;
    const TypedefDecl &TD = cast<TypedefDecl>(D);
    QualType Ty = TD.getUnderlyingType();

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
  }
}

/// EmitBlockVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
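/// For example (an illustrative, hedged sketch; 'f', 'counter', and 'local'
/// are hypothetical names), in
///   void f() { static int counter; int local = 0; }
/// both 'counter' and 'local' are declaration statements that reach this
/// method, which dispatches on the storage class to the static or local
/// emission path below.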
void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
  if (D.hasAttr<AsmLabelAttr>())
    CGM.ErrorUnsupported(&D, "__asm__");

  switch (D.getStorageClass()) {
  case VarDecl::None:
  case VarDecl::Auto:
  case VarDecl::Register:
    return EmitLocalBlockVarDecl(D);
  case VarDecl::Static: {
    llvm::GlobalValue::LinkageTypes Linkage =
      llvm::GlobalValue::InternalLinkage;

    // If the function definition has some sort of weak linkage, its
    // static variables should also be weak so that they get properly
    // uniqued. We can't do this in C, though, because there's no
    // standard way to agree on which variables are the same (i.e.
    // there's no mangling).
    if (getContext().getLangOptions().CPlusPlus)
      if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
        Linkage = CurFn->getLinkage();

    return EmitStaticBlockVarDecl(D, Linkage);
  }
  case VarDecl::Extern:
  case VarDecl::PrivateExtern:
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;
  }

  assert(0 && "Unknown storage class");
}

static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
                                     const char *Separator) {
  CodeGenModule &CGM = CGF.CGM;
  if (CGF.getContext().getLangOptions().CPlusPlus) {
    llvm::StringRef Name = CGM.getMangledName(&D);
    return Name.str();
  }

  std::string ContextName;
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
    llvm::StringRef Name = CGM.getMangledName(FD);
    ContextName = Name.str();
  } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
    ContextName = CGF.CurFn->getName();
  else
    // FIXME: What about in a block??
    assert(0 && "Unknown context for block var decl");

  return ContextName + Separator + D.getNameAsString();
}

llvm::GlobalVariable *
CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
                                          const char *Separator,
                                          llvm::GlobalValue::LinkageTypes Linkage) {
  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  std::string Name = GetStaticDeclName(*this, D, Separator);

  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
  llvm::GlobalVariable *GV =
    new llvm::GlobalVariable(CGM.getModule(), LTy,
                             Ty.isConstant(getContext()), Linkage,
                             CGM.EmitNullConstant(D.getType()), Name, 0,
                             D.isThreadSpecified(), Ty.getAddressSpace());
  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
  return GV;
}

/// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
                                                    llvm::GlobalVariable *GV) {
  llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getContext().getLangOptions().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else {
      // Since we have a static initializer, this global variable can't
      // be constant.
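      // (Illustrative, hedged example; 'computeName' is a hypothetical
      //  function: a local such as
      //    static std::string Name = computeName();
      //  has no constant emission, so it takes this path and is initialized
      //  dynamically via EmitStaticCXXBlockVarDeclInit below.)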
      GV->setConstant(false);

      EmitStaticCXXBlockVarDeclInit(D, GV);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                  OldGV->isConstant(),
                                  OldGV->getLinkage(), Init, "",
                                  0, D.isThreadSpecified(),
                                  D.getType().getAddressSpace());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setInitializer(Init);
  return GV;
}

void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
                                             llvm::GlobalValue::LinkageTypes Linkage) {
  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");

  llvm::GlobalVariable *GV = CreateStaticBlockVarDecl(D, ".", Linkage);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  DMEntry = GV;

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVLASize(D.getType());

  // If this value has an initializer, emit it.
  if (D.getInit())
    GV = AddInitializerToGlobalBlockVarDecl(D, GV);

  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());

  // FIXME: Merge attribute handling.
  if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
    SourceManager &SM = CGM.getContext().getSourceManager();
    llvm::Constant *Ann =
      CGM.EmitAnnotateAttr(GV, AA,
                           SM.getInstantiationLineNumber(D.getLocation()));
    CGM.AddAnnotation(Ann);
  }

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    GV->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.AddUsedGlobal(GV);

  if (getContext().getLangOptions().CPlusPlus)
    CGM.setStaticLocalDeclAddress(&D, GV);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
  const llvm::Type *LPtrTy =
    llvm::PointerType::get(LTy, D.getType().getAddressSpace());
  DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);

  // Emit global variable debug descriptor for static vars.
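  // (Hedged illustration, hypothetical names: in C, 'static int n;' inside
  //  a function f() is created above as an internal global named "f.n" via
  //  the "." separator, and the debug info below describes that global
  //  rather than a stack slot.)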
  CGDebugInfo *DI = getDebugInfo();
  if (DI) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
  }
}

unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
  assert(ByRefValueInfo.count(VD) && "Did not find value!");

  return ByRefValueInfo.find(VD)->second.second;
}

/// BuildByRefType - This routine changes a __block variable declared as T x
///   into:
///
///      struct {
///        void *__isa;
///        void *__forwarding;
///        int32_t __flags;
///        int32_t __size;
///        void *__copy_helper;       // only if needed
///        void *__destroy_helper;    // only if needed
///        char padding[X];           // only if needed
///        T x;
///      } x
///
const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
  std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
  if (Info.first)
    return Info.first;

  QualType Ty = D->getType();

  std::vector<const llvm::Type *> Types;

  const llvm::PointerType *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);

  llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(VMContext);

  // void *__isa;
  Types.push_back(Int8PtrTy);

  // void *__forwarding;
  Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));

  // int32_t __flags;
  Types.push_back(Int32Ty);

  // int32_t __size;
  Types.push_back(Int32Ty);

  bool HasCopyAndDispose = BlockRequiresCopying(Ty);
  if (HasCopyAndDispose) {
    /// void *__copy_helper;
    Types.push_back(Int8PtrTy);

    /// void *__destroy_helper;
    Types.push_back(Int8PtrTy);
  }

  bool Packed = false;
  CharUnits Align = getContext().getDeclAlign(D);
  if (Align > CharUnits::fromQuantity(Target.getPointerAlign(0) / 8)) {
    // We have to insert padding.

    // The struct above has 2 32-bit integers.
    unsigned CurrentOffsetInBytes = 4 * 2;

    // And either 2 or 4 pointers.
    CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
      CGM.getTargetData().getTypeAllocSize(Int8PtrTy);

    // Align the offset.
    unsigned AlignedOffsetInBytes =
      llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());

    unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
    if (NumPaddingBytes > 0) {
      const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
      // FIXME: We need a sema error for alignment larger than the minimum of
      // the maximal stack alignment and the alignment of malloc on the system.
      if (NumPaddingBytes > 1)
        Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);

      Types.push_back(Ty);

      // We want a packed struct.
      Packed = true;
    }
  }

  // T x;
  Types.push_back(ConvertType(Ty));

  const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);

  cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
  CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
                              ByRefTypeHolder.get());

  Info.first = ByRefTypeHolder.get();

  Info.second = Types.size() - 1;

  return Info.first;
}

/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
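/// For illustration only (a hedged sketch, not guaranteed compiler output),
/// a simple local such as
///   int x = 42;
/// typically becomes an entry-block alloca plus a scalar store, e.g.
///   %x = alloca i32, align 4
///   store i32 42, i32* %x
/// while a __block variable is instead laid out in the byref struct built
/// by BuildByRefType above.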
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  CharUnits Align = CharUnits::Zero();
  bool IsSimpleConstantInitializer = false;

  bool NRVO = false;
  llvm::Value *NRVOFlag = 0;
  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      NRVO = getContext().getLangOptions().ElideConstructors &&
        D.isNRVOVariable();
      // If this value is an array or struct, is POD, and if the initializer is
      // a statically determinable constant, try to optimize it (unless the
      // NRVO is already optimizing this).
      if (D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext()) && !NRVO) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticBlockVarDecl(D, llvm::GlobalValue::InternalLinkage);
          return;
        }

        IsSimpleConstantInitializer = true;
      }

      // A normal fixed sized variable becomes an alloca in the entry block,
      // unless it's an NRVO variable.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);

      if (NRVO) {
        // The named return value optimization: allocate this variable in the
        // return slot, so that we can elide the copy when returning this
        // variable (C++0x [class.copy]p34).
        DeclPtr = ReturnValue;

        if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
          if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
            // Create a flag that is used to indicate when the NRVO was applied
            // to this variable. Set it to zero to indicate that NRVO was not
            // applied.
            const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
            llvm::Value *Zero = llvm::ConstantInt::get(BoolTy, 0);
            NRVOFlag = CreateTempAlloca(BoolTy, "nrvo");
            Builder.CreateStore(Zero, NRVOFlag);

            // Record the NRVO flag for this variable.
            NRVOFlags[&D] = NRVOFlag;
          }
        }
      } else {
        if (isByRef)
          LTy = BuildByRefType(&D);

        llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
        Alloc->setName(D.getNameAsString());

        Align = getContext().getDeclAlign(&D);
        if (isByRef)
          Align = std::max(Align,
                           CharUnits::fromQuantity(Target.getPointerAlign(0) / 8));
        Alloc->setAlignment(Align.getQuantity());
        DeclPtr = Alloc;
      }
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue
                                         ::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      {
        // Push a cleanup block and restore the stack there.
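        // (Illustrative, hedged note: this branch is reached for VLAs such
        //  as 'int buf[n];'. llvm.stacksave is emitted at most once here,
        //  guarded by DidCallStackSave, and the matching llvm.stackrestore
        //  is emitted in the cleanup block pushed below.)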
        DelayedCleanupBlock scope(*this);

        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Allocate memory for the array.
    llvm::AllocaInst *VLA =
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    VLA->setAlignment(getContext().getDeclAlign(&D).getQuantity());

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
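    // (Hedged illustration: for a declaration like '__block id obj;' the
    //  object case above sets BLOCK_FIELD_IS_OBJECT and requests the
    //  copy/dispose helpers; the check below additionally sets
    //  BLOCK_FIELD_IS_WEAK for __weak variables under Objective-C GC.)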
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag & BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = llvm::ConstantInt::get(Int32Ty, isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    V = llvm::ConstantInt::get(Int32Ty, flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(Int32Ty,
                               CGM.GetTargetTypeStoreSize(V1).getQuantity());
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag,
                                               Align.getQuantity()),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align.getQuantity()),
                          destroy_helper);
    }
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef)
      Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                    D.getNameAsString());

    bool isVolatile =
      getContext().getCanonicalType(D.getType()).isVolatileQualified();

    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
      assert(Init != 0 && "Wasn't a simple constant init?");

      llvm::Value *AlignVal =
        llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
      const llvm::Type *IntPtr =
        llvm::IntegerType::get(VMContext, LLVMPointerWidth);
      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtr,
                               getContext().getTypeSizeInChars(Ty).getQuantity());

      const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");

      llvm::Value *NotVolatile =
        llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);

      // If the initializer is all zeros, codegen with memset.
      if (isa<llvm::ConstantAggregateZero>(Init)) {
        llvm::Value *Zero =
          llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
        Builder.CreateCall5(CGM.getMemSetFn(Loc->getType(), SizeVal->getType()),
                            Loc, Zero, SizeVal, AlignVal, NotVolatile);
      } else {
        // Otherwise, create a temporary global with the initializer then
        // memcpy from the global to the alloca.
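        // (Illustrative, hedged example: 'int a[4] = {1, 2, 3, 4};' takes
        //  this path -- its initializer is emitted as an internal constant
        //  global and then memcpy'd into the local's storage, while an
        //  all-zero initializer uses the memset path above instead.)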
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align.getQuantity());

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");

        Builder.CreateCall5(CGM.getMemCpyFn(Loc->getType(), SrcPtr->getType(),
                                            SizeVal->getType()),
                            Loc, SrcPtr, SizeVal, AlignVal, NotVolatile);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, &D);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      EmitAggExpr(Init, Loc, isVolatile);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        // Note: We suppress the destructor call when the corresponding NRVO
        // flag has been set.
        llvm::Value *Loc = DeclPtr;
        if (isByRef)
          Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                        D.getNameAsString());

        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");

        if (const ConstantArrayType *Array =
              getContext().getAsConstantArrayType(Ty)) {
          {
            DelayedCleanupBlock Scope(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(Loc, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr =
              Builder.CreateBitCast(Loc, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          }
        } else {
          {
            // Normal destruction.
            DelayedCleanupBlock Scope(*this);

            if (NRVO) {
              // If we exited via NRVO, we skip the destructor call.
              llvm::BasicBlock *NoNRVO = createBasicBlock("nrvo.unused");
              Builder.CreateCondBr(Builder.CreateLoad(NRVOFlag, "nrvo.val"),
                                   Scope.getCleanupExitBlock(),
                                   NoNRVO);
              EmitBlock(NoNRVO);
            }

            // We don't call the destructor along the normal edge if we're
            // applying the NRVO.
            EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
                                  Loc);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }

          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
                                  Loc);
          }
        }
      }
    }

  // Handle the cleanup attribute.
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    //   void f(void* arg);
    //   __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    {
      DelayedCleanupBlock scope(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, ReturnValueSlot(), Args);
    }
    if (Exceptions) {
      EHCleanupBlock Cleanup(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, ReturnValueSlot(), Args);
    }
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    {
      DelayedCleanupBlock scope(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
    // FIXME: Turn this on and audit the codegen.
    if (0 && Exceptions) {
      EHCleanupBlock Cleanup(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
  }
}

/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");
  QualType Ty = D.getType();
  CanQualType CTy = getContext().getCanonicalType(Ty);

  llvm::Value *DeclPtr;
  // If this is an aggregate or variable sized value, reuse the input pointer.
  if (!Ty->isConstantSizeType() ||
      CodeGenFunction::hasAggregateLLVMType(Ty)) {
    DeclPtr = Arg;
  } else {
    // Otherwise, create a temporary to hold the value.
    DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");

    // Store the initial value into the alloca.
    EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Ty);
  }
  Arg->setName(D.getName());

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for param declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder);
  }
}
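// (Usage sketch for EmitParmDecl, illustrative only; 'g' and 'v' are
//  hypothetical names: for 'void g(int v)', the incoming scalar argument is
//  stored into a "v.addr" temporary created by CreateMemTemp above, whereas
//  an aggregate or variable-sized parameter reuses the caller-provided
//  pointer directly as its address.)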