//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

llvm::Value *AggValueSlot::getPaddedAtomicAddr() const {
  assert(isValueOfAtomic());
  llvm::GEPOperator *op = cast<llvm::GEPOperator>(getAddr());
  assert(op->getNumIndices() == 2);
  assert(op->hasAllZeroIndices());
  return op->getPointerOperand();
}

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
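  /// One overload takes the source as an l-value; the other takes an
  /// aggregate r-value plus an optional source alignment.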
  void EmitFinalDestCopy(QualType type, const LValue &src);
  void EmitFinalDestCopy(QualType type, RValue src,
                         CharUnits srcAlignment = CharUnits::Zero());
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitStdInitializerList(llvm::Value *DestPtr, InitListExpr *InitList);
  void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
                     QualType elementType, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) {
    // For aggregates, we should always be able to emit the variable
    // as an l-value unless it's a reference.  This is due to the fact
    // that we can't actually ever see a normal l2r conversion on an
    // aggregate in C++, and in C there's no language standard
    // actively preventing us from listing variables in the captures
    // list of a block.
    if (E->getDecl()->getType()->isReferenceType()) {
      if (CodeGenFunction::ConstantEmission result
            = CGF.tryEmitAsConstant(E)) {
        EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
        return;
      }
    }

    EmitAggLoadOfLValue(E);
  }

  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
  }
};

/// A helper class for emitting expressions into the value sub-object
/// of a padded atomic type.
class ValueDestForAtomic {
  AggValueSlot Dest;
public:
  ValueDestForAtomic(CodeGenFunction &CGF, AggValueSlot dest, QualType type)
    : Dest(dest) {
    assert(!Dest.isValueOfAtomic());
    if (!Dest.isIgnored() && CGF.CGM.isPaddedAtomicType(type)) {
      llvm::Value *valueAddr = CGF.Builder.CreateStructGEP(Dest.getAddr(), 0);
      Dest = AggValueSlot::forAddr(valueAddr,
                                   Dest.getAlignment(),
                                   Dest.getQualifiers(),
                                   Dest.isExternallyDestructed(),
                                   Dest.requiresGCollection(),
                                   Dest.isPotentiallyAliased(),
                                   Dest.isZeroed(),
                                   AggValueSlot::IsValueOfAtomic);
    }
  }

  const AggValueSlot &getDest() const { return Dest; }

  ~ValueDestForAtomic() {
    // Kill the GEP if we made one and it didn't end up used.
    if (Dest.isValueOfAtomic()) {
      llvm::Instruction *addr = cast<llvm::GetElementPtrInst>(Dest.getAddr());
      if (addr->use_empty()) addr->eraseFromParent();
    }
  }
};
}  // end anonymous namespace.
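
// ValueDestForAtomic (above) is used as a scoped wrapper around the
// destination slot before emitting into a padded atomic object; see
// EmitAggLoadOfLValue and the CK_NonAtomicToAtomic case of VisitCastExpr:
//   ValueDestForAtomic valueDest(CGF, Dest, atomicType);
//   CGF.EmitAggExpr(E->getSubExpr(), valueDest.getDest());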

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType()) {
    ValueDestForAtomic valueDest(CGF, Dest, LV.getType());
    CGF.EmitAtomicLoad(LV, valueDest.getDest());
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, copy from there to the destination.
  assert(Dest.getAddr() != src.getAggregateAddr());
  std::pair<CharUnits, CharUnits> typeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  EmitFinalDestCopy(E->getType(), src, typeInfo.second);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
                                       CharUnits srcAlign) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
  EmitFinalDestCopy(type, srcLV);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
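  // (The CK_LValueToRValue case of VisitCastExpr calls EnsureDest for
  // volatile-qualified loads to guarantee this.)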
  if (Dest.isIgnored())
    return;

  AggValueSlot srcAgg =
    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
                            needsGC(type), AggValueSlot::IsAliased);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddr(),
                                                      src.getAddr(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
                        dest.isVolatile() || src.isVolatile(),
                        std::min(dest.getAlignment(), src.getAlignment()));
}

static QualType GetStdInitializerListElementType(QualType T) {
  // Just assume that this is really std::initializer_list.
  ClassTemplateSpecializationDecl *specialization =
      cast<ClassTemplateSpecializationDecl>(T->castAs<RecordType>()->getDecl());
  return specialization->getTemplateArgs()[0].getAsType();
}

/// \brief Prepare cleanup for the temporary array.
static void EmitStdInitializerListCleanup(CodeGenFunction &CGF,
                                          QualType arrayType,
                                          llvm::Value *addr,
                                          const InitListExpr *initList) {
  QualType::DestructionKind dtorKind = arrayType.isDestructedType();
  if (!dtorKind)
    return; // Type doesn't need destroying.
  if (dtorKind != QualType::DK_cxx_destructor) {
    CGF.ErrorUnsupported(initList, "ObjC ARC type in initializer_list");
    return;
  }

  CodeGenFunction::Destroyer *destroyer = CGF.getDestroyer(dtorKind);
  CGF.pushDestroy(NormalAndEHCleanup, addr, arrayType, destroyer,
                  /*EHCleanup=*/true);
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void AggExprEmitter::EmitStdInitializerList(llvm::Value *destPtr,
                                            InitListExpr *initList) {
  // We emit an array containing the elements, then have the init list point
  // at the array.
  ASTContext &ctx = CGF.getContext();
  unsigned numInits = initList->getNumInits();
  QualType element = GetStdInitializerListElementType(initList->getType());
  llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
  QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
  llvm::Type *LTy = CGF.ConvertTypeForMem(array);
  llvm::AllocaInst *alloc = CGF.CreateTempAlloca(LTy);
  alloc->setAlignment(ctx.getTypeAlignInChars(array).getQuantity());
  alloc->setName(".initlist.");

  EmitArrayInit(alloc, cast<llvm::ArrayType>(LTy), element, initList);

  // FIXME: The diagnostics are somewhat out of place here.
  RecordDecl *record = initList->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator field = record->field_begin();
  if (field == record->field_end()) {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }

  QualType elementPtr = ctx.getPointerType(element.withConst());

  // Start pointer.
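  // (std::initializer_list's first field is expected to be a pointer to the
  // const element type; anything else is reported as unsupported below.)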
  if (!ctx.hasSameType(field->getType(), elementPtr)) {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }
  LValue DestLV = CGF.MakeNaturalAlignAddrLValue(destPtr, initList->getType());
  LValue start = CGF.EmitLValueForFieldInitialization(DestLV, *field);
  llvm::Value *arrayStart = Builder.CreateStructGEP(alloc, 0, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(arrayStart), start);
  ++field;

  if (field == record->field_end()) {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }
  LValue endOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *field);
  if (ctx.hasSameType(field->getType(), elementPtr)) {
    // End pointer.
    llvm::Value *arrayEnd = Builder.CreateStructGEP(alloc, numInits, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
  } else if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Builder.getInt(size)), endOrLength);
  } else {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }

  if (!Dest.isExternallyDestructed())
    EmitStdInitializerListCleanup(CGF, array, alloc, initList);
}

/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
                                   QualType elementType, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  llvm::AllocaInst *endOfInit = 0;
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(element, endOfInit);
    }

    // If these are nested std::initializer_list inits, do them directly,
    // because they are conceptually the same "location".
    InitListExpr *initList = dyn_cast<InitListExpr>(E->getInit(i));
    if (initList && initList->initializesStdInitializerList()) {
      EmitStdInitializerList(element, initList);
    } else {
      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }
  }

  // Check whether there's a non-trivial array-fill expression.
  // Note that this will be a CXXConstructExpr even if the element
  // type is an array (or array of array, etc.) of class type.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = true;
  if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
    assert(cons->getConstructor()->isDefaultConstructor());
    hasTrivialFiller = cons->getConstructor()->isTrivial();
  }

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
    if (filler)
      EmitInitializationToLValue(filler, elementLV);
    else
      EmitNullInitializationToLValue(elementLV);

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
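  // (dtorKind was reset to DK_none above when no EH cleanup was pushed.)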
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return 0;
  }
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just make an atomic temporary, emit into that,
    // and then copy the value out.  (FIXME: do we need to
    // zero-initialize it first?)
    if (isToAtomic) {
      ValueDestForAtomic valueDest(CGF, Dest, atomicType);
      CGF.EmitAggExpr(E->getSubExpr(), valueDest.getDest());
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.

    // If the dest is a value-of-atomic subobject, drill back out.
    if (Dest.isValueOfAtomic()) {
      AggValueSlot atomicSlot =
        AggValueSlot::forAddr(Dest.getPaddedAtomicAddr(),
                              Dest.getAlignment(),
                              Dest.getQualifiers(),
                              Dest.isExternallyDestructed(),
                              Dest.requiresGCollection(),
                              Dest.isPotentiallyAliased(),
                              Dest.isZeroed(),
                              AggValueSlot::IsNotValueOfAtomic);
      CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
      return;
    }

    // Otherwise, make an atomic temporary, emit into that, and then
    // copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    llvm::Value *valueAddr =
      Builder.CreateStructGEP(atomicSlot.getAddr(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    // fallthrough

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
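  // (e.g. the 'x' in '__block Agg x; ... x = makeAgg();', where Agg and
  // makeAgg are just illustrative names.)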
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType()) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType()) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
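  // (Marking the slot as externally destructed tells the subexpression that
  // destruction of this temporary is already accounted for.)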
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->initializesStdInitializerList()) {
    EmitStdInitializerList(Dest.getAddr(), E);
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
                                     Dest.getAlignment());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    if (E->isStringLiteralInit())
      return Visit(E->getInit(0));

    QualType elementType =
        CGF.getContext().getAsArrayType(E->getType())->getElementType();

    llvm::PointerType *APType =
      cast<llvm::PointerType>(Dest.getAddr()->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    EmitArrayInit(Dest.getAddr(), AType, elementType, E);
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddr());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = 0;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                                fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, *field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                   .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();

  Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}


/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot.  Note that if the slot
/// is ignored, the value of the aggregate expression is not needed; otherwise
/// the slot must have an address.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
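  // (CheckAggExprForMemSetUse may emit an initial memset and mark the slot
  // as zeroed.)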
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile,
                                        CharUnits alignment,
                                        bool isAssignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size and alignment info for this aggregate.  If this is an
  // assignment don't copy the tail padding.  Otherwise copying it is fine.
  std::pair<CharUnits, CharUnits> TypeInfo;
  if (isAssignment)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  if (alignment.isZero())
    alignment = TypeInfo.second;

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       alignment.getQuantity(), isVolatile,
                       /*TBAATag=*/0, TBAAStructTag);
}

void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
                                                         const Expr *init) {
  const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(init);
  if (cleanups)
    init = cleanups->getSubExpr();

  if (isa<InitListExpr>(init) &&
      cast<InitListExpr>(init)->initializesStdInitializerList()) {
    // We initialized this std::initializer_list with an initializer list.
    // A backing array was created. Push a cleanup for it.
    EmitStdInitializerListCleanup(loc, cast<InitListExpr>(init));
  }
}

static void EmitRecursiveStdInitializerListCleanup(CodeGenFunction &CGF,
                                                   llvm::Value *arrayStart,
                                                   const InitListExpr *init) {
  // Check if there are any recursive cleanups to do, i.e. if we have
  //   std::initializer_list<std::initializer_list<obj>> list = {{obj()}};
  // then we need to destroy the inner array as well.
  for (unsigned i = 0, e = init->getNumInits(); i != e; ++i) {
    const InitListExpr *subInit = dyn_cast<InitListExpr>(init->getInit(i));
    if (!subInit || !subInit->initializesStdInitializerList())
      continue;

    // This one needs to be destroyed. Get the address of the std::init_list.
    llvm::Value *offset = llvm::ConstantInt::get(CGF.SizeTy, i);
    llvm::Value *loc = CGF.Builder.CreateInBoundsGEP(arrayStart, offset,
                                                     "std.initlist");
    CGF.EmitStdInitializerListCleanup(loc, subInit);
  }
}

void CodeGenFunction::EmitStdInitializerListCleanup(llvm::Value *loc,
                                                    const InitListExpr *init) {
  ASTContext &ctx = getContext();
  QualType element = GetStdInitializerListElementType(init->getType());
  unsigned numInits = init->getNumInits();
  llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
  QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
  QualType arrayPtr = ctx.getPointerType(array);
  llvm::Type *arrayPtrType = ConvertType(arrayPtr);

  // 'loc' is the location of a std::initializer_list, whose first element
  // is a pointer to the array we want to destroy.
  llvm::Value *startPointer = Builder.CreateStructGEP(loc, 0, "startPointer");
  llvm::Value *startAddress = Builder.CreateLoad(startPointer, "startAddress");

  ::EmitRecursiveStdInitializerListCleanup(*this, startAddress, init);

  llvm::Value *arrayAddress =
    Builder.CreateBitCast(startAddress, arrayPtrType, "arrayAddress");
  ::EmitStdInitializerListCleanup(*this, array, arrayAddress, init);
}