//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IgnoreResult;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
                 bool ignore)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IgnoreResult(ignore) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
                         unsigned Alignment = 0);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitStdInitializerList(llvm::Value *DestPtr, InitListExpr *InitList);
  void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
                     QualType elementType, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) {
    // For aggregates, we should always be able to emit the variable
    // as an l-value unless it's a reference.  This is due to the fact
    // that we can't actually ever see a normal l2r conversion on an
    // aggregate in C++, and in C there's no language standard
    // actively preventing us from listing variables in the captures
    // list of a block.
    if (E->getDecl()->getType()->isReferenceType()) {
      if (CodeGenFunction::ConstantEmission result
            = CGF.tryEmitAsConstant(E)) {
        EmitFinalDestCopy(E, result.getReferenceLValue(CGF, E));
        return;
      }
    }

    EmitAggLoadOfLValue(E);
  }
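  // Illustrative sketch (an assumption, not part of the original source):
  // given something like
  //   extern const Agg &GlobalRef;
  //   Agg Local = GlobalRef;
  // the reference-typed DeclRefExpr above may fold to a constant l-value via
  // tryEmitAsConstant, in which case we copy straight from the referenced
  // object rather than re-emitting the variable as an l-value.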
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E, LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);
  EmitFinalDestCopy(E, LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
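// Illustrative sketch (an assumption, not from the original source): under
// Objective-C garbage collection,
//   struct Wrapper { id Obj; };   // hasObjectMember() is true
// an aggregate copy of 'Wrapper' cannot be a plain memcpy; it must go through
// the GC runtime (the EmitGCMemmoveCollectable call in EmitFinalDestCopy
// below) so the collector sees the stored object pointer.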
/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, do a final copy.
  assert(Dest.getAddr() != Src.getAggregateAddr());
  EmitFinalDestCopy(E, Src, /*Ignore*/ true);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
                                       unsigned Alignment) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context (like an expression statement) that doesn't care
  // about the result.  C says that an lvalue-to-rvalue conversion is
  // performed in these cases; C++ says that it is not.  In either
  // case, we don't actually need to do anything unless the value is
  // volatile.
  if (Dest.isIgnored()) {
    if (!Src.isVolatileQualified() ||
        CGF.CGM.getLangOpts().CPlusPlus ||
        (IgnoreResult && Ignore))
      return;

    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
  }

  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
    return;
  }
  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand
  // is volatile, unless the copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                        Dest.isVolatile()|Src.isVolatileQualified(),
                        Alignment);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
  EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore, Alignment.getQuantity());
}
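// Illustrative sketch (an assumption, not from the original source) of the
// ignored-destination rule above: in C,
//   volatile struct S vs;
//   vs;                 // expression statement: lvalue-to-rvalue conversion
// requires a load, so we materialize "agg.tmp" and copy into it, whereas the
// same statement in C++ performs no conversion and we emit nothing at all.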
static QualType GetStdInitializerListElementType(QualType T) {
  // Just assume that this is really std::initializer_list.
  ClassTemplateSpecializationDecl *specialization =
      cast<ClassTemplateSpecializationDecl>(T->castAs<RecordType>()->getDecl());
  return specialization->getTemplateArgs()[0].getAsType();
}

/// \brief Prepare cleanup for the temporary array.
static void EmitStdInitializerListCleanup(CodeGenFunction &CGF,
                                          QualType arrayType,
                                          llvm::Value *addr,
                                          const InitListExpr *initList) {
  QualType::DestructionKind dtorKind = arrayType.isDestructedType();
  if (!dtorKind)
    return; // Type doesn't need destroying.
  if (dtorKind != QualType::DK_cxx_destructor) {
    CGF.ErrorUnsupported(initList, "ObjC ARC type in initializer_list");
    return;
  }

  CodeGenFunction::Destroyer *destroyer = CGF.getDestroyer(dtorKind);
  CGF.pushDestroy(NormalAndEHCleanup, addr, arrayType, destroyer,
                  /*EHCleanup=*/true);
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void AggExprEmitter::EmitStdInitializerList(llvm::Value *destPtr,
                                            InitListExpr *initList) {
  // We emit an array containing the elements, then have the init list point
  // at the array.
  ASTContext &ctx = CGF.getContext();
  unsigned numInits = initList->getNumInits();
  QualType element = GetStdInitializerListElementType(initList->getType());
  llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
  QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal,0);
  llvm::Type *LTy = CGF.ConvertTypeForMem(array);
  llvm::AllocaInst *alloc = CGF.CreateTempAlloca(LTy);
  alloc->setAlignment(ctx.getTypeAlignInChars(array).getQuantity());
  alloc->setName(".initlist.");

  EmitArrayInit(alloc, cast<llvm::ArrayType>(LTy), element, initList);

  // FIXME: The diagnostics are somewhat out of place here.
  RecordDecl *record = initList->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator field = record->field_begin();
  if (field == record->field_end()) {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }

  QualType elementPtr = ctx.getPointerType(element.withConst());

  // Start pointer.
  if (!ctx.hasSameType(field->getType(), elementPtr)) {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }
  LValue DestLV = CGF.MakeNaturalAlignAddrLValue(destPtr, initList->getType());
  LValue start = CGF.EmitLValueForFieldInitialization(DestLV, *field);
  llvm::Value *arrayStart = Builder.CreateStructGEP(alloc, 0, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(arrayStart), start);
  ++field;

  if (field == record->field_end()) {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }
  LValue endOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *field);
  if (ctx.hasSameType(field->getType(), elementPtr)) {
    // End pointer.
    llvm::Value *arrayEnd = Builder.CreateStructGEP(alloc,numInits, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
  } else if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Builder.getInt(size)), endOrLength);
  } else {
    CGF.ErrorUnsupported(initList, "weird std::initializer_list");
    return;
  }

  if (!Dest.isExternallyDestructed())
    EmitStdInitializerListCleanup(CGF, array, alloc, initList);
}
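// Illustrative sketch (an assumption, not from the original source): for
//   std::initializer_list<int> il = {1, 2, 3};
// the function above builds a stack array and then fills in the list header,
// roughly:
//   int .initlist.[3] = {1, 2, 3};   // EmitArrayInit into a temporary alloca
//   il.__begin_ = &.initlist.[0];    // start pointer field
//   il.__end_   = &.initlist.[3];    // or il.__size_ = 3, depending on the
//                                    // library's second field
// Field names like __begin_ are purely illustrative; the code only checks the
// fields' types (element pointer, then pointer-or-size_t).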
/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
                                   QualType elementType, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  llvm::AllocaInst *endOfInit = 0;
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(element, endOfInit);
    }

    // If these are nested std::initializer_list inits, do them directly,
    // because they are conceptually the same "location".
    InitListExpr *initList = dyn_cast<InitListExpr>(E->getInit(i));
    if (initList && initList->initializesStdInitializerList()) {
      EmitStdInitializerList(element, initList);
    } else {
      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }
  }

  // Check whether there's a non-trivial array-fill expression.
  // Note that this will be a CXXConstructExpr even if the element
  // type is an array (or array of array, etc.) of class type.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = true;
  if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
    assert(cons->getConstructor()->isDefaultConstructor());
    hasTrivialFiller = cons->getConstructor()->isTrivial();
  }
  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
    if (filler)
      EmitInitializationToLValue(filler, elementLV);
    else
      EmitNullInitializationToLValue(elementLV);

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
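// Illustrative sketch (an assumption, not from the original source) of the
// fill loop's IR shape for, e.g., "int a[8] = {1};" with 7 trailing zeros:
//   arrayinit.body:
//     %cur  = phi i32* [ %start, %entry ], [ %next, %arrayinit.body ]
//     store i32 0, i32* %cur
//     %next = getelementptr inbounds i32* %cur, i64 1
//     %done = icmp eq i32* %next, %end
//     br i1 %done, label %arrayinit.end, label %arrayinit.body
// Value names follow the "arrayinit.*" labels used above; the exact IR depends
// on the element type and on whether an EH cleanup is live.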
//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    // FIXME: This is a band-aid; the real problem appears to be in our handling
    // of assignments, where we store directly into the LHS without checking
    // whether anything in the RHS aliases.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}
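// Illustrative sketch (an assumption, not from the original source) of the
// aliasing hazard that forces the extra copy above: in C,
//   struct Pair { int x, y; } p = {1, 2};
//   p = (struct Pair){ p.y, p.x };   // the literal reads the destination
// emitting the literal straight into 'p' could clobber p.x before p.y is
// read, so for POD types we evaluate the literal and then copy.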
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_LValueToRValue: // hope for downstream optimization
  case CK_NoOp:
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E, LV);
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasAttr<BlocksAttr>() &&
          E->getRHS()->HasSideEffects(CGF.getContext())) {
        // When the LHS is a __block variable, the RHS must be evaluated
        // first, as it may change the 'forwarding' field via a call to
        // Block_copy.
        LValue RHS = CGF.EmitLValue(E->getRHS());
        LValue LHS = CGF.EmitLValue(E->getLHS());
        Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                       needsGC(E->getLHS()->getType()),
                                       AggValueSlot::IsAliased);
        EmitFinalDestCopy(E, RHS, true);
        return;
      }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
  EmitFinalDestCopy(E, LHS, true);
}
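// Illustrative sketch (an assumption, not from the original source) of the
// __block ordering case above:
//   __block struct Agg a;
//   a = makeAgg(^{ use(&a); });   // the capture may copy 'a' to the heap
// Block_copy inside the call can update a.__forwarding, so the LHS address
// must be computed only after the RHS has been evaluated.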
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  Dest = EnsureSlot(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zeroed memory location is a no-op.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (!CGF.hasAggregateLLVMType(type)) {
    // For non-aggregates, we can store zero.
    llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->initializesStdInitializerList()) {
    EmitStdInitializerList(Dest.getAddr(), E);
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
                                     Dest.getAlignment());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    if (E->isStringLiteralInit())
      return Visit(E->getInit(0));

    QualType elementType =
        CGF.getContext().getAsArrayType(E->getType())->getElementType();

    llvm::PointerType *APType =
      cast<llvm::PointerType>(Dest.getAddr()->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    EmitArrayInit(Dest.getAddr(), AType, elementType, E);
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = 0;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                               fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;


    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, *field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}
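// Illustrative sketch (an assumption, not from the original source) of the
// field-by-field walk in VisitInitListExpr above:
//   struct S { unsigned a : 3; int b; } s = { 1 };
// stores 1 into the bitfield 'a' via EmitInitializationToLValue and then
// null-initializes 'b', since the explicit initializers ran out; if the slot
// were already zeroed, the store to 'b' would be skipped entirely.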
//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present) elements.
  // If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a
        // pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getContext().getTargetInfo().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }


  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getContext().getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                   .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset; emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();

  Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}
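// Worked example of the heuristic above (an illustration, not from the
// original source):
//   struct Big { char buf[256]; int x; } b = { {}, 1 };
// sizeof(Big) is 260 (> 16) and GetNumNonZeroBytesInInit returns 4, so
// 4*4 <= 260 and we emit one 260-byte memset followed by a single store of 1
// into b.x, instead of 260 bytes' worth of individual zero stores.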
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
///
/// \param IsInitializer - true if this evaluation is initializing an
/// object whose lifetime is already being managed.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
                                  bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile, unsigned Alignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the
  // first object, then the overlap shall be exact and the two objects shall
  // have qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);

  if (!Alignment)
    Alignment = TypeInfo.second.getQuantity();

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       Alignment, isVolatile);
}
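// Illustrative sketch (an assumption, not from the original source) of the
// IR this typically produces for a plain struct assignment:
//   struct Pt { int x, y; } a, b;
//   a = b;
// becomes, roughly,
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a.i8, i8* %b.i8, i64 8,
//                                        i32 4, i1 false)
// with the i1 'isVolatile' flag set when either operand is volatile, per the
// FIXME above.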
void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
                                                         const Expr *init) {
  const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(init);
  if (cleanups)
    init = cleanups->getSubExpr();

  if (isa<InitListExpr>(init) &&
      cast<InitListExpr>(init)->initializesStdInitializerList()) {
    // We initialized this std::initializer_list with an initializer list.
    // A backing array was created. Push a cleanup for it.
    EmitStdInitializerListCleanup(loc, cast<InitListExpr>(init));
  }
}

static void EmitRecursiveStdInitializerListCleanup(CodeGenFunction &CGF,
                                                   llvm::Value *arrayStart,
                                                   const InitListExpr *init) {
  // Check if there are any recursive cleanups to do, i.e. if we have
  //   std::initializer_list<std::initializer_list<obj>> list = {{obj()}};
  // then we need to destroy the inner array as well.
  for (unsigned i = 0, e = init->getNumInits(); i != e; ++i) {
    const InitListExpr *subInit = dyn_cast<InitListExpr>(init->getInit(i));
    if (!subInit || !subInit->initializesStdInitializerList())
      continue;

    // This one needs to be destroyed. Get the address of the std::init_list.
    llvm::Value *offset = llvm::ConstantInt::get(CGF.SizeTy, i);
    llvm::Value *loc = CGF.Builder.CreateInBoundsGEP(arrayStart, offset,
                                                     "std.initlist");
    CGF.EmitStdInitializerListCleanup(loc, subInit);
  }
}

void CodeGenFunction::EmitStdInitializerListCleanup(llvm::Value *loc,
                                                    const InitListExpr *init) {
  ASTContext &ctx = getContext();
  QualType element = GetStdInitializerListElementType(init->getType());
  unsigned numInits = init->getNumInits();
  llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
  QualType array =ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
  QualType arrayPtr = ctx.getPointerType(array);
  llvm::Type *arrayPtrType = ConvertType(arrayPtr);

  // lvalue is the location of a std::initializer_list, which as its first
  // element has a pointer to the array we want to destroy.
  llvm::Value *startPointer = Builder.CreateStructGEP(loc, 0, "startPointer");
  llvm::Value *startAddress = Builder.CreateLoad(startPointer, "startAddress");

  ::EmitRecursiveStdInitializerListCleanup(*this, startAddress, init);

  llvm::Value *arrayAddress =
      Builder.CreateBitCast(startAddress, arrayPtrType, "arrayAddress");
  ::EmitStdInitializerListCleanup(*this, array, arrayAddress, init);
}