//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CFG.h"
#include "llvm/Target/TargetData.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  const BinaryOperator *E;
};

namespace {
class VISIBILITY_HIDDEN ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }

  Value *EmitLoadOfLValue(LValue LV, QualType T) {
    return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
  }

  /// EmitLoadOfLValue - Given an expression with scalar type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    return EmitLoadOfLValue(EmitLValue(E), E->getType());
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType SrcTy);

  /// EmitScalarConversion - Emit a conversion from the specified type to the
  /// specified destination type, both of which are LLVM scalar types.
  Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);

  /// EmitComplexToScalarConversion - Emit a conversion from the specified
  /// complex type to the specified destination type, where the destination
  /// type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *VisitStmt(Stmt *S) {
    S->dump(CGF.getContext().getSourceManager());
    assert(0 && "Stmt can't have scalar result type!");
    return 0;
  }
  Value *VisitExpr(Expr *S);
  Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return llvm::ConstantInt::get(VMContext, E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return VMContext.getConstantFP(E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
    return VMContext.getNullValue(ConvertType(E->getType()));
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return VMContext.getNullValue(ConvertType(E->getType()));
  }
  Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  CGF.getContext().typesAreCompatible(
                                    E->getArgType1(), E->getArgType2()));
  }
  Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V =
      llvm::ConstantInt::get(llvm::Type::Int32Ty,
                             CGF.GetIDForAddrOfLabel(E->getLabel()));

    return Builder.CreateIntToPtr(V, ConvertType(E->getType()));
  }

  // l-values.
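  // Illustrative note: apart from enum constants, which fold directly to their
  // value, the visitors below form an LValue for the node and then load from
  // it.  For a simple reference to an 'int x', the emitted IR is roughly
  // '%tmp = load i32* %x' (value names here are only illustrative).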
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl()))
      return llvm::ConstantInt::get(VMContext, EC->getInitVal());
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitMemberExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitStringLiteral(Expr *E) { return EmitLValue(E).getAddress(); }
  Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
    return EmitLValue(E).getAddress();
  }

  Value *VisitPredefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); }

  Value *VisitInitListExpr(InitListExpr *E) {
    bool Ignore = TestAndClearIgnoreResultAssign();
    (void)Ignore;
    assert(Ignore == false && "init list ignored");
    unsigned NumInitElements = E->getNumInits();

    if (E->hadArrayRangeDesignator()) {
      CGF.ErrorUnsupported(E, "GNU array range designator extension");
    }

    const llvm::VectorType *VType =
      dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

    // We have a scalar in braces. Just use the first element.
    if (!VType)
      return Visit(E->getInit(0));

    unsigned NumVectorElements = VType->getNumElements();
    const llvm::Type *ElementType = VType->getElementType();

    // Emit individual vector element stores.
    llvm::Value *V = VMContext.getUndef(VType);

    // Emit initializers.
    unsigned i;
    for (i = 0; i < NumInitElements; ++i) {
      Value *NewV = Visit(E->getInit(i));
      Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
      V = Builder.CreateInsertElement(V, NewV, Idx);
    }

    // Emit remaining default initializers.
    for (/* Do not initialize i */; i < NumVectorElements; ++i) {
      Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
      llvm::Value *NewV = VMContext.getNullValue(ElementType);
      V = Builder.CreateInsertElement(V, NewV, Idx);
    }

    return V;
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return VMContext.getNullValue(ConvertType(E->getType()));
  }
  Value *VisitImplicitCastExpr(const ImplicitCastExpr *E);
  Value *VisitCastExpr(const CastExpr *E) {
    // Make sure to evaluate VLA bounds now so that we have them for later.
    if (E->getType()->isVariablyModifiedType())
      CGF.EmitVLASize(E->getType());

    return EmitCastExpr(E->getSubExpr(), E->getType());
  }
  Value *EmitCastExpr(const Expr *E, QualType T);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);

    return CGF.EmitCallExpr(E).getScalarVal();
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);

  // Unary Operators.
  Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre);
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    return VisitPrePostIncDec(E, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    return VisitPrePostIncDec(E, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    return VisitPrePostIncDec(E, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    return VisitPrePostIncDec(E, true, true);
  }
  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    return EmitLValue(E->getSubExpr()).getAddress();
  }
  Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryOffsetOf(const UnaryOperator *E);

  // C++
  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
    return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
  }
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (CGF.getContext().getLangOptions().OverflowChecking
        && Ops.Ty->isSignedIntegerType())
      return EmitOverflowCheckedBinOp(Ops);
    if (Ops.LHS->getType()->isFPOrFPVector())
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
  Value *VisitBin ## OP(const BinaryOperator *E) {                   \
    return Emit ## OP(EmitBinOps(E));                                \
  }                                                                  \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);    \
  }
  HANDLEBINOP(Mul);
  HANDLEBINOP(Div);
  HANDLEBINOP(Rem);
  HANDLEBINOP(Add);
  HANDLEBINOP(Sub);
  HANDLEBINOP(Shl);
  HANDLEBINOP(Shr);
  HANDLEBINOP(And);
  HANDLEBINOP(Xor);
  HANDLEBINOP(Or);
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
                     unsigned SICmpOpc, unsigned FCmpOpc);
#define VISITCOMP(CODE, UI, SI, FP) \
  Value *VisitBin##CODE(const BinaryOperator *E) { \
    return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                       llvm::FCmpInst::FP); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT);
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT);
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE);
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE);
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ);
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE);
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitConditionalOperator(const ConditionalOperator *CO);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value.  This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType->isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType()) {
    // Compare against 0.0 for fp scalars.
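    // The comparison below is unordered (UNE), so a NaN source converts to
    // true, matching C's rule that any nonzero value is true.  For a float
    // 'f', '(bool)f' is emitted roughly as
    //   %tobool = fcmp une float %f, 0.000000e+00
    // (illustrative IR; the actual value names come from the builder).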
    llvm::Value *Zero = VMContext.getNullValue(Src->getType());
    return Builder.CreateFCmpUNE(Src, Zero, "tobool");
  }

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  // Because of the type rules of C, we often end up computing a logical value,
  // then zero extending it to int, then wanting it as a logical value again.
  // Optimize this common case.
  if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
    if (ZI->getOperand(0)->getType() == llvm::Type::Int1Ty) {
      Value *Result = ZI->getOperand(0);
      // If there aren't any more uses, zap the instruction to save space.
      // Note that there can be more uses, for example if this
      // is the result of an assignment.
      if (ZI->use_empty())
        ZI->eraseFromParent();
      return Result;
    }
  }

  // Compare against an integer or pointer null.
  llvm::Value *Zero = VMContext.getNullValue(Src->getType());
  return Builder.CreateICmpNE(Src, Zero, "tobool");
}

/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType) {
  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return 0;

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  const llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (Src->getType() == DstTy)
    return Src;

  // Handle pointer conversions next: pointers can only be converted
  // to/from other pointers and integers. Check for pointer types in
  // terms of LLVM, as some native types (like Obj-C id) may map to a
  // pointer type.
  if (isa<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(Src->getType()))
      return Builder.CreateBitCast(Src, DstTy, "conv");
    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    const llvm::Type *MiddleTy = VMContext.getIntegerType(CGF.LLVMPointerWidth);
    bool InputSigned = SrcType->isSignedIntegerType();
    llvm::Value* IntResult =
      Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(Src->getType())) {
    // Must be a ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type.
  if (DstType->isExtVectorType() && !isa<VectorType>(SrcType)) {
    // Cast the scalar to element type.
    QualType EltTy = DstType->getAsExtVectorType()->getElementType();
    llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);

    // Insert the element in element zero of an undef vector.
    llvm::Value *UnV = VMContext.getUndef(DstTy);
    llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
    UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");

    // Splat the element across to all elements.
    llvm::SmallVector<llvm::Constant*, 16> Args;
    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
    for (unsigned i = 0; i < NumElements; i++)
      Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));

    llvm::Constant *Mask = VMContext.getConstantVector(&Args[0], NumElements);
    llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
    return Yay;
  }

  // Allow bitcast from vector to integer/fp of the same size.
  if (isa<llvm::VectorType>(Src->getType()) ||
      isa<llvm::VectorType>(DstTy))
    return Builder.CreateBitCast(Src, DstTy, "conv");

  // Finally, we have the arithmetic types: real int/float.
  if (isa<llvm::IntegerType>(Src->getType())) {
    bool InputSigned = SrcType->isSignedIntegerType();
    if (isa<llvm::IntegerType>(DstTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  assert(Src->getType()->isFloatingPoint() && "Unknown real conversion");
  if (isa<llvm::IntegerType>(DstTy)) {
    if (DstType->isSignedIntegerType())
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  assert(DstTy->isFloatingPoint() && "Unknown real conversion");
  if (DstTy->getTypeID() < Src->getType()->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  else
    return Builder.CreateFPExt(Src, DstTy, "conv");
}

/// EmitComplexToScalarConversion - Emit a conversion from the specified
/// complex type to the specified destination type, where the destination
/// type is an LLVM scalar type.
Value *ScalarExprEmitter::
EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                              QualType SrcTy, QualType DstTy) {
  // Get the source element type.
  SrcTy = SrcTy->getAsComplexType()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    // Complex != 0  -> (Real != 0) | (Imag != 0)
    Src.first  = EmitScalarConversion(Src.first, SrcTy, DstTy);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
  return EmitScalarConversion(Src.first, SrcTy, DstTy);
}


//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::VisitExpr(Expr *E) {
  CGF.ErrorUnsupported(E, "scalar expression");
  if (E->getType()->isVoidType())
    return 0;
  return VMContext.getUndef(CGF.ConvertType(E->getType()));
}

Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  llvm::SmallVector<llvm::Constant*, 32> indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
    indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))));
  }
  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
  Value* SV = VMContext.getConstantVector(indices.begin(), indices.size());
  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}

Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue contexts. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType())
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
  Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned,
                              "vecidxcast");
  return Builder.CreateExtractElement(Base, Idx, "vecext");
}

/// VisitImplicitCastExpr - Implicit casts are the same as normal casts, but
/// also handle things like function to pointer-to-function decay, and array to
/// pointer decay.
Value *ScalarExprEmitter::VisitImplicitCastExpr(const ImplicitCastExpr *E) {
  const Expr *Op = E->getSubExpr();

  // If this is due to array->pointer conversion, emit the array expression as
  // an l-value.
  if (Op->getType()->isArrayType()) {
    Value *V = EmitLValue(Op).getAddress();  // Bitfields can't be arrays.

    // Note that VLA pointers are always decayed, so we don't need to do
    // anything here.
    if (!Op->getType()->isVariableArrayType()) {
      assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
      assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
                                    ->getElementType()) &&
             "Expected pointer to array");
      V = Builder.CreateStructGEP(V, 0, "arraydecay");
    }

    // The resultant pointer type can be implicitly casted to other pointer
    // types as well (e.g. void*) and can be implicitly converted to integer.
    const llvm::Type *DestTy = ConvertType(E->getType());
    if (V->getType() != DestTy) {
      if (isa<llvm::PointerType>(DestTy))
        V = Builder.CreateBitCast(V, DestTy, "ptrconv");
      else {
        assert(isa<llvm::IntegerType>(DestTy) && "Unknown array decay");
        V = Builder.CreatePtrToInt(V, DestTy, "ptrconv");
      }
    }
    return V;
  }

  return EmitCastExpr(Op, E->getType());
}


// EmitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to pointer-to-function decay etc.
Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy) {
  if (!DestTy->isVoidType())
    TestAndClearIgnoreResultAssign();

  // Handle cases where the source is a non-complex type.
  if (!CGF.hasAggregateLLVMType(E->getType())) {
    Value *Src = Visit(const_cast<Expr*>(E));

    // Use EmitScalarConversion to perform the conversion.
    return EmitScalarConversion(Src, E->getType(), DestTy);
  }

  if (E->getType()->isAnyComplexType()) {
    // Handle cases where the source is a complex type.
    bool IgnoreImag = true;
    bool IgnoreImagAssign = true;
    bool IgnoreReal = IgnoreResultAssign;
    bool IgnoreRealAssign = IgnoreResultAssign;
    if (DestTy->isBooleanType())
      IgnoreImagAssign = IgnoreImag = false;
    else if (DestTy->isVoidType()) {
      IgnoreReal = IgnoreImag = false;
      IgnoreRealAssign = IgnoreImagAssign = true;
    }
    CodeGenFunction::ComplexPairTy V
      = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
                            IgnoreImagAssign);
    return EmitComplexToScalarConversion(V, E->getType(), DestTy);
  }

  // Okay, this is a cast from an aggregate.  It must be a cast to void.  Just
  // evaluate the result and return.
  CGF.EmitAggExpr(E, 0, false, true);
  return 0;
}

Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  return CGF.EmitCompoundStmt(*E->getSubStmt(),
                              !E->getType()->isVoidType()).getScalarVal();
}

Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
  return Builder.CreateLoad(CGF.GetAddrOfBlockDecl(E), false, "tmp");
}

//===----------------------------------------------------------------------===//
//                             Unary Operators
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
                                             bool isInc, bool isPre) {
  LValue LV = EmitLValue(E->getSubExpr());
  QualType ValTy = E->getSubExpr()->getType();
  Value *InVal = CGF.EmitLoadOfLValue(LV, ValTy).getScalarVal();

  int AmountVal = isInc ? 1 : -1;

  if (ValTy->isPointerType() &&
      ValTy->getAsPointerType()->isVariableArrayType()) {
    // The amount of the addition/subtraction needs to account for the VLA size.
    CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
  }

  Value *NextVal;
  if (const llvm::PointerType *PT =
        dyn_cast<llvm::PointerType>(InVal->getType())) {
    llvm::Constant *Inc =
      llvm::ConstantInt::get(llvm::Type::Int32Ty, AmountVal);
    if (!isa<llvm::FunctionType>(PT->getElementType())) {
      QualType PTEE = ValTy->getPointeeType();
      if (const ObjCInterfaceType *OIT =
            dyn_cast<ObjCInterfaceType>(PTEE)) {
        // Handle interface types, which are not represented with a concrete
        // type.
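        // A pointer to an Objective-C interface is stepped by the interface's
        // size in bytes (getTypeSize / 8) using an i8* GEP, since there is no
        // concrete LLVM type to index over.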
        int size = CGF.getContext().getTypeSize(OIT) / 8;
        if (!isInc)
          size = -size;
        Inc = llvm::ConstantInt::get(Inc->getType(), size);
        const llvm::Type *i8Ty =
          VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
        InVal = Builder.CreateBitCast(InVal, i8Ty);
        NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
        llvm::Value *lhs = LV.getAddress();
        lhs = Builder.CreateBitCast(lhs, VMContext.getPointerTypeUnqual(i8Ty));
        LV = LValue::MakeAddr(lhs, ValTy.getCVRQualifiers(),
                              CGF.getContext().getObjCGCAttrKind(ValTy));
      }
      else
        NextVal = Builder.CreateGEP(InVal, Inc, "ptrincdec");
    } else {
      const llvm::Type *i8Ty =
        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
      NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
      NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
      NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
    }
  } else if (InVal->getType() == llvm::Type::Int1Ty && isInc) {
    // Bool++ is an interesting case, due to promotion rules, we get:
    // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
    // Bool = ((int)Bool+1) != 0
    // An interesting aspect of this is that increment is always true.
    // Decrement does not have this property.
    NextVal = VMContext.getTrue();
  } else if (isa<llvm::IntegerType>(InVal->getType())) {
    NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
    NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
  } else {
    // Add the inc/dec to the floating-point value.
    if (InVal->getType() == llvm::Type::FloatTy)
      NextVal =
        VMContext.getConstantFP(llvm::APFloat(static_cast<float>(AmountVal)));
    else if (InVal->getType() == llvm::Type::DoubleTy)
      NextVal =
        VMContext.getConstantFP(llvm::APFloat(static_cast<double>(AmountVal)));
    else {
      llvm::APFloat F(static_cast<float>(AmountVal));
      bool ignored;
      F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
                &ignored);
      NextVal = VMContext.getConstantFP(F);
    }
    NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
  }

  // Store the updated result through the lvalue.
  if (LV.isBitfield())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy,
                                       &NextVal);
  else
    CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? NextVal : InVal;
}


Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());
  if (Op->getType()->isFPOrFPVector())
    return Builder.CreateFNeg(Op, "neg");
  return Builder.CreateNeg(Op, "neg");
}

Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());
  return Builder.CreateNot(Op, "neg");
}

Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
  // Compare operand to zero.
  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());

  // Invert value.
  // TODO: Could dynamically modify easy computations here. For example, if
  // the operand is an icmp ne, turn into icmp eq.
  BoolVal = Builder.CreateNot(BoolVal, "lnot");

  // ZExt result to the expr type.
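  // For example, '!i' for an 'int i' is emitted roughly as:
  //   %tobool = icmp ne i32 %i, 0
  //   %lnot = xor i1 %tobool, true
  //   %lnot.ext = zext i1 %lnot to i32
  // (illustrative IR; EvaluateExprAsBool produces the initial i1 value).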
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}

/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (E->isSizeOf()) {
    if (const VariableArrayType *VAT =
          CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVLASize(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of type
        // VLA, it is evaluated.
        CGF.EmitAnyExpr(E->getArgumentExpr());
      }

      return CGF.GetVLASize(VAT);
    }
  }

  // If this isn't sizeof(vla), the result must be constant; use the
  // constant folding logic so we don't have to duplicate it here.
  Expr::EvalResult Result;
  E->Evaluate(Result, CGF.getContext());
  return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
}

Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType())
    return CGF.EmitComplexExpr(Op, false, true, false, true).first;
  return Visit(Op);
}
Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType())
    return CGF.EmitComplexExpr(Op, true, false, true, false).second;

  // __imag on a scalar returns zero.  Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
    CGF.EmitLValue(Op);
  else
    CGF.EmitScalarExpr(Op, true);
  return VMContext.getNullValue(ConvertType(E->getType()));
}

Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E) {
  Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress();
  const llvm::Type* ResultType = ConvertType(E->getType());
  return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof");
}

//===----------------------------------------------------------------------===//
//                           Binary Operators
//===----------------------------------------------------------------------===//

BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = Visit(E->getLHS());
  Result.RHS = Visit(E->getRHS());
  Result.Ty  = E->getType();
  Result.E = E;
  return Result;
}

Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();

  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType()) {
    // This needs to go through the complex expression emitter, but
    // it's a tad complicated to do that... I'm leaving it out for now.
    // (Note that we do actually need the imaginary part of the RHS for
    // multiplication and division.)
    CGF.ErrorUnsupported(E, "complex compound assignment");
    return VMContext.getUndef(CGF.ConvertType(E->getType()));
  }

  // Emit the RHS first.  __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.
  OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = E->getComputationResultType();
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitLValue(E->getLHS());
  OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
  OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                    E->getComputationLHSType());

  // Expand the binary operator.
  Value *Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type.
  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);

  // Store the result value into the LHS lvalue. Bit-fields are
  // handled specially because the result is altered by the store,
  // i.e., [C99 6.5.16p1] 'An assignment expression has the value of
  // the left operand after the assignment...'.
  if (LHSLV.isBitfield()) {
    if (!LHSLV.isVolatileQualified()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
                                         &Result);
      return Result;
    } else
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
  } else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
  if (Ignore)
    return 0;
  return EmitLoadOfLValue(LHSLV, E->getType());
}


Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  if (Ops.LHS->getType()->isFPOrFPVector())
    return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
  else if (Ops.Ty->isUnsignedIntegerType())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}

Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if (Ops.Ty->isUnsignedIntegerType())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
  else
    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}

Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;

  switch (Ops.E->getOpcode()) {
  case BinaryOperator::Add:
  case BinaryOperator::AddAssign:
    OpID = 1;
    IID = llvm::Intrinsic::sadd_with_overflow;
    break;
  case BinaryOperator::Sub:
  case BinaryOperator::SubAssign:
    OpID = 2;
    IID = llvm::Intrinsic::ssub_with_overflow;
    break;
  case BinaryOperator::Mul:
  case BinaryOperator::MulAssign:
    OpID = 3;
    IID = llvm::Intrinsic::smul_with_overflow;
    break;
  default:
    assert(false && "Unsupported operation for overflow detection");
    IID = 0;
  }
  OpID <<= 1;
  OpID |= 1;

  const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);

  Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Branch in case of overflow.
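  // The emitted control flow has roughly this shape (block and value names
  // are illustrative):
  //     br i1 %overflow, label %overflow, label %overflow.continue
  //   overflow:                ; calls the __overflow_handler hook
  //     ...
  //     br label %overflow.continue
  //   overflow.continue:
  //     %res = phi i32 [ %result, %entry ], [ %handlerResult, %overflow ]
  // so the expression's value is the intrinsic result on the normal path and
  // the handler's return value on the overflowing path.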
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *overflowBB =
    CGF.createBasicBlock("overflow", CGF.CurFn);
  llvm::BasicBlock *continueBB =
    CGF.createBasicBlock("overflow.continue", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // Handle overflow.

  Builder.SetInsertPoint(overflowBB);

  // Handler is:
  //   long long (*__overflow_handler)(long long a, long long b, char op,
  //                                   char width)
  std::vector<const llvm::Type*> handlerArgTypes;
  handlerArgTypes.push_back(llvm::Type::Int64Ty);
  handlerArgTypes.push_back(llvm::Type::Int64Ty);
  handlerArgTypes.push_back(llvm::Type::Int8Ty);
  handlerArgTypes.push_back(llvm::Type::Int8Ty);
  llvm::FunctionType *handlerTy =
    VMContext.getFunctionType(llvm::Type::Int64Ty, handlerArgTypes, false);
  llvm::Value *handlerFunction =
    CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
        VMContext.getPointerTypeUnqual(handlerTy));
  handlerFunction = Builder.CreateLoad(handlerFunction);

  llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
      Builder.CreateSExt(Ops.LHS, llvm::Type::Int64Ty),
      Builder.CreateSExt(Ops.RHS, llvm::Type::Int64Ty),
      llvm::ConstantInt::get(llvm::Type::Int8Ty, OpID),
      llvm::ConstantInt::get(llvm::Type::Int8Ty,
        cast<llvm::IntegerType>(opTy)->getBitWidth()));

  handlerResult = Builder.CreateTrunc(handlerResult, opTy);

  Builder.CreateBr(continueBB);

  // Set up the continuation.
  Builder.SetInsertPoint(continueBB);
  // Get the correct result.
  llvm::PHINode *phi = Builder.CreatePHI(opTy);
  phi->reserveOperandSpace(2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}

Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
  if (!Ops.Ty->isAnyPointerType()) {
    if (CGF.getContext().getLangOptions().OverflowChecking &&
        Ops.Ty->isSignedIntegerType())
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVector())
      return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");

    return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
  }

  if (Ops.Ty->isPointerType() &&
      Ops.Ty->getAsPointerType()->isVariableArrayType()) {
    // The amount of the addition needs to account for the VLA size.
    CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
  }
  Value *Ptr, *Idx;
  Expr *IdxExp;
  const PointerType *PT = Ops.E->getLHS()->getType()->getAsPointerType();
  const ObjCObjectPointerType *OPT =
    Ops.E->getLHS()->getType()->getAsObjCObjectPointerType();
  if (PT || OPT) {
    Ptr = Ops.LHS;
    Idx = Ops.RHS;
    IdxExp = Ops.E->getRHS();
  } else {  // int + pointer
    PT = Ops.E->getRHS()->getType()->getAsPointerType();
    OPT = Ops.E->getRHS()->getType()->getAsObjCObjectPointerType();
    assert((PT || OPT) && "Invalid add expr");
    Ptr = Ops.RHS;
    Idx = Ops.LHS;
    IdxExp = Ops.E->getLHS();
  }

  unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
  if (Width < CGF.LLVMPointerWidth) {
    // Zero or sign extend the index, based on whether it is signed or not.
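    // For example, on a target with 64-bit pointers, 'p + i' for 'long *p' and
    // 'int i' is emitted roughly as:
    //   %idx.ext = sext i32 %i to i64
    //   %add.ptr = getelementptr i64* %p, i64 %idx.ext
    // (illustrative IR; an unsigned index would use zext instead).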
    const llvm::Type *IdxType = VMContext.getIntegerType(CGF.LLVMPointerWidth);
    if (IdxExp->getType()->isSignedIntegerType())
      Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
    else
      Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
  }
  const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
  // Handle interface types, which are not represented with a concrete type.
  if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
                             CGF.getContext().getTypeSize(OIT) / 8);
    Idx = Builder.CreateMul(Idx, InterfaceSize);
    const llvm::Type *i8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
    Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
    Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
    return Builder.CreateBitCast(Res, Ptr->getType());
  }

  // Explicitly handle GNU void* and function pointer arithmetic
  // extensions. The GNU void* casts amount to no-ops since our void*
  // type is i8*, but this is future proof.
  if (ElementType->isVoidType() || ElementType->isFunctionType()) {
    const llvm::Type *i8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
    Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
    Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
    return Builder.CreateBitCast(Res, Ptr->getType());
  }

  return Builder.CreateGEP(Ptr, Idx, "add.ptr");
}

Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
  if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
    if (CGF.getContext().getLangOptions().OverflowChecking
        && Ops.Ty->isSignedIntegerType())
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVector())
      return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
    return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
  }

  if (Ops.E->getLHS()->getType()->isPointerType() &&
      Ops.E->getLHS()->getType()->getAsPointerType()->isVariableArrayType()) {
    // The amount of the subtraction needs to account for the VLA size for
    // ptr-int.
    // The amount of the division needs to account for the VLA size for
    // ptr-ptr.
    CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction");
  }

  const QualType LHSType = Ops.E->getLHS()->getType();
  const QualType LHSElementType = LHSType->getPointeeType();
  if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
    // pointer - int
    Value *Idx = Ops.RHS;
    unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
    if (Width < CGF.LLVMPointerWidth) {
      // Zero or sign extend the index, based on whether it is signed or not.
      const llvm::Type *IdxType =
        VMContext.getIntegerType(CGF.LLVMPointerWidth);
      if (Ops.E->getRHS()->getType()->isSignedIntegerType())
        Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
      else
        Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
    }
    Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");

    // Handle interface types, which are not represented with a concrete
    // type.
    if (const ObjCInterfaceType *OIT =
          dyn_cast<ObjCInterfaceType>(LHSElementType)) {
      llvm::Value *InterfaceSize =
        llvm::ConstantInt::get(Idx->getType(),
                               CGF.getContext().getTypeSize(OIT) / 8);
      Idx = Builder.CreateMul(Idx, InterfaceSize);
      const llvm::Type *i8Ty =
        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
      Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
      return Builder.CreateBitCast(Res, Ops.LHS->getType());
    }

    // Explicitly handle GNU void* and function pointer arithmetic
    // extensions. The GNU void* casts amount to no-ops since our
    // void* type is i8*, but this is future proof.
    if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
      const llvm::Type *i8Ty =
        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
      Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
      return Builder.CreateBitCast(Res, Ops.LHS->getType());
    }

    return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
  } else {
    // pointer - pointer
    Value *LHS = Ops.LHS;
    Value *RHS = Ops.RHS;

    uint64_t ElementSize;

    // Handle GCC extension for pointer arithmetic on void* and function
    // pointer types.
    if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
      ElementSize = 1;
    } else {
      ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8;
    }

    const llvm::Type *ResultType = ConvertType(Ops.Ty);
    LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
    RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
    Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

    // Optimize out the shift for element size of 1.
    if (ElementSize == 1)
      return BytesBetween;

    // HACK: LLVM doesn't have a divide instruction that 'knows' there is no
    // remainder.  As such, we handle common power-of-two cases here to
    // generate better code.  See PR2247.
    if (llvm::isPowerOf2_64(ElementSize)) {
      Value *ShAmt =
        llvm::ConstantInt::get(ResultType, llvm::Log2_64(ElementSize));
      return Builder.CreateAShr(BytesBetween, ShAmt, "sub.ptr.shr");
    }

    // Otherwise, do a full sdiv.
    Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize);
    return Builder.CreateSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
  }
}

Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}

Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  if (Ops.Ty->isUnsignedIntegerType())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
                                      unsigned SICmpOpc, unsigned FCmpOpc) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  if (!LHSTy->isAnyComplexType()) {
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());

    if (LHS->getType()->isFPOrFPVector()) {
      Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
                                  LHS, RHS, "cmp");
    } else if (LHSTy->isSignedIntegerType()) {
      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
                                  LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.
      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                  LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the
    // appropriate vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
    CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());

    QualType CETy = LHSTy->getAsComplexType()->getElementType();

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
                                   LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
                                   LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons.  As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                   LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                   LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BinaryOperator::EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BinaryOperator::NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
}

Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  // __block variables need to have the rhs evaluated first, plus this should
  // improve codegen just a little.
  Value *RHS = Visit(E->getRHS());
  LValue LHS = EmitLValue(E->getLHS());

  // Store the value into the LHS.  Bit-fields are handled specially because
  // the result is altered by the store, i.e., [C99 6.5.16p1] 'An assignment
  // expression has the value of the left operand after the assignment...'.
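  // For example, given 'struct S { unsigned f : 3; } s;', the value of
  // 's.f = 100' is the value s.f holds after the store (100 truncated to
  // 3 bits, i.e. 4), not 100 itself.  That is why the bit-field path below
  // returns the value passed back through &RHS rather than the original RHS.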
  if (LHS.isBitfield()) {
    if (!LHS.isVolatileQualified()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
                                         &RHS);
      return RHS;
    } else
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
  } else
    CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
  if (Ignore)
    return 0;
  return EmitLoadOfLValue(LHS, E->getType());
}

Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
    if (Cond == 1) { // If we have 1 && X, just emit X.
      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int.
      return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0.
    if (!CGF.ContainsLabel(E->getRHS()))
      return VMContext.getNullValue(CGF.LLVMIntTy);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock  = CGF.createBasicBlock("land.rhs");

  // Branch on the LHS first.  If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition.  All of these values will be false.  Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
  PN->reserveOperandSpace(2);  // Normal case, two inputs.
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(VMContext.getFalse(), *PI);

  CGF.PushConditionalTempDestruction();
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  CGF.PopConditionalTempDestruction();

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExt(PN, CGF.LLVMIntTy, "land.ext");
}

Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
    if (Cond == -1) { // If we have 0 || X, just emit X.
      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int.
      return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(CGF.LLVMIntTy, 1);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  // Branch on the LHS first.  If it is true, go to the success (cont) block.
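  // The short-circuit lowering mirrors VisitBinLAnd.  For 'a || b' the emitted
  // CFG is roughly (names illustrative):
  //     br i1 %a.cond, label %lor.end, label %lor.rhs
  //   lor.rhs:
  //     %b.cond = ...
  //     br label %lor.end
  //   lor.end:
  //     %0 = phi i1 [ true, %entry ], [ %b.cond, %lor.rhs ]
  //     %lor.ext = zext i1 %0 to i32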
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition.  All of these values will be true.  Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
  PN->reserveOperandSpace(2);  // Normal case, two inputs.
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(VMContext.getTrue(), *PI);

  CGF.PushConditionalTempDestruction();

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  CGF.PopConditionalTempDestruction();

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExt(PN, CGF.LLVMIntTy, "lor.ext");
}

Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitStmt(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
//                             Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally.  This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr());

  // TODO: Allow anything we can constant fold to an integer or fp constant.
  if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
      isa<FloatingLiteral>(E))
    return true;

  // Non-volatile automatic variables too, to get "cond ? X : Y" where
  // X and Y are local variables.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasLocalStorage() && !VD->getType().isVolatileQualified())
        return true;

  return false;
}


Value *ScalarExprEmitter::
VisitConditionalOperator(const ConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();
  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())) {
    Expr *Live = E->getLHS(), *Dead = E->getRHS();
    if (Cond == -1)
      std::swap(Live, Dead);

    // If the dead side doesn't have labels we need, and if the Live side isn't
    // the gnu missing ?: extension (which we could handle, but don't bother
    // to), just emit the Live part.
    if ((!Dead || !CGF.ContainsLabel(Dead)) &&  // No labels in dead part
        Live)                                   // Live part isn't missing.
      return Visit(Live);
  }


  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow.  We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS()) &&
      isCheapEnoughToEvaluateUnconditionally(E->getRHS())) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
    llvm::Value *LHS = Visit(E->getLHS());
    llvm::Value *RHS = Visit(E->getRHS());
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }


  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
  Value *CondVal = 0;

  // If we don't have the GNU missing condition extension, emit a branch on
  // bool the normal way.
  if (E->getLHS()) {
    // Otherwise, just use EmitBranchOnBoolExpr to get small and simple code for
    // the branch on bool.
    CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
  } else {
    // Otherwise, for the ?: extension, evaluate the conditional and then
    // convert it to bool the hard way.  We do this explicitly because we need
    // the unconverted value for the missing middle value of the ?:.
    CondVal = CGF.EmitScalarExpr(E->getCond());

    // In some cases, EmitScalarConversion will delete the "CondVal" expression
    // if there are no extra uses (an optimization).  Inhibit this by making an
    // extra dead use, because we're going to add a use of CondVal later.  We
    // don't use the builder for this, because we don't want it to get optimized
    // away.  This leaves dead code, but the ?: extension isn't common.
    new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
                          Builder.GetInsertBlock());

    Value *CondBoolVal =
      CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
                               CGF.getContext().BoolTy);
    Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
  }

  CGF.PushConditionalTempDestruction();
  CGF.EmitBlock(LHSBlock);

  // Handle the GNU extension for missing LHS.
  Value *LHS;
  if (E->getLHS())
    LHS = Visit(E->getLHS());
  else    // Perform promotions, to handle cases like "short ?: int"
    LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());

  CGF.PopConditionalTempDestruction();
  LHSBlock = Builder.GetInsertBlock();
  CGF.EmitBranch(ContBlock);

  CGF.PushConditionalTempDestruction();
  CGF.EmitBlock(RHSBlock);

  Value *RHS = Visit(E->getRHS());
  CGF.PopConditionalTempDestruction();
  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBranch(ContBlock);

  CGF.EmitBlock(ContBlock);

  if (!LHS || !RHS) {
    assert(E->getType()->isVoidType() && "Non-void value should have a value");
    return 0;
  }

  // Create a PHI node for the result.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
  PN->reserveOperandSpace(2);
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);
  return PN;
}

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr(CGF.getContext()));
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  // If EmitVAArg fails, we fall back to the LLVM instruction.
  if (!ArgPtr)
    return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));

  // FIXME Volatility.
  return Builder.CreateLoad(ArgPtr);
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
  return CGF.BuildBlockLiteralTmp(BE);
}

//===----------------------------------------------------------------------===//
//                         Entry Point into this File
//===----------------------------------------------------------------------===//

/// EmitScalarExpr - Emit the computation of the specified expression of scalar
/// type, returning the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && !hasAggregateLLVMType(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
    .Visit(const_cast<Expr*>(E));
}

/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy) {
  assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
}

/// EmitComplexToScalarConversion - Emit a conversion from the specified
/// complex type to the specified destination type, where the destination
/// type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy) {
  assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
                                                                DstTy);
}

Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
  assert(V1->getType() == V2->getType() &&
         "Vector operands must be of the same type");
  unsigned NumElements =
    cast<llvm::VectorType>(V1->getType())->getNumElements();

  va_list va;
  va_start(va, V2);

  llvm::SmallVector<llvm::Constant*, 16> Args;
  for (unsigned i = 0; i < NumElements; i++) {
    int n = va_arg(va, int);
    assert(n >= 0 && n < (int)NumElements * 2 &&
           "Vector shuffle index out of bounds!");
    Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, n));
  }

  const char *Name = va_arg(va, const char *);
  va_end(va);

  llvm::Constant *Mask = VMContext.getConstantVector(&Args[0], NumElements);

  return Builder.CreateShuffleVector(V1, V2, Mask, Name);
}

llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
                                         unsigned NumVals, bool isSplat) {
  llvm::Value *Vec
    = VMContext.getUndef(VMContext.getVectorType(Vals[0]->getType(), NumVals));

  for (unsigned i = 0, e = NumVals; i != e; ++i) {
    llvm::Value *Val = isSplat ? Vals[0] : Vals[i];
    llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
    Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp");
  }

  return Vec;
}