//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Support/CFG.h"
#include "llvm/Target/TargetData.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  const BinaryOperator *E;
};

namespace {
class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }

  Value *EmitLoadOfLValue(LValue LV, QualType T) {
    return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
  }

  /// EmitLoadOfLValue - Given an expression with scalar type that represents
  /// an l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    return EmitLoadOfLValue(EmitCheckedLValue(E), E->getType());
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// EmitScalarConversion - Emit a conversion from the specified type to the
  /// specified destination type, both of which are LLVM scalar types.
  Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);

  /// EmitComplexToScalarConversion - Emit a conversion from the specified
  /// complex type to the specified destination type, where the destination
  /// type is an LLVM scalar type.
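  /// (Per the C99 rule quoted in the implementation below, only the real part
  /// participates in the conversion, except when the destination is bool, in
  /// which case both the real and imaginary parts are tested against zero.)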
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *VisitStmt(Stmt *S) {
    S->dump(CGF.getContext().getSourceManager());
    assert(0 && "Stmt can't have scalar result type!");
    return 0;
  }
  Value *VisitExpr(Expr *S);

  Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return llvm::ConstantInt::get(VMContext, E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  CGF.getContext().typesAreCompatible(
                                    E->getArgType1(), E->getArgType2()));
  }
  Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  // l-values.
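  // Several of the visitors below first try Expr::Evaluate so that references
  // which fold to an integer constant are emitted directly as a ConstantInt
  // instead of an address computation plus load.  A rough sketch of the kind
  // of source this catches (illustrative only):
  //
  //   enum { BlockSize = 512 };
  //   unsigned bytes(unsigned n) { return n * BlockSize; }
  //
  // Here the DeclRefExpr for BlockSize folds to the constant 512, so no
  // l-value is ever formed for it.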
143 Value *VisitDeclRefExpr(DeclRefExpr *E) { 144 Expr::EvalResult Result; 145 if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) { 146 assert(!Result.HasSideEffects && "Constant declref with side-effect?!"); 147 return llvm::ConstantInt::get(VMContext, Result.Val.getInt()); 148 } 149 return EmitLoadOfLValue(E); 150 } 151 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) { 152 return CGF.EmitObjCSelectorExpr(E); 153 } 154 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) { 155 return CGF.EmitObjCProtocolExpr(E); 156 } 157 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { 158 return EmitLoadOfLValue(E); 159 } 160 Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) { 161 return EmitLoadOfLValue(E); 162 } 163 Value *VisitObjCImplicitSetterGetterRefExpr( 164 ObjCImplicitSetterGetterRefExpr *E) { 165 return EmitLoadOfLValue(E); 166 } 167 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) { 168 return CGF.EmitObjCMessageExpr(E).getScalarVal(); 169 } 170 171 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) { 172 LValue LV = CGF.EmitObjCIsaExpr(E); 173 Value *V = CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal(); 174 return V; 175 } 176 177 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E); 178 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E); 179 Value *VisitMemberExpr(MemberExpr *E); 180 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); } 181 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { 182 return EmitLoadOfLValue(E); 183 } 184 185 Value *VisitInitListExpr(InitListExpr *E); 186 187 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { 188 return llvm::Constant::getNullValue(ConvertType(E->getType())); 189 } 190 Value *VisitCastExpr(CastExpr *E) { 191 // Make sure to evaluate VLA bounds now so that we have them for later. 192 if (E->getType()->isVariablyModifiedType()) 193 CGF.EmitVLASize(E->getType()); 194 195 return EmitCastExpr(E); 196 } 197 Value *EmitCastExpr(CastExpr *E); 198 199 Value *VisitCallExpr(const CallExpr *E) { 200 if (E->getCallReturnType()->isReferenceType()) 201 return EmitLoadOfLValue(E); 202 203 return CGF.EmitCallExpr(E).getScalarVal(); 204 } 205 206 Value *VisitStmtExpr(const StmtExpr *E); 207 208 Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E); 209 210 // Unary Operators. 211 Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre) { 212 LValue LV = EmitLValue(E->getSubExpr()); 213 return CGF.EmitScalarPrePostIncDec(E, LV, isInc, isPre); 214 } 215 Value *VisitUnaryPostDec(const UnaryOperator *E) { 216 return VisitPrePostIncDec(E, false, false); 217 } 218 Value *VisitUnaryPostInc(const UnaryOperator *E) { 219 return VisitPrePostIncDec(E, true, false); 220 } 221 Value *VisitUnaryPreDec(const UnaryOperator *E) { 222 return VisitPrePostIncDec(E, false, true); 223 } 224 Value *VisitUnaryPreInc(const UnaryOperator *E) { 225 return VisitPrePostIncDec(E, true, true); 226 } 227 Value *VisitUnaryAddrOf(const UnaryOperator *E) { 228 return EmitLValue(E->getSubExpr()).getAddress(); 229 } 230 Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); } 231 Value *VisitUnaryPlus(const UnaryOperator *E) { 232 // This differs from gcc, though, most likely due to a bug in gcc. 
233 TestAndClearIgnoreResultAssign(); 234 return Visit(E->getSubExpr()); 235 } 236 Value *VisitUnaryMinus (const UnaryOperator *E); 237 Value *VisitUnaryNot (const UnaryOperator *E); 238 Value *VisitUnaryLNot (const UnaryOperator *E); 239 Value *VisitUnaryReal (const UnaryOperator *E); 240 Value *VisitUnaryImag (const UnaryOperator *E); 241 Value *VisitUnaryExtension(const UnaryOperator *E) { 242 return Visit(E->getSubExpr()); 243 } 244 Value *VisitUnaryOffsetOf(const UnaryOperator *E); 245 246 // C++ 247 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { 248 return Visit(DAE->getExpr()); 249 } 250 Value *VisitCXXThisExpr(CXXThisExpr *TE) { 251 return CGF.LoadCXXThis(); 252 } 253 254 Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { 255 return CGF.EmitCXXExprWithTemporaries(E).getScalarVal(); 256 } 257 Value *VisitCXXNewExpr(const CXXNewExpr *E) { 258 return CGF.EmitCXXNewExpr(E); 259 } 260 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) { 261 CGF.EmitCXXDeleteExpr(E); 262 return 0; 263 } 264 Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) { 265 return llvm::ConstantInt::get(Builder.getInt1Ty(), 266 E->EvaluateTrait(CGF.getContext())); 267 } 268 269 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) { 270 // C++ [expr.pseudo]p1: 271 // The result shall only be used as the operand for the function call 272 // operator (), and the result of such a call has type void. The only 273 // effect is the evaluation of the postfix-expression before the dot or 274 // arrow. 275 CGF.EmitScalarExpr(E->getBase()); 276 return 0; 277 } 278 279 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) { 280 return llvm::Constant::getNullValue(ConvertType(E->getType())); 281 } 282 283 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) { 284 CGF.EmitCXXThrowExpr(E); 285 return 0; 286 } 287 288 // Binary Operators. 289 Value *EmitMul(const BinOpInfo &Ops) { 290 if (CGF.getContext().getLangOptions().OverflowChecking 291 && Ops.Ty->isSignedIntegerType()) 292 return EmitOverflowCheckedBinOp(Ops); 293 if (Ops.LHS->getType()->isFPOrFPVector()) 294 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul"); 295 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul"); 296 } 297 /// Create a binary op that checks for overflow. 298 /// Currently only supports +, - and *. 299 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops); 300 Value *EmitDiv(const BinOpInfo &Ops); 301 Value *EmitRem(const BinOpInfo &Ops); 302 Value *EmitAdd(const BinOpInfo &Ops); 303 Value *EmitSub(const BinOpInfo &Ops); 304 Value *EmitShl(const BinOpInfo &Ops); 305 Value *EmitShr(const BinOpInfo &Ops); 306 Value *EmitAnd(const BinOpInfo &Ops) { 307 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and"); 308 } 309 Value *EmitXor(const BinOpInfo &Ops) { 310 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor"); 311 } 312 Value *EmitOr (const BinOpInfo &Ops) { 313 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or"); 314 } 315 316 BinOpInfo EmitBinOps(const BinaryOperator *E); 317 Value *EmitCompoundAssign(const CompoundAssignOperator *E, 318 Value *(ScalarExprEmitter::*F)(const BinOpInfo &)); 319 320 // Binary operators and binary compound assignment operators. 
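  // The HANDLEBINOP macro below stamps out a Visit method for each binary
  // operator and for its compound-assignment form; for example,
  // HANDLEBINOP(Mul) expands to:
  //
  //   Value *VisitBinMul(const BinaryOperator *E) {
  //     return EmitMul(EmitBinOps(E));
  //   }
  //   Value *VisitBinMulAssign(const CompoundAssignOperator *E) {
  //     return EmitCompoundAssign(E, &ScalarExprEmitter::EmitMul);
  //   }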
321 #define HANDLEBINOP(OP) \ 322 Value *VisitBin ## OP(const BinaryOperator *E) { \ 323 return Emit ## OP(EmitBinOps(E)); \ 324 } \ 325 Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \ 326 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \ 327 } 328 HANDLEBINOP(Mul) 329 HANDLEBINOP(Div) 330 HANDLEBINOP(Rem) 331 HANDLEBINOP(Add) 332 HANDLEBINOP(Sub) 333 HANDLEBINOP(Shl) 334 HANDLEBINOP(Shr) 335 HANDLEBINOP(And) 336 HANDLEBINOP(Xor) 337 HANDLEBINOP(Or) 338 #undef HANDLEBINOP 339 340 // Comparisons. 341 Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc, 342 unsigned SICmpOpc, unsigned FCmpOpc); 343 #define VISITCOMP(CODE, UI, SI, FP) \ 344 Value *VisitBin##CODE(const BinaryOperator *E) { \ 345 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \ 346 llvm::FCmpInst::FP); } 347 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT) 348 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT) 349 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE) 350 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE) 351 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ) 352 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE) 353 #undef VISITCOMP 354 355 Value *VisitBinAssign (const BinaryOperator *E); 356 357 Value *VisitBinLAnd (const BinaryOperator *E); 358 Value *VisitBinLOr (const BinaryOperator *E); 359 Value *VisitBinComma (const BinaryOperator *E); 360 361 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); } 362 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); } 363 364 // Other Operators. 365 Value *VisitBlockExpr(const BlockExpr *BE); 366 Value *VisitConditionalOperator(const ConditionalOperator *CO); 367 Value *VisitChooseExpr(ChooseExpr *CE); 368 Value *VisitVAArgExpr(VAArgExpr *VE); 369 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) { 370 return CGF.EmitObjCStringLiteral(E); 371 } 372 }; 373 } // end anonymous namespace. 374 375 //===----------------------------------------------------------------------===// 376 // Utilities 377 //===----------------------------------------------------------------------===// 378 379 /// EmitConversionToBool - Convert the specified expression value to a 380 /// boolean (i1) truth value. This is equivalent to "Val != 0". 381 Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) { 382 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); 383 384 if (SrcType->isRealFloatingType()) { 385 // Compare against 0.0 for fp scalars. 386 llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType()); 387 return Builder.CreateFCmpUNE(Src, Zero, "tobool"); 388 } 389 390 if (SrcType->isMemberPointerType()) { 391 // FIXME: This is ABI specific. 392 393 // Compare against -1. 394 llvm::Value *NegativeOne = llvm::Constant::getAllOnesValue(Src->getType()); 395 return Builder.CreateICmpNE(Src, NegativeOne, "tobool"); 396 } 397 398 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && 399 "Unknown scalar type to convert"); 400 401 // Because of the type rules of C, we often end up computing a logical value, 402 // then zero extending it to int, then wanting it as a logical value again. 403 // Optimize this common case. 404 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) { 405 if (ZI->getOperand(0)->getType() == 406 llvm::Type::getInt1Ty(CGF.getLLVMContext())) { 407 Value *Result = ZI->getOperand(0); 408 // If there aren't any more uses, zap the instruction to save space. 
409 // Note that there can be more uses, for example if this 410 // is the result of an assignment. 411 if (ZI->use_empty()) 412 ZI->eraseFromParent(); 413 return Result; 414 } 415 } 416 417 // Compare against an integer or pointer null. 418 llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType()); 419 return Builder.CreateICmpNE(Src, Zero, "tobool"); 420 } 421 422 /// EmitScalarConversion - Emit a conversion from the specified type to the 423 /// specified destination type, both of which are LLVM scalar types. 424 Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, 425 QualType DstType) { 426 SrcType = CGF.getContext().getCanonicalType(SrcType); 427 DstType = CGF.getContext().getCanonicalType(DstType); 428 if (SrcType == DstType) return Src; 429 430 if (DstType->isVoidType()) return 0; 431 432 llvm::LLVMContext &VMContext = CGF.getLLVMContext(); 433 434 // Handle conversions to bool first, they are special: comparisons against 0. 435 if (DstType->isBooleanType()) 436 return EmitConversionToBool(Src, SrcType); 437 438 const llvm::Type *DstTy = ConvertType(DstType); 439 440 // Ignore conversions like int -> uint. 441 if (Src->getType() == DstTy) 442 return Src; 443 444 // Handle pointer conversions next: pointers can only be converted to/from 445 // other pointers and integers. Check for pointer types in terms of LLVM, as 446 // some native types (like Obj-C id) may map to a pointer type. 447 if (isa<llvm::PointerType>(DstTy)) { 448 // The source value may be an integer, or a pointer. 449 if (isa<llvm::PointerType>(Src->getType())) 450 return Builder.CreateBitCast(Src, DstTy, "conv"); 451 452 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?"); 453 // First, convert to the correct width so that we control the kind of 454 // extension. 455 const llvm::Type *MiddleTy = 456 llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); 457 bool InputSigned = SrcType->isSignedIntegerType(); 458 llvm::Value* IntResult = 459 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv"); 460 // Then, cast to pointer. 461 return Builder.CreateIntToPtr(IntResult, DstTy, "conv"); 462 } 463 464 if (isa<llvm::PointerType>(Src->getType())) { 465 // Must be an ptr to int cast. 466 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?"); 467 return Builder.CreatePtrToInt(Src, DstTy, "conv"); 468 } 469 470 // A scalar can be splatted to an extended vector of the same element type 471 if (DstType->isExtVectorType() && !SrcType->isVectorType()) { 472 // Cast the scalar to element type 473 QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType(); 474 llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy); 475 476 // Insert the element in element zero of an undef vector 477 llvm::Value *UnV = llvm::UndefValue::get(DstTy); 478 llvm::Value *Idx = 479 llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); 480 UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp"); 481 482 // Splat the element across to all elements 483 llvm::SmallVector<llvm::Constant*, 16> Args; 484 unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements(); 485 for (unsigned i = 0; i < NumElements; i++) 486 Args.push_back(llvm::ConstantInt::get( 487 llvm::Type::getInt32Ty(VMContext), 0)); 488 489 llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements); 490 llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat"); 491 return Yay; 492 } 493 494 // Allow bitcast from vector to integer/fp of the same size. 
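  // (For example, a 64-bit vector value such as <2 x float> may be bitcast
  //  to a double or an i64 of the same width.)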
495 if (isa<llvm::VectorType>(Src->getType()) || 496 isa<llvm::VectorType>(DstTy)) 497 return Builder.CreateBitCast(Src, DstTy, "conv"); 498 499 // Finally, we have the arithmetic types: real int/float. 500 if (isa<llvm::IntegerType>(Src->getType())) { 501 bool InputSigned = SrcType->isSignedIntegerType(); 502 if (isa<llvm::IntegerType>(DstTy)) 503 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv"); 504 else if (InputSigned) 505 return Builder.CreateSIToFP(Src, DstTy, "conv"); 506 else 507 return Builder.CreateUIToFP(Src, DstTy, "conv"); 508 } 509 510 assert(Src->getType()->isFloatingPoint() && "Unknown real conversion"); 511 if (isa<llvm::IntegerType>(DstTy)) { 512 if (DstType->isSignedIntegerType()) 513 return Builder.CreateFPToSI(Src, DstTy, "conv"); 514 else 515 return Builder.CreateFPToUI(Src, DstTy, "conv"); 516 } 517 518 assert(DstTy->isFloatingPoint() && "Unknown real conversion"); 519 if (DstTy->getTypeID() < Src->getType()->getTypeID()) 520 return Builder.CreateFPTrunc(Src, DstTy, "conv"); 521 else 522 return Builder.CreateFPExt(Src, DstTy, "conv"); 523 } 524 525 /// EmitComplexToScalarConversion - Emit a conversion from the specified complex 526 /// type to the specified destination type, where the destination type is an 527 /// LLVM scalar type. 528 Value *ScalarExprEmitter:: 529 EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, 530 QualType SrcTy, QualType DstTy) { 531 // Get the source element type. 532 SrcTy = SrcTy->getAs<ComplexType>()->getElementType(); 533 534 // Handle conversions to bool first, they are special: comparisons against 0. 535 if (DstTy->isBooleanType()) { 536 // Complex != 0 -> (Real != 0) | (Imag != 0) 537 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy); 538 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy); 539 return Builder.CreateOr(Src.first, Src.second, "tobool"); 540 } 541 542 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type, 543 // the imaginary part of the complex value is discarded and the value of the 544 // real part is converted according to the conversion rules for the 545 // corresponding real type. 
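  // For example (using the GNU imaginary-constant extension),
  // (int)(3.0 + 4.0i) discards the imaginary part and converts 3.0, giving 3.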
546 return EmitScalarConversion(Src.first, SrcTy, DstTy); 547 } 548 549 550 //===----------------------------------------------------------------------===// 551 // Visitor Methods 552 //===----------------------------------------------------------------------===// 553 554 Value *ScalarExprEmitter::VisitExpr(Expr *E) { 555 CGF.ErrorUnsupported(E, "scalar expression"); 556 if (E->getType()->isVoidType()) 557 return 0; 558 return llvm::UndefValue::get(CGF.ConvertType(E->getType())); 559 } 560 561 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { 562 llvm::SmallVector<llvm::Constant*, 32> indices; 563 for (unsigned i = 2; i < E->getNumSubExprs(); i++) { 564 indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i)))); 565 } 566 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0)); 567 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1)); 568 Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size()); 569 return Builder.CreateShuffleVector(V1, V2, SV, "shuffle"); 570 } 571 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { 572 Expr::EvalResult Result; 573 if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) { 574 if (E->isArrow()) 575 CGF.EmitScalarExpr(E->getBase()); 576 else 577 EmitLValue(E->getBase()); 578 return llvm::ConstantInt::get(VMContext, Result.Val.getInt()); 579 } 580 return EmitLoadOfLValue(E); 581 } 582 583 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { 584 TestAndClearIgnoreResultAssign(); 585 586 // Emit subscript expressions in rvalue context's. For most cases, this just 587 // loads the lvalue formed by the subscript expr. However, we have to be 588 // careful, because the base of a vector subscript is occasionally an rvalue, 589 // so we can't get it as an lvalue. 590 if (!E->getBase()->getType()->isVectorType()) 591 return EmitLoadOfLValue(E); 592 593 // Handle the vector case. The base must be a vector, the index must be an 594 // integer value. 595 Value *Base = Visit(E->getBase()); 596 Value *Idx = Visit(E->getIdx()); 597 bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType(); 598 Idx = Builder.CreateIntCast(Idx, 599 llvm::Type::getInt32Ty(CGF.getLLVMContext()), 600 IdxSigned, 601 "vecidxcast"); 602 return Builder.CreateExtractElement(Base, Idx, "vecext"); 603 } 604 605 static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, 606 unsigned Off, const llvm::Type *I32Ty) { 607 int MV = SVI->getMaskValue(Idx); 608 if (MV == -1) 609 return llvm::UndefValue::get(I32Ty); 610 return llvm::ConstantInt::get(I32Ty, Off+MV); 611 } 612 613 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { 614 bool Ignore = TestAndClearIgnoreResultAssign(); 615 (void)Ignore; 616 assert (Ignore == false && "init list ignored"); 617 unsigned NumInitElements = E->getNumInits(); 618 619 if (E->hadArrayRangeDesignator()) 620 CGF.ErrorUnsupported(E, "GNU array range designator extension"); 621 622 const llvm::VectorType *VType = 623 dyn_cast<llvm::VectorType>(ConvertType(E->getType())); 624 625 // We have a scalar in braces. Just use the first element. 626 if (!VType) 627 return Visit(E->getInit(0)); 628 629 unsigned ResElts = VType->getNumElements(); 630 const llvm::Type *I32Ty = llvm::Type::getInt32Ty(CGF.getLLVMContext()); 631 632 // Loop over initializers collecting the Value for each, and remembering 633 // whether the source was swizzle (ExtVectorElementExpr). 
This will allow 634 // us to fold the shuffle for the swizzle into the shuffle for the vector 635 // initializer, since LLVM optimizers generally do not want to touch 636 // shuffles. 637 unsigned CurIdx = 0; 638 bool VIsUndefShuffle = false; 639 llvm::Value *V = llvm::UndefValue::get(VType); 640 for (unsigned i = 0; i != NumInitElements; ++i) { 641 Expr *IE = E->getInit(i); 642 Value *Init = Visit(IE); 643 llvm::SmallVector<llvm::Constant*, 16> Args; 644 645 const llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType()); 646 647 // Handle scalar elements. If the scalar initializer is actually one 648 // element of a different vector of the same width, use shuffle instead of 649 // extract+insert. 650 if (!VVT) { 651 if (isa<ExtVectorElementExpr>(IE)) { 652 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init); 653 654 if (EI->getVectorOperandType()->getNumElements() == ResElts) { 655 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand()); 656 Value *LHS = 0, *RHS = 0; 657 if (CurIdx == 0) { 658 // insert into undef -> shuffle (src, undef) 659 Args.push_back(C); 660 for (unsigned j = 1; j != ResElts; ++j) 661 Args.push_back(llvm::UndefValue::get(I32Ty)); 662 663 LHS = EI->getVectorOperand(); 664 RHS = V; 665 VIsUndefShuffle = true; 666 } else if (VIsUndefShuffle) { 667 // insert into undefshuffle && size match -> shuffle (v, src) 668 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V); 669 for (unsigned j = 0; j != CurIdx; ++j) 670 Args.push_back(getMaskElt(SVV, j, 0, I32Ty)); 671 Args.push_back(llvm::ConstantInt::get(I32Ty, 672 ResElts + C->getZExtValue())); 673 for (unsigned j = CurIdx + 1; j != ResElts; ++j) 674 Args.push_back(llvm::UndefValue::get(I32Ty)); 675 676 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 677 RHS = EI->getVectorOperand(); 678 VIsUndefShuffle = false; 679 } 680 if (!Args.empty()) { 681 llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts); 682 V = Builder.CreateShuffleVector(LHS, RHS, Mask); 683 ++CurIdx; 684 continue; 685 } 686 } 687 } 688 Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx); 689 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit"); 690 VIsUndefShuffle = false; 691 ++CurIdx; 692 continue; 693 } 694 695 unsigned InitElts = VVT->getNumElements(); 696 697 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's 698 // input is the same width as the vector being constructed, generate an 699 // optimized shuffle of the swizzle input into the result. 700 unsigned Offset = (CurIdx == 0) ? 0 : ResElts; 701 if (isa<ExtVectorElementExpr>(IE)) { 702 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init); 703 Value *SVOp = SVI->getOperand(0); 704 const llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType()); 705 706 if (OpTy->getNumElements() == ResElts) { 707 for (unsigned j = 0; j != CurIdx; ++j) { 708 // If the current vector initializer is a shuffle with undef, merge 709 // this shuffle directly into it. 
710 if (VIsUndefShuffle) { 711 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0, 712 I32Ty)); 713 } else { 714 Args.push_back(llvm::ConstantInt::get(I32Ty, j)); 715 } 716 } 717 for (unsigned j = 0, je = InitElts; j != je; ++j) 718 Args.push_back(getMaskElt(SVI, j, Offset, I32Ty)); 719 for (unsigned j = CurIdx + InitElts; j != ResElts; ++j) 720 Args.push_back(llvm::UndefValue::get(I32Ty)); 721 722 if (VIsUndefShuffle) 723 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 724 725 Init = SVOp; 726 } 727 } 728 729 // Extend init to result vector length, and then shuffle its contribution 730 // to the vector initializer into V. 731 if (Args.empty()) { 732 for (unsigned j = 0; j != InitElts; ++j) 733 Args.push_back(llvm::ConstantInt::get(I32Ty, j)); 734 for (unsigned j = InitElts; j != ResElts; ++j) 735 Args.push_back(llvm::UndefValue::get(I32Ty)); 736 llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts); 737 Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT), 738 Mask, "vext"); 739 740 Args.clear(); 741 for (unsigned j = 0; j != CurIdx; ++j) 742 Args.push_back(llvm::ConstantInt::get(I32Ty, j)); 743 for (unsigned j = 0; j != InitElts; ++j) 744 Args.push_back(llvm::ConstantInt::get(I32Ty, j+Offset)); 745 for (unsigned j = CurIdx + InitElts; j != ResElts; ++j) 746 Args.push_back(llvm::UndefValue::get(I32Ty)); 747 } 748 749 // If V is undef, make sure it ends up on the RHS of the shuffle to aid 750 // merging subsequent shuffles into this one. 751 if (CurIdx == 0) 752 std::swap(V, Init); 753 llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts); 754 V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit"); 755 VIsUndefShuffle = isa<llvm::UndefValue>(Init); 756 CurIdx += InitElts; 757 } 758 759 // FIXME: evaluate codegen vs. shuffling against constant null vector. 760 // Emit remaining default initializers. 761 const llvm::Type *EltTy = VType->getElementType(); 762 763 // Emit remaining default initializers 764 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) { 765 Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx); 766 llvm::Value *Init = llvm::Constant::getNullValue(EltTy); 767 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit"); 768 } 769 return V; 770 } 771 772 static bool ShouldNullCheckClassCastValue(const CastExpr *CE) { 773 const Expr *E = CE->getSubExpr(); 774 775 if (isa<CXXThisExpr>(E)) { 776 // We always assume that 'this' is never null. 777 return false; 778 } 779 780 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) { 781 // And that lvalue casts are never null. 782 if (ICE->isLvalueCast()) 783 return false; 784 } 785 786 return true; 787 } 788 789 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts 790 // have to handle a more broad range of conversions than explicit casts, as they 791 // handle things like function to ptr-to-function decay etc. 792 Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) { 793 Expr *E = CE->getSubExpr(); 794 QualType DestTy = CE->getType(); 795 CastExpr::CastKind Kind = CE->getCastKind(); 796 797 if (!DestTy->isVoidType()) 798 TestAndClearIgnoreResultAssign(); 799 800 // Since almost all cast kinds apply to scalars, this switch doesn't have 801 // a default case, so the compiler will warn on a missing case. The cases 802 // are in the same order as in the CastKind enum. 803 switch (Kind) { 804 case CastExpr::CK_Unknown: 805 // FIXME: All casts should have a known kind! 
806 //assert(0 && "Unknown cast kind!"); 807 break; 808 809 case CastExpr::CK_AnyPointerToObjCPointerCast: 810 case CastExpr::CK_AnyPointerToBlockPointerCast: 811 case CastExpr::CK_BitCast: { 812 Value *Src = Visit(const_cast<Expr*>(E)); 813 return Builder.CreateBitCast(Src, ConvertType(DestTy)); 814 } 815 case CastExpr::CK_NoOp: 816 case CastExpr::CK_UserDefinedConversion: 817 return Visit(const_cast<Expr*>(E)); 818 819 case CastExpr::CK_BaseToDerived: { 820 const CXXRecordDecl *BaseClassDecl = 821 E->getType()->getCXXRecordDeclForPointerType(); 822 const CXXRecordDecl *DerivedClassDecl = 823 DestTy->getCXXRecordDeclForPointerType(); 824 825 Value *Src = Visit(const_cast<Expr*>(E)); 826 827 bool NullCheckValue = ShouldNullCheckClassCastValue(CE); 828 return CGF.GetAddressOfDerivedClass(Src, BaseClassDecl, DerivedClassDecl, 829 NullCheckValue); 830 } 831 case CastExpr::CK_DerivedToBase: { 832 const RecordType *DerivedClassTy = 833 E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>(); 834 CXXRecordDecl *DerivedClassDecl = 835 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 836 837 const RecordType *BaseClassTy = 838 DestTy->getAs<PointerType>()->getPointeeType()->getAs<RecordType>(); 839 CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseClassTy->getDecl()); 840 841 Value *Src = Visit(const_cast<Expr*>(E)); 842 843 bool NullCheckValue = ShouldNullCheckClassCastValue(CE); 844 return CGF.GetAddressOfBaseClass(Src, DerivedClassDecl, BaseClassDecl, 845 NullCheckValue); 846 } 847 case CastExpr::CK_Dynamic: { 848 Value *V = Visit(const_cast<Expr*>(E)); 849 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE); 850 return CGF.EmitDynamicCast(V, DCE); 851 } 852 case CastExpr::CK_ToUnion: 853 assert(0 && "Should be unreachable!"); 854 break; 855 856 case CastExpr::CK_ArrayToPointerDecay: { 857 assert(E->getType()->isArrayType() && 858 "Array to pointer decay must have array source type!"); 859 860 Value *V = EmitLValue(E).getAddress(); // Bitfields can't be arrays. 861 862 // Note that VLA pointers are always decayed, so we don't need to do 863 // anything here. 864 if (!E->getType()->isVariableArrayType()) { 865 assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer"); 866 assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType()) 867 ->getElementType()) && 868 "Expected pointer to array"); 869 V = Builder.CreateStructGEP(V, 0, "arraydecay"); 870 } 871 872 return V; 873 } 874 case CastExpr::CK_FunctionToPointerDecay: 875 return EmitLValue(E).getAddress(); 876 877 case CastExpr::CK_NullToMemberPointer: 878 return CGF.CGM.EmitNullConstant(DestTy); 879 880 case CastExpr::CK_BaseToDerivedMemberPointer: 881 case CastExpr::CK_DerivedToBaseMemberPointer: { 882 Value *Src = Visit(E); 883 884 // See if we need to adjust the pointer. 
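    // (A pointer to data member is represented here as a byte offset into the
    //  class, so the conversion below just shifts that offset by the offset
    //  of the base class within the derived class, when it is nonzero.)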
885 const CXXRecordDecl *BaseDecl = 886 cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()-> 887 getClass()->getAs<RecordType>()->getDecl()); 888 const CXXRecordDecl *DerivedDecl = 889 cast<CXXRecordDecl>(CE->getType()->getAs<MemberPointerType>()-> 890 getClass()->getAs<RecordType>()->getDecl()); 891 if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer) 892 std::swap(DerivedDecl, BaseDecl); 893 894 llvm::Constant *Adj = CGF.CGM.GetCXXBaseClassOffset(DerivedDecl, BaseDecl); 895 if (Adj) { 896 if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer) 897 Src = Builder.CreateSub(Src, Adj, "adj"); 898 else 899 Src = Builder.CreateAdd(Src, Adj, "adj"); 900 } 901 return Src; 902 } 903 904 case CastExpr::CK_ConstructorConversion: 905 assert(0 && "Should be unreachable!"); 906 break; 907 908 case CastExpr::CK_IntegralToPointer: { 909 Value *Src = Visit(const_cast<Expr*>(E)); 910 911 // First, convert to the correct width so that we control the kind of 912 // extension. 913 const llvm::Type *MiddleTy = 914 llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); 915 bool InputSigned = E->getType()->isSignedIntegerType(); 916 llvm::Value* IntResult = 917 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv"); 918 919 return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy)); 920 } 921 case CastExpr::CK_PointerToIntegral: { 922 Value *Src = Visit(const_cast<Expr*>(E)); 923 return Builder.CreatePtrToInt(Src, ConvertType(DestTy)); 924 } 925 case CastExpr::CK_ToVoid: { 926 CGF.EmitAnyExpr(E, 0, false, true); 927 return 0; 928 } 929 case CastExpr::CK_VectorSplat: { 930 const llvm::Type *DstTy = ConvertType(DestTy); 931 Value *Elt = Visit(const_cast<Expr*>(E)); 932 933 // Insert the element in element zero of an undef vector 934 llvm::Value *UnV = llvm::UndefValue::get(DstTy); 935 llvm::Value *Idx = 936 llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); 937 UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp"); 938 939 // Splat the element across to all elements 940 llvm::SmallVector<llvm::Constant*, 16> Args; 941 unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements(); 942 for (unsigned i = 0; i < NumElements; i++) 943 Args.push_back(llvm::ConstantInt::get( 944 llvm::Type::getInt32Ty(VMContext), 0)); 945 946 llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements); 947 llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat"); 948 return Yay; 949 } 950 case CastExpr::CK_IntegralCast: 951 case CastExpr::CK_IntegralToFloating: 952 case CastExpr::CK_FloatingToIntegral: 953 case CastExpr::CK_FloatingCast: 954 return EmitScalarConversion(Visit(E), E->getType(), DestTy); 955 956 case CastExpr::CK_MemberPointerToBoolean: 957 return CGF.EvaluateExprAsBool(E); 958 } 959 960 // Handle cases where the source is an non-complex type. 961 962 if (!CGF.hasAggregateLLVMType(E->getType())) { 963 Value *Src = Visit(const_cast<Expr*>(E)); 964 965 // Use EmitScalarConversion to perform the conversion. 966 return EmitScalarConversion(Src, E->getType(), DestTy); 967 } 968 969 if (E->getType()->isAnyComplexType()) { 970 // Handle cases where the source is a complex type. 
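    // (For most scalar destinations only the real part is needed, so the
    //  imaginary part can be ignored.  A conversion to bool needs both parts,
    //  and a conversion to void needs neither value, only the side effects.)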
971 bool IgnoreImag = true; 972 bool IgnoreImagAssign = true; 973 bool IgnoreReal = IgnoreResultAssign; 974 bool IgnoreRealAssign = IgnoreResultAssign; 975 if (DestTy->isBooleanType()) 976 IgnoreImagAssign = IgnoreImag = false; 977 else if (DestTy->isVoidType()) { 978 IgnoreReal = IgnoreImag = false; 979 IgnoreRealAssign = IgnoreImagAssign = true; 980 } 981 CodeGenFunction::ComplexPairTy V 982 = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign, 983 IgnoreImagAssign); 984 return EmitComplexToScalarConversion(V, E->getType(), DestTy); 985 } 986 987 // Okay, this is a cast from an aggregate. It must be a cast to void. Just 988 // evaluate the result and return. 989 CGF.EmitAggExpr(E, 0, false, true); 990 return 0; 991 } 992 993 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) { 994 return CGF.EmitCompoundStmt(*E->getSubStmt(), 995 !E->getType()->isVoidType()).getScalarVal(); 996 } 997 998 Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) { 999 llvm::Value *V = CGF.GetAddrOfBlockDecl(E); 1000 if (E->getType().isObjCGCWeak()) 1001 return CGF.CGM.getObjCRuntime().EmitObjCWeakRead(CGF, V); 1002 return Builder.CreateLoad(V, "tmp"); 1003 } 1004 1005 //===----------------------------------------------------------------------===// 1006 // Unary Operators 1007 //===----------------------------------------------------------------------===// 1008 1009 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { 1010 TestAndClearIgnoreResultAssign(); 1011 Value *Op = Visit(E->getSubExpr()); 1012 if (Op->getType()->isFPOrFPVector()) 1013 return Builder.CreateFNeg(Op, "neg"); 1014 return Builder.CreateNeg(Op, "neg"); 1015 } 1016 1017 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { 1018 TestAndClearIgnoreResultAssign(); 1019 Value *Op = Visit(E->getSubExpr()); 1020 return Builder.CreateNot(Op, "neg"); 1021 } 1022 1023 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { 1024 // Compare operand to zero. 1025 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); 1026 1027 // Invert value. 1028 // TODO: Could dynamically modify easy computations here. For example, if 1029 // the operand is an icmp ne, turn into icmp eq. 1030 BoolVal = Builder.CreateNot(BoolVal, "lnot"); 1031 1032 // ZExt result to the expr type. 1033 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext"); 1034 } 1035 1036 /// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of 1037 /// argument of the sizeof expression as an integer. 1038 Value * 1039 ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) { 1040 QualType TypeToSize = E->getTypeOfArgument(); 1041 if (E->isSizeOf()) { 1042 if (const VariableArrayType *VAT = 1043 CGF.getContext().getAsVariableArrayType(TypeToSize)) { 1044 if (E->isArgumentType()) { 1045 // sizeof(type) - make sure to emit the VLA size. 1046 CGF.EmitVLASize(TypeToSize); 1047 } else { 1048 // C99 6.5.3.4p2: If the argument is an expression of type 1049 // VLA, it is evaluated. 1050 CGF.EmitAnyExpr(E->getArgumentExpr()); 1051 } 1052 1053 return CGF.GetVLASize(VAT); 1054 } 1055 } 1056 1057 // If this isn't sizeof(vla), the result must be constant; use the constant 1058 // folding logic so we don't have to duplicate it here. 
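  // For example, sizeof(int[10]) and __alignof(double) both fold to integer
  // constants here; no code is emitted for the operand.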
1059 Expr::EvalResult Result; 1060 E->Evaluate(Result, CGF.getContext()); 1061 return llvm::ConstantInt::get(VMContext, Result.Val.getInt()); 1062 } 1063 1064 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) { 1065 Expr *Op = E->getSubExpr(); 1066 if (Op->getType()->isAnyComplexType()) 1067 return CGF.EmitComplexExpr(Op, false, true, false, true).first; 1068 return Visit(Op); 1069 } 1070 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) { 1071 Expr *Op = E->getSubExpr(); 1072 if (Op->getType()->isAnyComplexType()) 1073 return CGF.EmitComplexExpr(Op, true, false, true, false).second; 1074 1075 // __imag on a scalar returns zero. Emit the subexpr to ensure side 1076 // effects are evaluated, but not the actual value. 1077 if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid) 1078 CGF.EmitLValue(Op); 1079 else 1080 CGF.EmitScalarExpr(Op, true); 1081 return llvm::Constant::getNullValue(ConvertType(E->getType())); 1082 } 1083 1084 Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E) { 1085 Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress(); 1086 const llvm::Type* ResultType = ConvertType(E->getType()); 1087 return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof"); 1088 } 1089 1090 //===----------------------------------------------------------------------===// 1091 // Binary Operators 1092 //===----------------------------------------------------------------------===// 1093 1094 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) { 1095 TestAndClearIgnoreResultAssign(); 1096 BinOpInfo Result; 1097 Result.LHS = Visit(E->getLHS()); 1098 Result.RHS = Visit(E->getRHS()); 1099 Result.Ty = E->getType(); 1100 Result.E = E; 1101 return Result; 1102 } 1103 1104 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, 1105 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) { 1106 bool Ignore = TestAndClearIgnoreResultAssign(); 1107 QualType LHSTy = E->getLHS()->getType(); 1108 1109 BinOpInfo OpInfo; 1110 1111 if (E->getComputationResultType()->isAnyComplexType()) { 1112 // This needs to go through the complex expression emitter, but it's a tad 1113 // complicated to do that... I'm leaving it out for now. (Note that we do 1114 // actually need the imaginary part of the RHS for multiplication and 1115 // division.) 1116 CGF.ErrorUnsupported(E, "complex compound assignment"); 1117 return llvm::UndefValue::get(CGF.ConvertType(E->getType())); 1118 } 1119 1120 // Emit the RHS first. __block variables need to have the rhs evaluated 1121 // first, plus this should improve codegen a little. 1122 OpInfo.RHS = Visit(E->getRHS()); 1123 OpInfo.Ty = E->getComputationResultType(); 1124 OpInfo.E = E; 1125 // Load/convert the LHS. 1126 LValue LHSLV = EmitCheckedLValue(E->getLHS()); 1127 OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy); 1128 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, 1129 E->getComputationLHSType()); 1130 1131 // Expand the binary operator. 1132 Value *Result = (this->*Func)(OpInfo); 1133 1134 // Convert the result back to the LHS type. 1135 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy); 1136 1137 // Store the result value into the LHS lvalue. Bit-fields are handled 1138 // specially because the result is altered by the store, i.e., [C99 6.5.16p1] 1139 // 'An assignment expression has the value of the left operand after the 1140 // assignment...'. 
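  // For a bit-field such as 'struct { int x : 3; } s;', an expression like
  // 's.x += 10' must yield the (possibly truncated) value that was actually
  // stored, which is why the bit-field store below hands the result back in
  // the non-volatile case.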
1141 if (LHSLV.isBitfield()) { 1142 if (!LHSLV.isVolatileQualified()) { 1143 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy, 1144 &Result); 1145 return Result; 1146 } else 1147 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy); 1148 } else 1149 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy); 1150 if (Ignore) 1151 return 0; 1152 return EmitLoadOfLValue(LHSLV, E->getType()); 1153 } 1154 1155 1156 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) { 1157 if (Ops.LHS->getType()->isFPOrFPVector()) 1158 return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div"); 1159 else if (Ops.Ty->isUnsignedIntegerType()) 1160 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div"); 1161 else 1162 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div"); 1163 } 1164 1165 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) { 1166 // Rem in C can't be a floating point type: C99 6.5.5p2. 1167 if (Ops.Ty->isUnsignedIntegerType()) 1168 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem"); 1169 else 1170 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem"); 1171 } 1172 1173 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { 1174 unsigned IID; 1175 unsigned OpID = 0; 1176 1177 switch (Ops.E->getOpcode()) { 1178 case BinaryOperator::Add: 1179 case BinaryOperator::AddAssign: 1180 OpID = 1; 1181 IID = llvm::Intrinsic::sadd_with_overflow; 1182 break; 1183 case BinaryOperator::Sub: 1184 case BinaryOperator::SubAssign: 1185 OpID = 2; 1186 IID = llvm::Intrinsic::ssub_with_overflow; 1187 break; 1188 case BinaryOperator::Mul: 1189 case BinaryOperator::MulAssign: 1190 OpID = 3; 1191 IID = llvm::Intrinsic::smul_with_overflow; 1192 break; 1193 default: 1194 assert(false && "Unsupported operation for overflow detection"); 1195 IID = 0; 1196 } 1197 OpID <<= 1; 1198 OpID |= 1; 1199 1200 const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty); 1201 1202 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1); 1203 1204 Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS); 1205 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0); 1206 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1); 1207 1208 // Branch in case of overflow. 
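  // The emitted control flow looks roughly like this:
  //
  //   initial:            br i1 %overflow, label %overflow, label %overflow.continue
  //   overflow:           call the (loaded) __overflow_handler, truncate its
  //                       result back to the operand type, branch to continue
  //   overflow.continue:  phi [ %result, %initial ], [ %handlerResult, %overflow ]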
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *overflowBB =
    CGF.createBasicBlock("overflow", CGF.CurFn);
  llvm::BasicBlock *continueBB =
    CGF.createBasicBlock("overflow.continue", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // Handle overflow

  Builder.SetInsertPoint(overflowBB);

  // Handler is:
  //   long long (*__overflow_handler)(long long a, long long b, char op,
  //                                   char width)
  std::vector<const llvm::Type*> handlerArgTypes;
  handlerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
  handlerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
  handlerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
  handlerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
  llvm::FunctionType *handlerTy = llvm::FunctionType::get(
      llvm::Type::getInt64Ty(VMContext), handlerArgTypes, false);
  llvm::Value *handlerFunction =
    CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
        llvm::PointerType::getUnqual(handlerTy));
  handlerFunction = Builder.CreateLoad(handlerFunction);

  llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
      Builder.CreateSExt(Ops.LHS, llvm::Type::getInt64Ty(VMContext)),
      Builder.CreateSExt(Ops.RHS, llvm::Type::getInt64Ty(VMContext)),
      llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID),
      llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext),
          cast<llvm::IntegerType>(opTy)->getBitWidth()));

  handlerResult = Builder.CreateTrunc(handlerResult, opTy);

  Builder.CreateBr(continueBB);

  // Set up the continuation
  Builder.SetInsertPoint(continueBB);
  // Get the correct result
  llvm::PHINode *phi = Builder.CreatePHI(opTy);
  phi->reserveOperandSpace(2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}

Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
  if (!Ops.Ty->isAnyPointerType()) {
    if (CGF.getContext().getLangOptions().OverflowChecking &&
        Ops.Ty->isSignedIntegerType())
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVector())
      return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");

    // Signed integer overflow is undefined behavior.
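    // Since that is undefined, the addition below can be marked 'nsw'
    // (no signed wrap), which lets LLVM assume the result never wraps.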
1268 if (Ops.Ty->isSignedIntegerType()) 1269 return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add"); 1270 1271 return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add"); 1272 } 1273 1274 if (Ops.Ty->isPointerType() && 1275 Ops.Ty->getAs<PointerType>()->isVariableArrayType()) { 1276 // The amount of the addition needs to account for the VLA size 1277 CGF.ErrorUnsupported(Ops.E, "VLA pointer addition"); 1278 } 1279 Value *Ptr, *Idx; 1280 Expr *IdxExp; 1281 const PointerType *PT = Ops.E->getLHS()->getType()->getAs<PointerType>(); 1282 const ObjCObjectPointerType *OPT = 1283 Ops.E->getLHS()->getType()->getAs<ObjCObjectPointerType>(); 1284 if (PT || OPT) { 1285 Ptr = Ops.LHS; 1286 Idx = Ops.RHS; 1287 IdxExp = Ops.E->getRHS(); 1288 } else { // int + pointer 1289 PT = Ops.E->getRHS()->getType()->getAs<PointerType>(); 1290 OPT = Ops.E->getRHS()->getType()->getAs<ObjCObjectPointerType>(); 1291 assert((PT || OPT) && "Invalid add expr"); 1292 Ptr = Ops.RHS; 1293 Idx = Ops.LHS; 1294 IdxExp = Ops.E->getLHS(); 1295 } 1296 1297 unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); 1298 if (Width < CGF.LLVMPointerWidth) { 1299 // Zero or sign extend the pointer value based on whether the index is 1300 // signed or not. 1301 const llvm::Type *IdxType = 1302 llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); 1303 if (IdxExp->getType()->isSignedIntegerType()) 1304 Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext"); 1305 else 1306 Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext"); 1307 } 1308 const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType(); 1309 // Handle interface types, which are not represented with a concrete type. 1310 if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) { 1311 llvm::Value *InterfaceSize = 1312 llvm::ConstantInt::get(Idx->getType(), 1313 CGF.getContext().getTypeSizeInChars(OIT).getQuantity()); 1314 Idx = Builder.CreateMul(Idx, InterfaceSize); 1315 const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); 1316 Value *Casted = Builder.CreateBitCast(Ptr, i8Ty); 1317 Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr"); 1318 return Builder.CreateBitCast(Res, Ptr->getType()); 1319 } 1320 1321 // Explicitly handle GNU void* and function pointer arithmetic extensions. The 1322 // GNU void* casts amount to no-ops since our void* type is i8*, but this is 1323 // future proof. 1324 if (ElementType->isVoidType() || ElementType->isFunctionType()) { 1325 const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); 1326 Value *Casted = Builder.CreateBitCast(Ptr, i8Ty); 1327 Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr"); 1328 return Builder.CreateBitCast(Res, Ptr->getType()); 1329 } 1330 1331 return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr"); 1332 } 1333 1334 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { 1335 if (!isa<llvm::PointerType>(Ops.LHS->getType())) { 1336 if (CGF.getContext().getLangOptions().OverflowChecking 1337 && Ops.Ty->isSignedIntegerType()) 1338 return EmitOverflowCheckedBinOp(Ops); 1339 1340 if (Ops.LHS->getType()->isFPOrFPVector()) 1341 return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub"); 1342 return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub"); 1343 } 1344 1345 if (Ops.E->getLHS()->getType()->isPointerType() && 1346 Ops.E->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) { 1347 // The amount of the addition needs to account for the VLA size for 1348 // ptr-int 1349 // The amount of the division needs to account for the VLA size for 1350 // ptr-ptr. 
1351 CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction"); 1352 } 1353 1354 const QualType LHSType = Ops.E->getLHS()->getType(); 1355 const QualType LHSElementType = LHSType->getPointeeType(); 1356 if (!isa<llvm::PointerType>(Ops.RHS->getType())) { 1357 // pointer - int 1358 Value *Idx = Ops.RHS; 1359 unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); 1360 if (Width < CGF.LLVMPointerWidth) { 1361 // Zero or sign extend the pointer value based on whether the index is 1362 // signed or not. 1363 const llvm::Type *IdxType = 1364 llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); 1365 if (Ops.E->getRHS()->getType()->isSignedIntegerType()) 1366 Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext"); 1367 else 1368 Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext"); 1369 } 1370 Idx = Builder.CreateNeg(Idx, "sub.ptr.neg"); 1371 1372 // Handle interface types, which are not represented with a concrete type. 1373 if (const ObjCInterfaceType *OIT = 1374 dyn_cast<ObjCInterfaceType>(LHSElementType)) { 1375 llvm::Value *InterfaceSize = 1376 llvm::ConstantInt::get(Idx->getType(), 1377 CGF.getContext(). 1378 getTypeSizeInChars(OIT).getQuantity()); 1379 Idx = Builder.CreateMul(Idx, InterfaceSize); 1380 const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); 1381 Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty); 1382 Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr"); 1383 return Builder.CreateBitCast(Res, Ops.LHS->getType()); 1384 } 1385 1386 // Explicitly handle GNU void* and function pointer arithmetic 1387 // extensions. The GNU void* casts amount to no-ops since our void* type is 1388 // i8*, but this is future proof. 1389 if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) { 1390 const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); 1391 Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty); 1392 Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr"); 1393 return Builder.CreateBitCast(Res, Ops.LHS->getType()); 1394 } 1395 1396 return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr"); 1397 } else { 1398 // pointer - pointer 1399 Value *LHS = Ops.LHS; 1400 Value *RHS = Ops.RHS; 1401 1402 CharUnits ElementSize; 1403 1404 // Handle GCC extension for pointer arithmetic on void* and function pointer 1405 // types. 1406 if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) { 1407 ElementSize = CharUnits::One(); 1408 } else { 1409 ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType); 1410 } 1411 1412 const llvm::Type *ResultType = ConvertType(Ops.Ty); 1413 LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast"); 1414 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); 1415 Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); 1416 1417 // Optimize out the shift for element size of 1. 1418 if (ElementSize.isOne()) 1419 return BytesBetween; 1420 1421 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since 1422 // pointer difference in C is only defined in the case where both operands 1423 // are pointing to elements of an array. 1424 Value *BytesPerElt = 1425 llvm::ConstantInt::get(ResultType, ElementSize.getQuantity()); 1426 return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div"); 1427 } 1428 } 1429 1430 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { 1431 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 1432 // RHS to the same size as the LHS. 
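  // (In C the shift count is promoted independently of the left operand and
  //  the result has the promoted LHS type, so e.g. an i32 shifted by an i64
  //  count reaches here and the count is truncated to i32.)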
1433 Value *RHS = Ops.RHS; 1434 if (Ops.LHS->getType() != RHS->getType()) 1435 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 1436 1437 if (CGF.CatchUndefined 1438 && isa<llvm::IntegerType>(Ops.LHS->getType())) { 1439 unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth(); 1440 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 1441 CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS, 1442 llvm::ConstantInt::get(RHS->getType(), Width)), 1443 Cont, CGF.getTrapBB()); 1444 CGF.EmitBlock(Cont); 1445 } 1446 1447 return Builder.CreateShl(Ops.LHS, RHS, "shl"); 1448 } 1449 1450 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { 1451 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 1452 // RHS to the same size as the LHS. 1453 Value *RHS = Ops.RHS; 1454 if (Ops.LHS->getType() != RHS->getType()) 1455 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 1456 1457 if (CGF.CatchUndefined 1458 && isa<llvm::IntegerType>(Ops.LHS->getType())) { 1459 unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth(); 1460 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 1461 CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS, 1462 llvm::ConstantInt::get(RHS->getType(), Width)), 1463 Cont, CGF.getTrapBB()); 1464 CGF.EmitBlock(Cont); 1465 } 1466 1467 if (Ops.Ty->isUnsignedIntegerType()) 1468 return Builder.CreateLShr(Ops.LHS, RHS, "shr"); 1469 return Builder.CreateAShr(Ops.LHS, RHS, "shr"); 1470 } 1471 1472 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc, 1473 unsigned SICmpOpc, unsigned FCmpOpc) { 1474 TestAndClearIgnoreResultAssign(); 1475 Value *Result; 1476 QualType LHSTy = E->getLHS()->getType(); 1477 if (LHSTy->isMemberFunctionPointerType()) { 1478 Value *LHSPtr = CGF.EmitAnyExprToTemp(E->getLHS()).getAggregateAddr(); 1479 Value *RHSPtr = CGF.EmitAnyExprToTemp(E->getRHS()).getAggregateAddr(); 1480 llvm::Value *LHSFunc = Builder.CreateStructGEP(LHSPtr, 0); 1481 LHSFunc = Builder.CreateLoad(LHSFunc); 1482 llvm::Value *RHSFunc = Builder.CreateStructGEP(RHSPtr, 0); 1483 RHSFunc = Builder.CreateLoad(RHSFunc); 1484 Value *ResultF = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, 1485 LHSFunc, RHSFunc, "cmp.func"); 1486 Value *NullPtr = llvm::Constant::getNullValue(LHSFunc->getType()); 1487 Value *ResultNull = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, 1488 LHSFunc, NullPtr, "cmp.null"); 1489 llvm::Value *LHSAdj = Builder.CreateStructGEP(LHSPtr, 1); 1490 LHSAdj = Builder.CreateLoad(LHSAdj); 1491 llvm::Value *RHSAdj = Builder.CreateStructGEP(RHSPtr, 1); 1492 RHSAdj = Builder.CreateLoad(RHSAdj); 1493 Value *ResultA = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, 1494 LHSAdj, RHSAdj, "cmp.adj"); 1495 if (E->getOpcode() == BinaryOperator::EQ) { 1496 Result = Builder.CreateOr(ResultNull, ResultA, "or.na"); 1497 Result = Builder.CreateAnd(Result, ResultF, "and.f"); 1498 } else { 1499 assert(E->getOpcode() == BinaryOperator::NE && 1500 "Member pointer comparison other than == or != ?"); 1501 Result = Builder.CreateAnd(ResultNull, ResultA, "and.na"); 1502 Result = Builder.CreateOr(Result, ResultF, "or.f"); 1503 } 1504 } else if (!LHSTy->isAnyComplexType()) { 1505 Value *LHS = Visit(E->getLHS()); 1506 Value *RHS = Visit(E->getRHS()); 1507 1508 if (LHS->getType()->isFPOrFPVector()) { 1509 Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc, 1510 LHS, RHS, "cmp"); 1511 } else if (LHSTy->isSignedIntegerType()) { 1512 Result = 

Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      unsigned UICmpOpc, unsigned SICmpOpc,
                                      unsigned FCmpOpc) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  if (LHSTy->isMemberFunctionPointerType()) {
    Value *LHSPtr = CGF.EmitAnyExprToTemp(E->getLHS()).getAggregateAddr();
    Value *RHSPtr = CGF.EmitAnyExprToTemp(E->getRHS()).getAggregateAddr();
    llvm::Value *LHSFunc = Builder.CreateStructGEP(LHSPtr, 0);
    LHSFunc = Builder.CreateLoad(LHSFunc);
    llvm::Value *RHSFunc = Builder.CreateStructGEP(RHSPtr, 0);
    RHSFunc = Builder.CreateLoad(RHSFunc);
    Value *ResultF = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                        LHSFunc, RHSFunc, "cmp.func");
    Value *NullPtr = llvm::Constant::getNullValue(LHSFunc->getType());
    Value *ResultNull = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                           LHSFunc, NullPtr, "cmp.null");
    llvm::Value *LHSAdj = Builder.CreateStructGEP(LHSPtr, 1);
    LHSAdj = Builder.CreateLoad(LHSAdj);
    llvm::Value *RHSAdj = Builder.CreateStructGEP(RHSPtr, 1);
    RHSAdj = Builder.CreateLoad(RHSAdj);
    Value *ResultA = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                        LHSAdj, RHSAdj, "cmp.adj");
    if (E->getOpcode() == BinaryOperator::EQ) {
      Result = Builder.CreateOr(ResultNull, ResultA, "or.na");
      Result = Builder.CreateAnd(Result, ResultF, "and.f");
    } else {
      assert(E->getOpcode() == BinaryOperator::NE &&
             "Member pointer comparison other than == or != ?");
      Result = Builder.CreateAnd(ResultNull, ResultA, "and.na");
      Result = Builder.CreateOr(Result, ResultF, "or.f");
    }
  } else if (!LHSTy->isAnyComplexType()) {
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());

    if (LHS->getType()->isFPOrFPVector()) {
      Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
                                  LHS, RHS, "cmp");
    } else if (LHSTy->isSignedIntegerType()) {
      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
                                  LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.
      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                  LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the
    // appropriate vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
    CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());

    QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
                                   LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
                                   LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                   LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                   LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BinaryOperator::EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BinaryOperator::NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
}

Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  // __block variables need to have the rhs evaluated first, plus this should
  // improve codegen just a little.
  Value *RHS = Visit(E->getRHS());
  LValue LHS = EmitCheckedLValue(E->getLHS());

  // Store the value into the LHS. Bit-fields are handled specially because
  // the result is altered by the store, i.e., [C99 6.5.16p1] 'An assignment
  // expression has the value of the left operand after the assignment...'.
  if (LHS.isBitfield()) {
    if (!LHS.isVolatileQualified()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
                                         &RHS);
      return RHS;
    } else
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
  } else
    CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
  if (Ignore)
    return 0;
  return EmitLoadOfLValue(LHS, E->getType());
}
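
// Editor's note (not part of the original source): as an example of the
// bit-field path above, for a hypothetical 4-bit unsigned bit-field 's.bf'
// the assignment "s.bf = 300" stores only the low four bits, and per
// C99 6.5.16p1 the assignment expression yields the value actually stored
// (300 & 0xF == 12); that is why EmitStoreThroughBitfieldLValue is asked to
// write the post-store result back into RHS before it is returned.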

Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  const llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
    if (Cond == 1) { // If we have 1 && X, just emit X.
      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);

  // Any edges into the ContBlock at this point come from short-circuit exits
  // of the LHS condition, and all of those values are false. Start setting up
  // the PHI node in the ContBlock for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
                                            "", ContBlock);
  PN->reserveOperandSpace(2);  // Normal case, two inputs.
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  CGF.StartConditionalBranch();
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  CGF.FinishConditionalBranch();

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
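
// Editor's illustrative sketch (not part of the original source): for
// "a && b" with a non-constant LHS, the blocks created above yield roughly
// the following shape (%a.cond and %b.cond are illustrative names for the
// boolean evaluations of the two operands):
//
//   entry:
//     br i1 %a.cond, label %land.rhs, label %land.end
//   land.rhs:
//     br label %land.end
//   land.end:
//     %0 = phi i1 [ false, %entry ], [ %b.cond, %land.rhs ]
//     %land.ext = zext i1 %0 to i32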

Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  const llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
    if (Cond == -1) { // If we have 0 || X, just emit X.
      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);

  // Any edges into the ContBlock at this point come from short-circuit exits
  // of the LHS condition, and all of those values are true. Start setting up
  // the PHI node in the ContBlock for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
                                            "", ContBlock);
  PN->reserveOperandSpace(2);  // Normal case, two inputs.
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  CGF.StartConditionalBranch();

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  CGF.FinishConditionalBranch();

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitStmt(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
// Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr(), CGF);

  // TODO: Allow anything we can constant fold to an integer or fp constant.
  if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
      isa<FloatingLiteral>(E))
    return true;

  // Non-volatile automatic variables too, to get "cond ? X : Y" where
  // X and Y are local variables.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasLocalStorage() && !(CGF.getContext()
                                       .getCanonicalType(VD->getType())
                                       .isVolatileQualified()))
        return true;

  return false;
}
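
// Editor's note (not part of the original source): under the rule above,
// literals such as "4", "'a'" or "2.0f" and non-volatile locals such as a
// plain "int x" count as cheap, so "c ? x : 4" can be emitted as a select by
// the conditional-operator code below; a call like "f()" or a volatile
// variable does not qualify and forces real control flow instead.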

Value *ScalarExprEmitter::
VisitConditionalOperator(const ConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();
  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())) {
    Expr *Live = E->getLHS(), *Dead = E->getRHS();
    if (Cond == -1)
      std::swap(Live, Dead);

    // If the dead side doesn't have labels we need, and if the Live side isn't
    // the GNU missing ?: extension (which we could handle, but don't bother
    // to), just emit the Live part.
    if ((!Dead || !CGF.ContainsLabel(Dead)) && // No labels in dead part
        Live)                                  // Live part isn't missing.
      return Visit(Live);
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (E->getLHS() &&
      isCheapEnoughToEvaluateUnconditionally(E->getLHS(), CGF) &&
      isCheapEnoughToEvaluateUnconditionally(E->getRHS(), CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
    llvm::Value *LHS = Visit(E->getLHS());
    llvm::Value *RHS = Visit(E->getRHS());
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
  Value *CondVal = 0;

  // If we don't have the GNU missing condition extension, emit a branch on
  // bool the normal way.
  if (E->getLHS()) {
    // Just use EmitBranchOnBoolExpr to get small and simple code for the
    // branch on bool.
    CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
  } else {
    // Otherwise, for the ?: extension, evaluate the conditional and then
    // convert it to bool the hard way. We do this explicitly because we need
    // the unconverted value for the missing middle value of the ?:.
    CondVal = CGF.EmitScalarExpr(E->getCond());

    // In some cases, EmitScalarConversion will delete the "CondVal" expression
    // if there are no extra uses (an optimization). Inhibit this by making an
    // extra dead use, because we're going to add a use of CondVal later. We
    // don't use the builder for this, because we don't want it to get
    // optimized away. This leaves dead code, but the ?: extension isn't
    // common.
    new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
                          Builder.GetInsertBlock());

    Value *CondBoolVal =
      CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
                               CGF.getContext().BoolTy);
    Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
  }

  CGF.StartConditionalBranch();
  CGF.EmitBlock(LHSBlock);

  // Handle the GNU extension for missing LHS.
  Value *LHS;
  if (E->getLHS())
    LHS = Visit(E->getLHS());
  else // Perform promotions, to handle cases like "short ?: int".
    LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());

  CGF.FinishConditionalBranch();
  LHSBlock = Builder.GetInsertBlock();
  CGF.EmitBranch(ContBlock);

  CGF.StartConditionalBranch();
  CGF.EmitBlock(RHSBlock);

  Value *RHS = Visit(E->getRHS());
  CGF.FinishConditionalBranch();
  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBranch(ContBlock);

  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the result value.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
  PN->reserveOperandSpace(2);
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);
  return PN;
}
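
// Editor's illustrative sketch (not part of the original source): when the
// arms are not cheap enough for a select, "c ? f() : g()" (with both calls
// returning 'int') is emitted with explicit control flow, roughly as follows
// (%c.cond, %call and %call1 are illustrative names):
//
//   br i1 %c.cond, label %cond.true, label %cond.false
//   cond.true:
//     %call = call i32 @f()
//     br label %cond.end
//   cond.false:
//     %call1 = call i32 @g()
//     br label %cond.end
//   cond.end:
//     %cond = phi i32 [ %call, %cond.true ], [ %call1, %cond.false ]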

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr(CGF.getContext()));
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  // If EmitVAArg fails, we fall back to the LLVM instruction.
  if (!ArgPtr)
    return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));

  // FIXME: Volatility.
  return Builder.CreateLoad(ArgPtr);
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
  return CGF.BuildBlockLiteralTmp(BE);
}

//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//

/// EmitScalarExpr - Emit the computation of the specified expression of scalar
/// type, returning the result as an LLVM scalar value.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && !hasAggregateLLVMType(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
    .Visit(const_cast<Expr*>(E));
}

/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy) {
  assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
}

/// EmitComplexToScalarConversion - Emit a conversion from the specified
/// complex type to the specified destination type, where the destination type
/// is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy) {
  assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
                                                                DstTy);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  llvm::Value *V;
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object
  Expr *BaseExpr = E->getBase();
  if (E->isArrow())
    V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
  else
    V = EmitLValue(BaseExpr).getAddress();

  // Build the Class* type.
  const llvm::Type *ClassPtrTy = ConvertType(E->getType());
  ClassPtrTy = ClassPtrTy->getPointerTo();
  V = Builder.CreateBitCast(V, ClassPtrTy);
  LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
  return LV;
}