//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E;   // Entire expr, for error unsupported. May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation did not follow usual arithmetic conversion and both operands
  /// might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations return
    // an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                        const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return llvm::None;

  QualType BaseTy = Base->getType();
  if (!BaseTy->isPromotableIntegerType() ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return llvm::None;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).hasValue();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
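  // For instance (illustrative): with 'unsigned short' operands promoted to a
  // 32-bit signed 'int', 0xFFFF * 0xFFFF == 0xFFFE0001 does not fit in the
  // signed result, so the multiplication case is only exempted below when an
  // unpromoted type is less than half the promoted width.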
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
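        // Illustrative (hypothetical declaration): for a parameter declared
        // 'int *p __attribute__((align_value(64)))', the assumption is
        // normally emitted once in the prologue rather than at each use.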
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy =
              dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again.  Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.ConvertTypeForMem(E->getType()),
            CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      //  Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetWidthMinusOneValue(Value* LHS, Value* RHS);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
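  // For example (illustrative): with a 32-bit LHS the shift amount is masked
  // with 31; with a 24-bit LHS it is instead reduced with 'urem 24'.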
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                              \
    return Emit##OP(EmitBinOps(E));                                           \
  }                                                                           \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {              \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);               \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG)                                       \
    Value *VisitBin##CODE(const BinaryOperator *E) {                          \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI,           \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value.  This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Is this value a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e is %V *strictly* less than zero, does it have negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  //    NOTE: conversion from negative to zero is considered to change the sign.
  //    (We want to get 'false' when the conversion changed the sign)
  //    So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.
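  // For example (illustrative): an 'int' -> 'unsigned int' conversion of the
  // same width can still turn a negative value into a non-negative one, so it
  // reaches the runtime check below.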

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
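    // For example (illustrative): converting 4.0e9f to 'int' through
    // llvm.fptosi.sat yields INT_MAX rather than a poison value.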
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}

/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type is
      // an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
        DstType->isRealFloatingType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or FPExt,
      // depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be an ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers)
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (SrcType->isMatrixType() && DstType->isMatrixType())
    return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
    unsigned DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type. However, we consider the
  // range of representable values for all floating-point types to be
  // [-inf,+inf], so no overflow can ever happen when the destination type is a
  // floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      OrigSrcType->isFloatingType())
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
1450 return Builder.CreateFPTrunc(Src, DstTy); 1451 } 1452 DstTy = CGF.FloatTy; 1453 } 1454 1455 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); 1456 1457 if (DstTy != ResTy) { 1458 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 1459 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion"); 1460 Res = Builder.CreateCall( 1461 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy), 1462 Res); 1463 } else { 1464 Res = Builder.CreateFPTrunc(Res, ResTy, "conv"); 1465 } 1466 } 1467 1468 if (Opts.EmitImplicitIntegerTruncationChecks) 1469 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res, 1470 NoncanonicalDstType, Loc); 1471 1472 if (Opts.EmitImplicitIntegerSignChangeChecks) 1473 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res, 1474 NoncanonicalDstType, Loc); 1475 1476 return Res; 1477 } 1478 1479 Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy, 1480 QualType DstTy, 1481 SourceLocation Loc) { 1482 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 1483 llvm::Value *Result; 1484 if (SrcTy->isRealFloatingType()) 1485 Result = FPBuilder.CreateFloatingToFixed(Src, 1486 CGF.getContext().getFixedPointSemantics(DstTy)); 1487 else if (DstTy->isRealFloatingType()) 1488 Result = FPBuilder.CreateFixedToFloating(Src, 1489 CGF.getContext().getFixedPointSemantics(SrcTy), 1490 ConvertType(DstTy)); 1491 else { 1492 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy); 1493 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy); 1494 1495 if (DstTy->isIntegerType()) 1496 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema, 1497 DstFPSema.getWidth(), 1498 DstFPSema.isSigned()); 1499 else if (SrcTy->isIntegerType()) 1500 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(), 1501 DstFPSema); 1502 else 1503 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema); 1504 } 1505 return Result; 1506 } 1507 1508 /// Emit a conversion from the specified complex type to the specified 1509 /// destination type, where the destination type is an LLVM scalar type. 1510 Value *ScalarExprEmitter::EmitComplexToScalarConversion( 1511 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy, 1512 SourceLocation Loc) { 1513 // Get the source element type. 1514 SrcTy = SrcTy->castAs<ComplexType>()->getElementType(); 1515 1516 // Handle conversions to bool first, they are special: comparisons against 0. 1517 if (DstTy->isBooleanType()) { 1518 // Complex != 0 -> (Real != 0) | (Imag != 0) 1519 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc); 1520 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc); 1521 return Builder.CreateOr(Src.first, Src.second, "tobool"); 1522 } 1523 1524 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type, 1525 // the imaginary part of the complex value is discarded and the value of the 1526 // real part is converted according to the conversion rules for the 1527 // corresponding real type. 1528 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc); 1529 } 1530 1531 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) { 1532 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty); 1533 } 1534 1535 /// Emit a sanitization check for the given "binary" operation (which 1536 /// might actually be a unary increment which has been lowered to a binary 1537 /// operation). The check passes if all values in \p Checks (which are \c i1), 1538 /// are \c true. 
1539 void ScalarExprEmitter::EmitBinOpCheck( 1540 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) { 1541 assert(CGF.IsSanitizerScope); 1542 SanitizerHandler Check; 1543 SmallVector<llvm::Constant *, 4> StaticData; 1544 SmallVector<llvm::Value *, 2> DynamicData; 1545 1546 BinaryOperatorKind Opcode = Info.Opcode; 1547 if (BinaryOperator::isCompoundAssignmentOp(Opcode)) 1548 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode); 1549 1550 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc())); 1551 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E); 1552 if (UO && UO->getOpcode() == UO_Minus) { 1553 Check = SanitizerHandler::NegateOverflow; 1554 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType())); 1555 DynamicData.push_back(Info.RHS); 1556 } else { 1557 if (BinaryOperator::isShiftOp(Opcode)) { 1558 // Shift LHS negative or too large, or RHS out of bounds. 1559 Check = SanitizerHandler::ShiftOutOfBounds; 1560 const BinaryOperator *BO = cast<BinaryOperator>(Info.E); 1561 StaticData.push_back( 1562 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType())); 1563 StaticData.push_back( 1564 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType())); 1565 } else if (Opcode == BO_Div || Opcode == BO_Rem) { 1566 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1). 1567 Check = SanitizerHandler::DivremOverflow; 1568 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); 1569 } else { 1570 // Arithmetic overflow (+, -, *). 1571 switch (Opcode) { 1572 case BO_Add: Check = SanitizerHandler::AddOverflow; break; 1573 case BO_Sub: Check = SanitizerHandler::SubOverflow; break; 1574 case BO_Mul: Check = SanitizerHandler::MulOverflow; break; 1575 default: llvm_unreachable("unexpected opcode for bin op check"); 1576 } 1577 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); 1578 } 1579 DynamicData.push_back(Info.LHS); 1580 DynamicData.push_back(Info.RHS); 1581 } 1582 1583 CGF.EmitCheck(Checks, Check, StaticData, DynamicData); 1584 } 1585 1586 //===----------------------------------------------------------------------===// 1587 // Visitor Methods 1588 //===----------------------------------------------------------------------===// 1589 1590 Value *ScalarExprEmitter::VisitExpr(Expr *E) { 1591 CGF.ErrorUnsupported(E, "scalar expression"); 1592 if (E->getType()->isVoidType()) 1593 return nullptr; 1594 return llvm::UndefValue::get(CGF.ConvertType(E->getType())); 1595 } 1596 1597 Value * 1598 ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) { 1599 ASTContext &Context = CGF.getContext(); 1600 llvm::Optional<LangAS> GlobalAS = 1601 Context.getTargetInfo().getConstantAddressSpace(); 1602 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr( 1603 E->ComputeName(Context), "__usn_str", 1604 static_cast<unsigned>(GlobalAS.getValueOr(LangAS::Default))); 1605 1606 unsigned ExprAS = Context.getTargetAddressSpace(E->getType()); 1607 1608 if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS) 1609 return GlobalConstStr; 1610 1611 llvm::PointerType *PtrTy = cast<llvm::PointerType>(GlobalConstStr->getType()); 1612 llvm::PointerType *NewPtrTy = 1613 llvm::PointerType::getWithSamePointeeType(PtrTy, ExprAS); 1614 return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast"); 1615 } 1616 1617 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { 1618 // Vector Mask Case 1619 if (E->getNumSubExprs() == 2) { 1620 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0)); 1621 
Value *RHS = CGF.EmitScalarExpr(E->getExpr(1)); 1622 Value *Mask; 1623 1624 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType()); 1625 unsigned LHSElts = LTy->getNumElements(); 1626 1627 Mask = RHS; 1628 1629 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType()); 1630 1631 // Mask off the high bits of each shuffle index. 1632 Value *MaskBits = 1633 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1); 1634 Mask = Builder.CreateAnd(Mask, MaskBits, "mask"); 1635 1636 // newv = undef 1637 // mask = mask & maskbits 1638 // for each elt 1639 // n = extract mask i 1640 // x = extract val n 1641 // newv = insert newv, x, i 1642 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(), 1643 MTy->getNumElements()); 1644 Value* NewV = llvm::UndefValue::get(RTy); 1645 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) { 1646 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i); 1647 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx"); 1648 1649 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt"); 1650 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins"); 1651 } 1652 return NewV; 1653 } 1654 1655 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0)); 1656 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1)); 1657 1658 SmallVector<int, 32> Indices; 1659 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { 1660 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2); 1661 // Check for -1 and output it as undef in the IR. 1662 if (Idx.isSigned() && Idx.isAllOnes()) 1663 Indices.push_back(-1); 1664 else 1665 Indices.push_back(Idx.getZExtValue()); 1666 } 1667 1668 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle"); 1669 } 1670 1671 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) { 1672 QualType SrcType = E->getSrcExpr()->getType(), 1673 DstType = E->getType(); 1674 1675 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); 1676 1677 SrcType = CGF.getContext().getCanonicalType(SrcType); 1678 DstType = CGF.getContext().getCanonicalType(DstType); 1679 if (SrcType == DstType) return Src; 1680 1681 assert(SrcType->isVectorType() && 1682 "ConvertVector source type must be a vector"); 1683 assert(DstType->isVectorType() && 1684 "ConvertVector destination type must be a vector"); 1685 1686 llvm::Type *SrcTy = Src->getType(); 1687 llvm::Type *DstTy = ConvertType(DstType); 1688 1689 // Ignore conversions like int -> uint. 1690 if (SrcTy == DstTy) 1691 return Src; 1692 1693 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(), 1694 DstEltType = DstType->castAs<VectorType>()->getElementType(); 1695 1696 assert(SrcTy->isVectorTy() && 1697 "ConvertVector source IR type must be a vector"); 1698 assert(DstTy->isVectorTy() && 1699 "ConvertVector destination IR type must be a vector"); 1700 1701 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(), 1702 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType(); 1703 1704 if (DstEltType->isBooleanType()) { 1705 assert((SrcEltTy->isFloatingPointTy() || 1706 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion"); 1707 1708 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy); 1709 if (SrcEltTy->isFloatingPointTy()) { 1710 return Builder.CreateFCmpUNE(Src, Zero, "tobool"); 1711 } else { 1712 return Builder.CreateICmpNE(Src, Zero, "tobool"); 1713 } 1714 } 1715 1716 // We have the arithmetic types: real int/float. 
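  // Illustrative sketch (element types assumed for the example): converting a
  // <4 x float> value to a vector of four signed ints lowers to a single
  // 'fptosi <4 x float> %v to <4 x i32>' (or 'fptoui' for an unsigned
  // destination), matching the element-wise dispatch below.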
1717 Value *Res = nullptr; 1718 1719 if (isa<llvm::IntegerType>(SrcEltTy)) { 1720 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType(); 1721 if (isa<llvm::IntegerType>(DstEltTy)) 1722 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv"); 1723 else if (InputSigned) 1724 Res = Builder.CreateSIToFP(Src, DstTy, "conv"); 1725 else 1726 Res = Builder.CreateUIToFP(Src, DstTy, "conv"); 1727 } else if (isa<llvm::IntegerType>(DstEltTy)) { 1728 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion"); 1729 if (DstEltType->isSignedIntegerOrEnumerationType()) 1730 Res = Builder.CreateFPToSI(Src, DstTy, "conv"); 1731 else 1732 Res = Builder.CreateFPToUI(Src, DstTy, "conv"); 1733 } else { 1734 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && 1735 "Unknown real conversion"); 1736 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID()) 1737 Res = Builder.CreateFPTrunc(Src, DstTy, "conv"); 1738 else 1739 Res = Builder.CreateFPExt(Src, DstTy, "conv"); 1740 } 1741 1742 return Res; 1743 } 1744 1745 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { 1746 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) { 1747 CGF.EmitIgnoredExpr(E->getBase()); 1748 return CGF.emitScalarConstant(Constant, E); 1749 } else { 1750 Expr::EvalResult Result; 1751 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) { 1752 llvm::APSInt Value = Result.Val.getInt(); 1753 CGF.EmitIgnoredExpr(E->getBase()); 1754 return Builder.getInt(Value); 1755 } 1756 } 1757 1758 return EmitLoadOfLValue(E); 1759 } 1760 1761 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { 1762 TestAndClearIgnoreResultAssign(); 1763 1764 // Emit subscript expressions in rvalue context's. For most cases, this just 1765 // loads the lvalue formed by the subscript expr. However, we have to be 1766 // careful, because the base of a vector subscript is occasionally an rvalue, 1767 // so we can't get it as an lvalue. 1768 if (!E->getBase()->getType()->isVectorType()) 1769 return EmitLoadOfLValue(E); 1770 1771 // Handle the vector case. The base must be a vector, the index must be an 1772 // integer value. 1773 Value *Base = Visit(E->getBase()); 1774 Value *Idx = Visit(E->getIdx()); 1775 QualType IdxTy = E->getIdx()->getType(); 1776 1777 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds)) 1778 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true); 1779 1780 return Builder.CreateExtractElement(Base, Idx, "vecext"); 1781 } 1782 1783 Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) { 1784 TestAndClearIgnoreResultAssign(); 1785 1786 // Handle the vector case. The base must be a vector, the index must be an 1787 // integer value. 1788 Value *RowIdx = Visit(E->getRowIdx()); 1789 Value *ColumnIdx = Visit(E->getColumnIdx()); 1790 1791 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>(); 1792 unsigned NumRows = MatrixTy->getNumRows(); 1793 llvm::MatrixBuilder MB(Builder); 1794 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows); 1795 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0) 1796 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened()); 1797 1798 Value *Matrix = Visit(E->getBase()); 1799 1800 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds? 
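  // Note: matrix values are flattened vectors in column-major order, so for an
  // illustrative NumRows x NumCols matrix, element (Row, Col) lives at
  // flattened index Col * NumRows + Row, which is the index computed above.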
1801 return Builder.CreateExtractElement(Matrix, Idx, "matrixext"); 1802 } 1803 1804 static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, 1805 unsigned Off) { 1806 int MV = SVI->getMaskValue(Idx); 1807 if (MV == -1) 1808 return -1; 1809 return Off + MV; 1810 } 1811 1812 static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) { 1813 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && 1814 "Index operand too large for shufflevector mask!"); 1815 return C->getZExtValue(); 1816 } 1817 1818 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { 1819 bool Ignore = TestAndClearIgnoreResultAssign(); 1820 (void)Ignore; 1821 assert (Ignore == false && "init list ignored"); 1822 unsigned NumInitElements = E->getNumInits(); 1823 1824 if (E->hadArrayRangeDesignator()) 1825 CGF.ErrorUnsupported(E, "GNU array range designator extension"); 1826 1827 llvm::VectorType *VType = 1828 dyn_cast<llvm::VectorType>(ConvertType(E->getType())); 1829 1830 if (!VType) { 1831 if (NumInitElements == 0) { 1832 // C++11 value-initialization for the scalar. 1833 return EmitNullValue(E->getType()); 1834 } 1835 // We have a scalar in braces. Just use the first element. 1836 return Visit(E->getInit(0)); 1837 } 1838 1839 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements(); 1840 1841 // Loop over initializers collecting the Value for each, and remembering 1842 // whether the source was swizzle (ExtVectorElementExpr). This will allow 1843 // us to fold the shuffle for the swizzle into the shuffle for the vector 1844 // initializer, since LLVM optimizers generally do not want to touch 1845 // shuffles. 1846 unsigned CurIdx = 0; 1847 bool VIsUndefShuffle = false; 1848 llvm::Value *V = llvm::UndefValue::get(VType); 1849 for (unsigned i = 0; i != NumInitElements; ++i) { 1850 Expr *IE = E->getInit(i); 1851 Value *Init = Visit(IE); 1852 SmallVector<int, 16> Args; 1853 1854 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType()); 1855 1856 // Handle scalar elements. If the scalar initializer is actually one 1857 // element of a different vector of the same width, use shuffle instead of 1858 // extract+insert. 
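    // For example (a sketch, assuming an ext_vector_type 'float4'):
    //   float4 v = { w.y, 0, 0, 0 };
    // 'w.y' reaches us as an extractelement from a four-element vector, so the
    // first element can be folded into a single shufflevector of 'w' instead
    // of an extract followed by an insert.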
1859 if (!VVT) { 1860 if (isa<ExtVectorElementExpr>(IE)) { 1861 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init); 1862 1863 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType()) 1864 ->getNumElements() == ResElts) { 1865 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand()); 1866 Value *LHS = nullptr, *RHS = nullptr; 1867 if (CurIdx == 0) { 1868 // insert into undef -> shuffle (src, undef) 1869 // shufflemask must use an i32 1870 Args.push_back(getAsInt32(C, CGF.Int32Ty)); 1871 Args.resize(ResElts, -1); 1872 1873 LHS = EI->getVectorOperand(); 1874 RHS = V; 1875 VIsUndefShuffle = true; 1876 } else if (VIsUndefShuffle) { 1877 // insert into undefshuffle && size match -> shuffle (v, src) 1878 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V); 1879 for (unsigned j = 0; j != CurIdx; ++j) 1880 Args.push_back(getMaskElt(SVV, j, 0)); 1881 Args.push_back(ResElts + C->getZExtValue()); 1882 Args.resize(ResElts, -1); 1883 1884 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 1885 RHS = EI->getVectorOperand(); 1886 VIsUndefShuffle = false; 1887 } 1888 if (!Args.empty()) { 1889 V = Builder.CreateShuffleVector(LHS, RHS, Args); 1890 ++CurIdx; 1891 continue; 1892 } 1893 } 1894 } 1895 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx), 1896 "vecinit"); 1897 VIsUndefShuffle = false; 1898 ++CurIdx; 1899 continue; 1900 } 1901 1902 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements(); 1903 1904 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's 1905 // input is the same width as the vector being constructed, generate an 1906 // optimized shuffle of the swizzle input into the result. 1907 unsigned Offset = (CurIdx == 0) ? 0 : ResElts; 1908 if (isa<ExtVectorElementExpr>(IE)) { 1909 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init); 1910 Value *SVOp = SVI->getOperand(0); 1911 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType()); 1912 1913 if (OpTy->getNumElements() == ResElts) { 1914 for (unsigned j = 0; j != CurIdx; ++j) { 1915 // If the current vector initializer is a shuffle with undef, merge 1916 // this shuffle directly into it. 1917 if (VIsUndefShuffle) { 1918 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0)); 1919 } else { 1920 Args.push_back(j); 1921 } 1922 } 1923 for (unsigned j = 0, je = InitElts; j != je; ++j) 1924 Args.push_back(getMaskElt(SVI, j, Offset)); 1925 Args.resize(ResElts, -1); 1926 1927 if (VIsUndefShuffle) 1928 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 1929 1930 Init = SVOp; 1931 } 1932 } 1933 1934 // Extend init to result vector length, and then shuffle its contribution 1935 // to the vector initializer into V. 1936 if (Args.empty()) { 1937 for (unsigned j = 0; j != InitElts; ++j) 1938 Args.push_back(j); 1939 Args.resize(ResElts, -1); 1940 Init = Builder.CreateShuffleVector(Init, Args, "vext"); 1941 1942 Args.clear(); 1943 for (unsigned j = 0; j != CurIdx; ++j) 1944 Args.push_back(j); 1945 for (unsigned j = 0; j != InitElts; ++j) 1946 Args.push_back(j + Offset); 1947 Args.resize(ResElts, -1); 1948 } 1949 1950 // If V is undef, make sure it ends up on the RHS of the shuffle to aid 1951 // merging subsequent shuffles into this one. 1952 if (CurIdx == 0) 1953 std::swap(V, Init); 1954 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit"); 1955 VIsUndefShuffle = isa<llvm::UndefValue>(Init); 1956 CurIdx += InitElts; 1957 } 1958 1959 // FIXME: evaluate codegen vs. shuffling against constant null vector. 
  //        Emit remaining default initializers.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}

bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  return true;
}

// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to pointer-to-function decay, etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();

  // These cases are generally not written to ignore the result of
  // evaluating their sub-expressions, so we clear this now.
  bool Ignored = TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case. The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    Address Addr = EmitLValue(E).getAddress(CGF);
    Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
    LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }

  case CK_LValueToRValueBitCast: {
    LValue SourceLVal = CGF.EmitLValue(E);
    Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
                                                CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(const_cast<Expr*>(E));
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);
    if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
      llvm_unreachable("wrong cast for pointers in different address spaces"
                       " (must be an address space cast)!");
    }

    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
      if (auto *PT = DestTy->getAs<PointerType>()) {
        CGF.EmitVTablePtrCheckForCast(
            PT->getPointeeType(),
            Address(Src,
                    CGF.ConvertTypeForMem(
                        E->getType()->castAs<PointerType>()->getPointeeType()),
                    CGF.getPointerAlign()),
/*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast, 2051 CE->getBeginLoc()); 2052 } 2053 } 2054 2055 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) { 2056 const QualType SrcType = E->getType(); 2057 2058 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) { 2059 // Casting to pointer that could carry dynamic information (provided by 2060 // invariant.group) requires launder. 2061 Src = Builder.CreateLaunderInvariantGroup(Src); 2062 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) { 2063 // Casting to pointer that does not carry dynamic information (provided 2064 // by invariant.group) requires stripping it. Note that we don't do it 2065 // if the source could not be dynamic type and destination could be 2066 // dynamic because dynamic information is already laundered. It is 2067 // because launder(strip(src)) == launder(src), so there is no need to 2068 // add extra strip before launder. 2069 Src = Builder.CreateStripInvariantGroup(Src); 2070 } 2071 } 2072 2073 // Update heapallocsite metadata when there is an explicit pointer cast. 2074 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) { 2075 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) { 2076 QualType PointeeType = DestTy->getPointeeType(); 2077 if (!PointeeType.isNull()) 2078 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType, 2079 CE->getExprLoc()); 2080 } 2081 } 2082 2083 // If Src is a fixed vector and Dst is a scalable vector, and both have the 2084 // same element type, use the llvm.experimental.vector.insert intrinsic to 2085 // perform the bitcast. 2086 if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) { 2087 if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) { 2088 // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate 2089 // vector, use a vector insert and bitcast the result. 2090 bool NeedsBitCast = false; 2091 auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); 2092 llvm::Type *OrigType = DstTy; 2093 if (ScalableDst == PredType && 2094 FixedSrc->getElementType() == Builder.getInt8Ty()) { 2095 DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2); 2096 ScalableDst = cast<llvm::ScalableVectorType>(DstTy); 2097 NeedsBitCast = true; 2098 } 2099 if (FixedSrc->getElementType() == ScalableDst->getElementType()) { 2100 llvm::Value *UndefVec = llvm::UndefValue::get(DstTy); 2101 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); 2102 llvm::Value *Result = Builder.CreateInsertVector( 2103 DstTy, UndefVec, Src, Zero, "castScalableSve"); 2104 if (NeedsBitCast) 2105 Result = Builder.CreateBitCast(Result, OrigType); 2106 return Result; 2107 } 2108 } 2109 } 2110 2111 // If Src is a scalable vector and Dst is a fixed vector, and both have the 2112 // same element type, use the llvm.experimental.vector.extract intrinsic to 2113 // perform the bitcast. 2114 if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) { 2115 if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) { 2116 // If we are casting a scalable 16 x i1 predicate vector to a fixed i8 2117 // vector, bitcast the source and use a vector extract. 
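        // Roughly: an SVE predicate value (<vscale x 16 x i1>) cannot feed the
        // extract intrinsic directly because its element type differs from the
        // fixed i8 destination, so it is first reinterpreted as
        // <vscale x 2 x i8> and the fixed-length result is taken from that.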
2118 auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); 2119 if (ScalableSrc == PredType && 2120 FixedDst->getElementType() == Builder.getInt8Ty()) { 2121 SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2); 2122 ScalableSrc = cast<llvm::ScalableVectorType>(SrcTy); 2123 Src = Builder.CreateBitCast(Src, SrcTy); 2124 } 2125 if (ScalableSrc->getElementType() == FixedDst->getElementType()) { 2126 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); 2127 return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve"); 2128 } 2129 } 2130 } 2131 2132 // Perform VLAT <-> VLST bitcast through memory. 2133 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics 2134 // require the element types of the vectors to be the same, we 2135 // need to keep this around for bitcasts between VLAT <-> VLST where 2136 // the element types of the vectors are not the same, until we figure 2137 // out a better way of doing these casts. 2138 if ((isa<llvm::FixedVectorType>(SrcTy) && 2139 isa<llvm::ScalableVectorType>(DstTy)) || 2140 (isa<llvm::ScalableVectorType>(SrcTy) && 2141 isa<llvm::FixedVectorType>(DstTy))) { 2142 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value"); 2143 LValue LV = CGF.MakeAddrLValue(Addr, E->getType()); 2144 CGF.EmitStoreOfScalar(Src, LV); 2145 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy), 2146 "castFixedSve"); 2147 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy); 2148 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); 2149 return EmitLoadOfLValue(DestLV, CE->getExprLoc()); 2150 } 2151 return Builder.CreateBitCast(Src, DstTy); 2152 } 2153 case CK_AddressSpaceConversion: { 2154 Expr::EvalResult Result; 2155 if (E->EvaluateAsRValue(Result, CGF.getContext()) && 2156 Result.Val.isNullPointer()) { 2157 // If E has side effect, it is emitted even if its final result is a 2158 // null pointer. In that case, a DCE pass should be able to 2159 // eliminate the useless instructions emitted during translating E. 2160 if (Result.HasSideEffects) 2161 Visit(E); 2162 return CGF.CGM.getNullPointer(cast<llvm::PointerType>( 2163 ConvertType(DestTy)), DestTy); 2164 } 2165 // Since target may map different address spaces in AST to the same address 2166 // space, an address space conversion may end up as a bitcast. 2167 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast( 2168 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(), 2169 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy)); 2170 } 2171 case CK_AtomicToNonAtomic: 2172 case CK_NonAtomicToAtomic: 2173 case CK_UserDefinedConversion: 2174 return Visit(const_cast<Expr*>(E)); 2175 2176 case CK_NoOp: { 2177 llvm::Value *V = Visit(const_cast<Expr *>(E)); 2178 if (V) { 2179 // CK_NoOp can model a pointer qualification conversion, which can remove 2180 // an array bound and change the IR type. 2181 // FIXME: Once pointee types are removed from IR, remove this. 
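      // e.g. a no-op cast from 'int (*)[4]' to 'int (*)[]' keeps the value
      // unchanged but (with typed pointers) needs a bitcast to the new IR
      // pointer type.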
2182 llvm::Type *T = ConvertType(DestTy); 2183 if (T != V->getType()) 2184 V = Builder.CreateBitCast(V, T); 2185 } 2186 return V; 2187 } 2188 2189 case CK_BaseToDerived: { 2190 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl(); 2191 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"); 2192 2193 Address Base = CGF.EmitPointerWithAlignment(E); 2194 Address Derived = 2195 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl, 2196 CE->path_begin(), CE->path_end(), 2197 CGF.ShouldNullCheckClassCastValue(CE)); 2198 2199 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is 2200 // performed and the object is not of the derived type. 2201 if (CGF.sanitizePerformTypeCheck()) 2202 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(), 2203 Derived.getPointer(), DestTy->getPointeeType()); 2204 2205 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast)) 2206 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived, 2207 /*MayBeNull=*/true, 2208 CodeGenFunction::CFITCK_DerivedCast, 2209 CE->getBeginLoc()); 2210 2211 return Derived.getPointer(); 2212 } 2213 case CK_UncheckedDerivedToBase: 2214 case CK_DerivedToBase: { 2215 // The EmitPointerWithAlignment path does this fine; just discard 2216 // the alignment. 2217 return CGF.EmitPointerWithAlignment(CE).getPointer(); 2218 } 2219 2220 case CK_Dynamic: { 2221 Address V = CGF.EmitPointerWithAlignment(E); 2222 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE); 2223 return CGF.EmitDynamicCast(V, DCE); 2224 } 2225 2226 case CK_ArrayToPointerDecay: 2227 return CGF.EmitArrayToPointerDecay(E).getPointer(); 2228 case CK_FunctionToPointerDecay: 2229 return EmitLValue(E).getPointer(CGF); 2230 2231 case CK_NullToPointer: 2232 if (MustVisitNullValue(E)) 2233 CGF.EmitIgnoredExpr(E); 2234 2235 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)), 2236 DestTy); 2237 2238 case CK_NullToMemberPointer: { 2239 if (MustVisitNullValue(E)) 2240 CGF.EmitIgnoredExpr(E); 2241 2242 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>(); 2243 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT); 2244 } 2245 2246 case CK_ReinterpretMemberPointer: 2247 case CK_BaseToDerivedMemberPointer: 2248 case CK_DerivedToBaseMemberPointer: { 2249 Value *Src = Visit(E); 2250 2251 // Note that the AST doesn't distinguish between checked and 2252 // unchecked member pointer conversions, so we always have to 2253 // implement checked conversions here. This is inefficient when 2254 // actual control flow may be required in order to perform the 2255 // check, which it is for data member pointers (but not member 2256 // function pointers on Itanium and ARM). 
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject:
    return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(const_cast<Expr*>(E));

  case CK_IntegralToPointer: {
    Value *Src = Visit(const_cast<Expr*>(E));

    // First, convert to the correct width so that we control the kind of
    // extension.
    auto DestLLVMTy = ConvertType(DestTy);
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      // Going from integer to pointer that could be dynamic requires reloading
      // dynamic information from invariant.group.
      if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }
    return IntToPtr;
  }
  case CK_PointerToIntegral: {
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    auto *PtrExpr = Visit(E);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      // Casting to integer requires stripping dynamic information as it does
      // not carry it.
2317 if (SrcType.mayBeDynamicClass()) 2318 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr); 2319 } 2320 2321 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy)); 2322 } 2323 case CK_ToVoid: { 2324 CGF.EmitIgnoredExpr(E); 2325 return nullptr; 2326 } 2327 case CK_MatrixCast: { 2328 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2329 CE->getExprLoc()); 2330 } 2331 case CK_VectorSplat: { 2332 llvm::Type *DstTy = ConvertType(DestTy); 2333 Value *Elt = Visit(const_cast<Expr*>(E)); 2334 // Splat the element across to all elements 2335 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements(); 2336 return Builder.CreateVectorSplat(NumElements, Elt, "splat"); 2337 } 2338 2339 case CK_FixedPointCast: 2340 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2341 CE->getExprLoc()); 2342 2343 case CK_FixedPointToBoolean: 2344 assert(E->getType()->isFixedPointType() && 2345 "Expected src type to be fixed point type"); 2346 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type"); 2347 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2348 CE->getExprLoc()); 2349 2350 case CK_FixedPointToIntegral: 2351 assert(E->getType()->isFixedPointType() && 2352 "Expected src type to be fixed point type"); 2353 assert(DestTy->isIntegerType() && "Expected dest type to be an integer"); 2354 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2355 CE->getExprLoc()); 2356 2357 case CK_IntegralToFixedPoint: 2358 assert(E->getType()->isIntegerType() && 2359 "Expected src type to be an integer"); 2360 assert(DestTy->isFixedPointType() && 2361 "Expected dest type to be fixed point type"); 2362 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2363 CE->getExprLoc()); 2364 2365 case CK_IntegralCast: { 2366 ScalarConversionOpts Opts; 2367 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) { 2368 if (!ICE->isPartOfExplicitCast()) 2369 Opts = ScalarConversionOpts(CGF.SanOpts); 2370 } 2371 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2372 CE->getExprLoc(), Opts); 2373 } 2374 case CK_IntegralToFloating: 2375 case CK_FloatingToIntegral: 2376 case CK_FloatingCast: 2377 case CK_FixedPointToFloating: 2378 case CK_FloatingToFixedPoint: { 2379 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2380 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2381 CE->getExprLoc()); 2382 } 2383 case CK_BooleanToSignedIntegral: { 2384 ScalarConversionOpts Opts; 2385 Opts.TreatBooleanAsSigned = true; 2386 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2387 CE->getExprLoc(), Opts); 2388 } 2389 case CK_IntegralToBoolean: 2390 return EmitIntToBoolConversion(Visit(E)); 2391 case CK_PointerToBoolean: 2392 return EmitPointerToBoolConversion(Visit(E), E->getType()); 2393 case CK_FloatingToBoolean: { 2394 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2395 return EmitFloatToBoolConversion(Visit(E)); 2396 } 2397 case CK_MemberPointerToBoolean: { 2398 llvm::Value *MemPtr = Visit(E); 2399 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>(); 2400 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT); 2401 } 2402 2403 case CK_FloatingComplexToReal: 2404 case CK_IntegralComplexToReal: 2405 return CGF.EmitComplexExpr(E, false, true).first; 2406 2407 case CK_FloatingComplexToBoolean: 2408 case CK_IntegralComplexToBoolean: { 2409 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E); 2410 2411 // TODO: kill this function off, inline appropriate case here 2412 return 
EmitComplexToScalarConversion(V, E->getType(), DestTy, 2413 CE->getExprLoc()); 2414 } 2415 2416 case CK_ZeroToOCLOpaqueType: { 2417 assert((DestTy->isEventT() || DestTy->isQueueT() || 2418 DestTy->isOCLIntelSubgroupAVCType()) && 2419 "CK_ZeroToOCLEvent cast on non-event type"); 2420 return llvm::Constant::getNullValue(ConvertType(DestTy)); 2421 } 2422 2423 case CK_IntToOCLSampler: 2424 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF); 2425 2426 } // end of switch 2427 2428 llvm_unreachable("unknown scalar cast"); 2429 } 2430 2431 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) { 2432 CodeGenFunction::StmtExprEvaluation eval(CGF); 2433 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), 2434 !E->getType()->isVoidType()); 2435 if (!RetAlloca.isValid()) 2436 return nullptr; 2437 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()), 2438 E->getExprLoc()); 2439 } 2440 2441 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { 2442 CodeGenFunction::RunCleanupsScope Scope(CGF); 2443 Value *V = Visit(E->getSubExpr()); 2444 // Defend against dominance problems caused by jumps out of expression 2445 // evaluation through the shared cleanup block. 2446 Scope.ForceCleanup({&V}); 2447 return V; 2448 } 2449 2450 //===----------------------------------------------------------------------===// 2451 // Unary Operators 2452 //===----------------------------------------------------------------------===// 2453 2454 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, 2455 llvm::Value *InVal, bool IsInc, 2456 FPOptions FPFeatures) { 2457 BinOpInfo BinOp; 2458 BinOp.LHS = InVal; 2459 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false); 2460 BinOp.Ty = E->getType(); 2461 BinOp.Opcode = IsInc ? BO_Add : BO_Sub; 2462 BinOp.FPFeatures = FPFeatures; 2463 BinOp.E = E; 2464 return BinOp; 2465 } 2466 2467 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior( 2468 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) { 2469 llvm::Value *Amount = 2470 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true); 2471 StringRef Name = IsInc ? "inc" : "dec"; 2472 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 2473 case LangOptions::SOB_Defined: 2474 return Builder.CreateAdd(InVal, Amount, Name); 2475 case LangOptions::SOB_Undefined: 2476 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 2477 return Builder.CreateNSWAdd(InVal, Amount, Name); 2478 LLVM_FALLTHROUGH; 2479 case LangOptions::SOB_Trapping: 2480 if (!E->canOverflow()) 2481 return Builder.CreateNSWAdd(InVal, Amount, Name); 2482 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec( 2483 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()))); 2484 } 2485 llvm_unreachable("Unknown SignedOverflowBehaviorTy"); 2486 } 2487 2488 namespace { 2489 /// Handles check and update for lastprivate conditional variables. 
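/// On destruction, i.e. once the increment/decrement has been emitted, it asks
/// the OpenMP runtime to perform the lastprivate-conditional bookkeeping for
/// the updated sub-expression (only when OpenMP is enabled).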
2490 class OMPLastprivateConditionalUpdateRAII { 2491 private: 2492 CodeGenFunction &CGF; 2493 const UnaryOperator *E; 2494 2495 public: 2496 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF, 2497 const UnaryOperator *E) 2498 : CGF(CGF), E(E) {} 2499 ~OMPLastprivateConditionalUpdateRAII() { 2500 if (CGF.getLangOpts().OpenMP) 2501 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional( 2502 CGF, E->getSubExpr()); 2503 } 2504 }; 2505 } // namespace 2506 2507 llvm::Value * 2508 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, 2509 bool isInc, bool isPre) { 2510 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E); 2511 QualType type = E->getSubExpr()->getType(); 2512 llvm::PHINode *atomicPHI = nullptr; 2513 llvm::Value *value; 2514 llvm::Value *input; 2515 2516 int amount = (isInc ? 1 : -1); 2517 bool isSubtraction = !isInc; 2518 2519 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) { 2520 type = atomicTy->getValueType(); 2521 if (isInc && type->isBooleanType()) { 2522 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type); 2523 if (isPre) { 2524 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified()) 2525 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent); 2526 return Builder.getTrue(); 2527 } 2528 // For atomic bool increment, we just store true and return it for 2529 // preincrement, do an atomic swap with true for postincrement 2530 return Builder.CreateAtomicRMW( 2531 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True, 2532 llvm::AtomicOrdering::SequentiallyConsistent); 2533 } 2534 // Special case for atomic increment / decrement on integers, emit 2535 // atomicrmw instructions. We skip this if we want to be doing overflow 2536 // checking, and fall into the slow path with the atomic cmpxchg loop. 2537 if (!type->isBooleanType() && type->isIntegerType() && 2538 !(type->isUnsignedIntegerType() && 2539 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 2540 CGF.getLangOpts().getSignedOverflowBehavior() != 2541 LangOptions::SOB_Trapping) { 2542 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add : 2543 llvm::AtomicRMWInst::Sub; 2544 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add : 2545 llvm::Instruction::Sub; 2546 llvm::Value *amt = CGF.EmitToMemory( 2547 llvm::ConstantInt::get(ConvertType(type), 1, true), type); 2548 llvm::Value *old = 2549 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt, 2550 llvm::AtomicOrdering::SequentiallyConsistent); 2551 return isPre ? Builder.CreateBinOp(op, old, amt) : old; 2552 } 2553 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2554 input = value; 2555 // For every other atomic operation, we need to emit a load-op-cmpxchg loop 2556 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 2557 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 2558 value = CGF.EmitToMemory(value, type); 2559 Builder.CreateBr(opBB); 2560 Builder.SetInsertPoint(opBB); 2561 atomicPHI = Builder.CreatePHI(value->getType(), 2); 2562 atomicPHI->addIncoming(value, startBB); 2563 value = atomicPHI; 2564 } else { 2565 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2566 input = value; 2567 } 2568 2569 // Special case of integer increment that we have to check first: bool++. 2570 // Due to promotion rules, we get: 2571 // bool++ -> bool = bool + 1 2572 // -> bool = (int)bool + 1 2573 // -> bool = ((int)bool + 1 != 0) 2574 // An interesting aspect of this is that increment is always true. 2575 // Decrement does not have this property. 
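  // In other words, '++b' for a boolean 'b' can simply store 'true', while a
  // decrement falls through to the ordinary integer path below.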
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

  // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    QualType promotedType;
    bool canPerformLossyDemotionCheck = false;
    if (type->isPromotableIntegerType()) {
      promotedType = CGF.getContext().getPromotedIntegerType(type);
      assert(promotedType != type && "Shouldn't promote to the same type.");
      canPerformLossyDemotionCheck = true;
      canPerformLossyDemotionCheck &=
          CGF.getContext().getCanonicalType(type) !=
          CGF.getContext().getCanonicalType(promotedType);
      canPerformLossyDemotionCheck &=
          PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
              type, promotedType);
      assert((!canPerformLossyDemotionCheck ||
              type->isSignedIntegerOrEnumerationType() ||
              promotedType->isSignedIntegerOrEnumerationType() ||
              ConvertType(type)->getScalarSizeInBits() ==
                  ConvertType(promotedType)->getScalarSizeInBits()) &&
             "The following check expects that if we do promotion to different "
             "underlying canonical type, at least one of the types (either "
             "base or promoted) will be signed, or the bitwidths will match.");
    }
    if (CGF.SanOpts.hasOneOf(
            SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
        canPerformLossyDemotionCheck) {
      // While `x += 1` (for `x` with width less than int) is modeled as
      // promotion+arithmetic+demotion, where we can catch lossy demotion with
      // ease, inc/dec with width less than int can't overflow because of
      // promotion rules, so we omit the promotion+demotion; this means we
      // cannot catch lossy "demotion". Because we still want to catch these
      // cases when the sanitizer is enabled, we perform the promotion, then
      // perform the increment/decrement in the wider type, and finally
      // perform the demotion. This will catch lossy demotions.

      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
      // Do pass non-default ScalarConversionOpts so that sanitizer check is
      // emitted.
      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
                                   ScalarConversionOpts(CGF.SanOpts));

      // Note that signed integer inc/dec with width less than int can't
      // overflow because of promotion rules; we're just eliding a few steps
      // here.
    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
    } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
      value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
          E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
    } else {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }

  // Next most common: pointer increment.
  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
    QualType type = ptr->getPointeeType();

    // VLA types don't have constant size.
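    // e.g. for 'T (*p)[n]; ++p;' the pointee is a VLA, so the GEP below steps
    // by the runtime element count 'n' (negated for a decrement) rather than
    // by a compile-time constant.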
2641 if (const VariableArrayType *vla 2642 = CGF.getContext().getAsVariableArrayType(type)) { 2643 llvm::Value *numElts = CGF.getVLASize(vla).NumElts; 2644 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize"); 2645 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType()); 2646 if (CGF.getLangOpts().isSignedOverflowDefined()) 2647 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc"); 2648 else 2649 value = CGF.EmitCheckedInBoundsGEP( 2650 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction, 2651 E->getExprLoc(), "vla.inc"); 2652 2653 // Arithmetic on function pointers (!) is just +-1. 2654 } else if (type->isFunctionType()) { 2655 llvm::Value *amt = Builder.getInt32(amount); 2656 2657 value = CGF.EmitCastToVoidPtr(value); 2658 if (CGF.getLangOpts().isSignedOverflowDefined()) 2659 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr"); 2660 else 2661 value = CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt, 2662 /*SignedIndices=*/false, 2663 isSubtraction, E->getExprLoc(), 2664 "incdec.funcptr"); 2665 value = Builder.CreateBitCast(value, input->getType()); 2666 2667 // For everything else, we can just do a simple increment. 2668 } else { 2669 llvm::Value *amt = Builder.getInt32(amount); 2670 llvm::Type *elemTy = CGF.ConvertTypeForMem(type); 2671 if (CGF.getLangOpts().isSignedOverflowDefined()) 2672 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr"); 2673 else 2674 value = CGF.EmitCheckedInBoundsGEP( 2675 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction, 2676 E->getExprLoc(), "incdec.ptr"); 2677 } 2678 2679 // Vector increment/decrement. 2680 } else if (type->isVectorType()) { 2681 if (type->hasIntegerRepresentation()) { 2682 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount); 2683 2684 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 2685 } else { 2686 value = Builder.CreateFAdd( 2687 value, 2688 llvm::ConstantFP::get(value->getType(), amount), 2689 isInc ? "inc" : "dec"); 2690 } 2691 2692 // Floating point. 2693 } else if (type->isRealFloatingType()) { 2694 // Add the inc/dec to the real part. 2695 llvm::Value *amt; 2696 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); 2697 2698 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 2699 // Another special case: half FP increment should be done via float 2700 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 2701 value = Builder.CreateCall( 2702 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, 2703 CGF.CGM.FloatTy), 2704 input, "incdec.conv"); 2705 } else { 2706 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv"); 2707 } 2708 } 2709 2710 if (value->getType()->isFloatTy()) 2711 amt = llvm::ConstantFP::get(VMContext, 2712 llvm::APFloat(static_cast<float>(amount))); 2713 else if (value->getType()->isDoubleTy()) 2714 amt = llvm::ConstantFP::get(VMContext, 2715 llvm::APFloat(static_cast<double>(amount))); 2716 else { 2717 // Remaining types are Half, LongDouble, __ibm128 or __float128. Convert 2718 // from float. 2719 llvm::APFloat F(static_cast<float>(amount)); 2720 bool ignored; 2721 const llvm::fltSemantics *FS; 2722 // Don't use getFloatTypeSemantics because Half isn't 2723 // necessarily represented using the "half" LLVM type. 
2724 if (value->getType()->isFP128Ty()) 2725 FS = &CGF.getTarget().getFloat128Format(); 2726 else if (value->getType()->isHalfTy()) 2727 FS = &CGF.getTarget().getHalfFormat(); 2728 else if (value->getType()->isPPC_FP128Ty()) 2729 FS = &CGF.getTarget().getIbm128Format(); 2730 else 2731 FS = &CGF.getTarget().getLongDoubleFormat(); 2732 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored); 2733 amt = llvm::ConstantFP::get(VMContext, F); 2734 } 2735 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec"); 2736 2737 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 2738 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 2739 value = Builder.CreateCall( 2740 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, 2741 CGF.CGM.FloatTy), 2742 value, "incdec.conv"); 2743 } else { 2744 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv"); 2745 } 2746 } 2747 2748 // Fixed-point types. 2749 } else if (type->isFixedPointType()) { 2750 // Fixed-point types are tricky. In some cases, it isn't possible to 2751 // represent a 1 or a -1 in the type at all. Piggyback off of 2752 // EmitFixedPointBinOp to avoid having to reimplement saturation. 2753 BinOpInfo Info; 2754 Info.E = E; 2755 Info.Ty = E->getType(); 2756 Info.Opcode = isInc ? BO_Add : BO_Sub; 2757 Info.LHS = value; 2758 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false); 2759 // If the type is signed, it's better to represent this as +(-1) or -(-1), 2760 // since -1 is guaranteed to be representable. 2761 if (type->isSignedFixedPointType()) { 2762 Info.Opcode = isInc ? BO_Sub : BO_Add; 2763 Info.RHS = Builder.CreateNeg(Info.RHS); 2764 } 2765 // Now, convert from our invented integer literal to the type of the unary 2766 // op. This will upscale and saturate if necessary. This value can become 2767 // undef in some cases. 2768 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 2769 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty); 2770 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema); 2771 value = EmitFixedPointBinOp(Info); 2772 2773 // Objective-C pointer types. 2774 } else { 2775 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>(); 2776 value = CGF.EmitCastToVoidPtr(value); 2777 2778 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType()); 2779 if (!isInc) size = -size; 2780 llvm::Value *sizeValue = 2781 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity()); 2782 2783 if (CGF.getLangOpts().isSignedOverflowDefined()) 2784 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr"); 2785 else 2786 value = CGF.EmitCheckedInBoundsGEP( 2787 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction, 2788 E->getExprLoc(), "incdec.objptr"); 2789 value = Builder.CreateBitCast(value, input->getType()); 2790 } 2791 2792 if (atomicPHI) { 2793 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 2794 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 2795 auto Pair = CGF.EmitAtomicCompareExchange( 2796 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc()); 2797 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type); 2798 llvm::Value *success = Pair.second; 2799 atomicPHI->addIncoming(old, curBlock); 2800 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 2801 Builder.SetInsertPoint(contBB); 2802 return isPre ? value : input; 2803 } 2804 2805 // Store the updated result through the lvalue. 
2806 if (LV.isBitField()) 2807 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value); 2808 else 2809 CGF.EmitStoreThroughLValue(RValue::get(value), LV); 2810 2811 // If this is a postinc, return the value read from memory, otherwise use the 2812 // updated value. 2813 return isPre ? value : input; 2814 } 2815 2816 2817 2818 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { 2819 TestAndClearIgnoreResultAssign(); 2820 Value *Op = Visit(E->getSubExpr()); 2821 2822 // Generate a unary FNeg for FP ops. 2823 if (Op->getType()->isFPOrFPVectorTy()) 2824 return Builder.CreateFNeg(Op, "fneg"); 2825 2826 // Emit unary minus with EmitSub so we handle overflow cases etc. 2827 BinOpInfo BinOp; 2828 BinOp.RHS = Op; 2829 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType()); 2830 BinOp.Ty = E->getType(); 2831 BinOp.Opcode = BO_Sub; 2832 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 2833 BinOp.E = E; 2834 return EmitSub(BinOp); 2835 } 2836 2837 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { 2838 TestAndClearIgnoreResultAssign(); 2839 Value *Op = Visit(E->getSubExpr()); 2840 return Builder.CreateNot(Op, "neg"); 2841 } 2842 2843 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { 2844 // Perform vector logical not on comparison with zero vector. 2845 if (E->getType()->isVectorType() && 2846 E->getType()->castAs<VectorType>()->getVectorKind() == 2847 VectorType::GenericVector) { 2848 Value *Oper = Visit(E->getSubExpr()); 2849 Value *Zero = llvm::Constant::getNullValue(Oper->getType()); 2850 Value *Result; 2851 if (Oper->getType()->isFPOrFPVectorTy()) { 2852 CodeGenFunction::CGFPOptionsRAII FPOptsRAII( 2853 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts())); 2854 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp"); 2855 } else 2856 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp"); 2857 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 2858 } 2859 2860 // Compare operand to zero. 2861 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); 2862 2863 // Invert value. 2864 // TODO: Could dynamically modify easy computations here. For example, if 2865 // the operand is an icmp ne, turn into icmp eq. 2866 BoolVal = Builder.CreateNot(BoolVal, "lnot"); 2867 2868 // ZExt result to the expr type. 2869 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext"); 2870 } 2871 2872 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) { 2873 // Try folding the offsetof to a constant. 2874 Expr::EvalResult EVResult; 2875 if (E->EvaluateAsInt(EVResult, CGF.getContext())) { 2876 llvm::APSInt Value = EVResult.Val.getInt(); 2877 return Builder.getInt(Value); 2878 } 2879 2880 // Loop over the components of the offsetof to compute the value. 
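  // e.g. __builtin_offsetof(S, a[i].b) is accumulated component by component:
  // the field offset of 'a' in S, plus i * sizeof(a[0]), plus the field offset
  // of 'b' within an element.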
2881 unsigned n = E->getNumComponents(); 2882 llvm::Type* ResultType = ConvertType(E->getType()); 2883 llvm::Value* Result = llvm::Constant::getNullValue(ResultType); 2884 QualType CurrentType = E->getTypeSourceInfo()->getType(); 2885 for (unsigned i = 0; i != n; ++i) { 2886 OffsetOfNode ON = E->getComponent(i); 2887 llvm::Value *Offset = nullptr; 2888 switch (ON.getKind()) { 2889 case OffsetOfNode::Array: { 2890 // Compute the index 2891 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex()); 2892 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr); 2893 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType(); 2894 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv"); 2895 2896 // Save the element type 2897 CurrentType = 2898 CGF.getContext().getAsArrayType(CurrentType)->getElementType(); 2899 2900 // Compute the element size 2901 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType, 2902 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity()); 2903 2904 // Multiply out to compute the result 2905 Offset = Builder.CreateMul(Idx, ElemSize); 2906 break; 2907 } 2908 2909 case OffsetOfNode::Field: { 2910 FieldDecl *MemberDecl = ON.getField(); 2911 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl(); 2912 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 2913 2914 // Compute the index of the field in its parent. 2915 unsigned i = 0; 2916 // FIXME: It would be nice if we didn't have to loop here! 2917 for (RecordDecl::field_iterator Field = RD->field_begin(), 2918 FieldEnd = RD->field_end(); 2919 Field != FieldEnd; ++Field, ++i) { 2920 if (*Field == MemberDecl) 2921 break; 2922 } 2923 assert(i < RL.getFieldCount() && "offsetof field in wrong type"); 2924 2925 // Compute the offset to the field 2926 int64_t OffsetInt = RL.getFieldOffset(i) / 2927 CGF.getContext().getCharWidth(); 2928 Offset = llvm::ConstantInt::get(ResultType, OffsetInt); 2929 2930 // Save the element type. 2931 CurrentType = MemberDecl->getType(); 2932 break; 2933 } 2934 2935 case OffsetOfNode::Identifier: 2936 llvm_unreachable("dependent __builtin_offsetof"); 2937 2938 case OffsetOfNode::Base: { 2939 if (ON.getBase()->isVirtual()) { 2940 CGF.ErrorUnsupported(E, "virtual base in offsetof"); 2941 continue; 2942 } 2943 2944 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl(); 2945 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 2946 2947 // Save the element type. 2948 CurrentType = ON.getBase()->getType(); 2949 2950 // Compute the offset to the base. 2951 auto *BaseRT = CurrentType->castAs<RecordType>(); 2952 auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl()); 2953 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD); 2954 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity()); 2955 break; 2956 } 2957 } 2958 Result = Builder.CreateAdd(Result, Offset); 2959 } 2960 return Result; 2961 } 2962 2963 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of 2964 /// argument of the sizeof expression as an integer. 2965 Value * 2966 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( 2967 const UnaryExprOrTypeTraitExpr *E) { 2968 QualType TypeToSize = E->getTypeOfArgument(); 2969 if (E->getKind() == UETT_SizeOf) { 2970 if (const VariableArrayType *VAT = 2971 CGF.getContext().getAsVariableArrayType(TypeToSize)) { 2972 if (E->isArgumentType()) { 2973 // sizeof(type) - make sure to emit the VLA size. 
2974 CGF.EmitVariablyModifiedType(TypeToSize); 2975 } else { 2976 // C99 6.5.3.4p2: If the argument is an expression of type 2977 // VLA, it is evaluated. 2978 CGF.EmitIgnoredExpr(E->getArgumentExpr()); 2979 } 2980 2981 auto VlaSize = CGF.getVLASize(VAT); 2982 llvm::Value *size = VlaSize.NumElts; 2983 2984 // Scale the number of non-VLA elements by the non-VLA element size. 2985 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type); 2986 if (!eltSize.isOne()) 2987 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size); 2988 2989 return size; 2990 } 2991 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) { 2992 auto Alignment = 2993 CGF.getContext() 2994 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 2995 E->getTypeOfArgument()->getPointeeType())) 2996 .getQuantity(); 2997 return llvm::ConstantInt::get(CGF.SizeTy, Alignment); 2998 } 2999 3000 // If this isn't sizeof(vla), the result must be constant; use the constant 3001 // folding logic so we don't have to duplicate it here. 3002 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext())); 3003 } 3004 3005 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) { 3006 Expr *Op = E->getSubExpr(); 3007 if (Op->getType()->isAnyComplexType()) { 3008 // If it's an l-value, load through the appropriate subobject l-value. 3009 // Note that we have to ask E because Op might be an l-value that 3010 // this won't work for, e.g. an Obj-C property. 3011 if (E->isGLValue()) 3012 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), 3013 E->getExprLoc()).getScalarVal(); 3014 3015 // Otherwise, calculate and project. 3016 return CGF.EmitComplexExpr(Op, false, true).first; 3017 } 3018 3019 return Visit(Op); 3020 } 3021 3022 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) { 3023 Expr *Op = E->getSubExpr(); 3024 if (Op->getType()->isAnyComplexType()) { 3025 // If it's an l-value, load through the appropriate subobject l-value. 3026 // Note that we have to ask E because Op might be an l-value that 3027 // this won't work for, e.g. an Obj-C property. 3028 if (Op->isGLValue()) 3029 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), 3030 E->getExprLoc()).getScalarVal(); 3031 3032 // Otherwise, calculate and project. 3033 return CGF.EmitComplexExpr(Op, true, false).second; 3034 } 3035 3036 // __imag on a scalar returns zero. Emit the subexpr to ensure side 3037 // effects are evaluated, but not the actual value. 
3038 if (Op->isGLValue()) 3039 CGF.EmitLValue(Op); 3040 else 3041 CGF.EmitScalarExpr(Op, true); 3042 return llvm::Constant::getNullValue(ConvertType(E->getType())); 3043 } 3044 3045 //===----------------------------------------------------------------------===// 3046 // Binary Operators 3047 //===----------------------------------------------------------------------===// 3048 3049 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) { 3050 TestAndClearIgnoreResultAssign(); 3051 BinOpInfo Result; 3052 Result.LHS = Visit(E->getLHS()); 3053 Result.RHS = Visit(E->getRHS()); 3054 Result.Ty = E->getType(); 3055 Result.Opcode = E->getOpcode(); 3056 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 3057 Result.E = E; 3058 return Result; 3059 } 3060 3061 LValue ScalarExprEmitter::EmitCompoundAssignLValue( 3062 const CompoundAssignOperator *E, 3063 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &), 3064 Value *&Result) { 3065 QualType LHSTy = E->getLHS()->getType(); 3066 BinOpInfo OpInfo; 3067 3068 if (E->getComputationResultType()->isAnyComplexType()) 3069 return CGF.EmitScalarCompoundAssignWithComplex(E, Result); 3070 3071 // Emit the RHS first. __block variables need to have the rhs evaluated 3072 // first, plus this should improve codegen a little. 3073 OpInfo.RHS = Visit(E->getRHS()); 3074 OpInfo.Ty = E->getComputationResultType(); 3075 OpInfo.Opcode = E->getOpcode(); 3076 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 3077 OpInfo.E = E; 3078 // Load/convert the LHS. 3079 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 3080 3081 llvm::PHINode *atomicPHI = nullptr; 3082 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) { 3083 QualType type = atomicTy->getValueType(); 3084 if (!type->isBooleanType() && type->isIntegerType() && 3085 !(type->isUnsignedIntegerType() && 3086 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 3087 CGF.getLangOpts().getSignedOverflowBehavior() != 3088 LangOptions::SOB_Trapping) { 3089 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP; 3090 llvm::Instruction::BinaryOps Op; 3091 switch (OpInfo.Opcode) { 3092 // We don't have atomicrmw operands for *, %, /, <<, >> 3093 case BO_MulAssign: case BO_DivAssign: 3094 case BO_RemAssign: 3095 case BO_ShlAssign: 3096 case BO_ShrAssign: 3097 break; 3098 case BO_AddAssign: 3099 AtomicOp = llvm::AtomicRMWInst::Add; 3100 Op = llvm::Instruction::Add; 3101 break; 3102 case BO_SubAssign: 3103 AtomicOp = llvm::AtomicRMWInst::Sub; 3104 Op = llvm::Instruction::Sub; 3105 break; 3106 case BO_AndAssign: 3107 AtomicOp = llvm::AtomicRMWInst::And; 3108 Op = llvm::Instruction::And; 3109 break; 3110 case BO_XorAssign: 3111 AtomicOp = llvm::AtomicRMWInst::Xor; 3112 Op = llvm::Instruction::Xor; 3113 break; 3114 case BO_OrAssign: 3115 AtomicOp = llvm::AtomicRMWInst::Or; 3116 Op = llvm::Instruction::Or; 3117 break; 3118 default: 3119 llvm_unreachable("Invalid compound assignment type"); 3120 } 3121 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) { 3122 llvm::Value *Amt = CGF.EmitToMemory( 3123 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy, 3124 E->getExprLoc()), 3125 LHSTy); 3126 Value *OldVal = Builder.CreateAtomicRMW( 3127 AtomicOp, LHSLV.getPointer(CGF), Amt, 3128 llvm::AtomicOrdering::SequentiallyConsistent); 3129 3130 // Since operation is atomic, the result type is guaranteed to be the 3131 // same as the input in LLVM terms. 
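// The atomicrmw returns the value that was in memory before the operation,
// so recompute the updated value here to use as the expression result.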
3132 Result = Builder.CreateBinOp(Op, OldVal, Amt); 3133 return LHSLV; 3134 } 3135 } 3136 // FIXME: For floating point types, we should be saving and restoring the 3137 // floating point environment in the loop. 3138 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 3139 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 3140 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); 3141 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type); 3142 Builder.CreateBr(opBB); 3143 Builder.SetInsertPoint(opBB); 3144 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2); 3145 atomicPHI->addIncoming(OpInfo.LHS, startBB); 3146 OpInfo.LHS = atomicPHI; 3147 } 3148 else 3149 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); 3150 3151 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures); 3152 SourceLocation Loc = E->getExprLoc(); 3153 OpInfo.LHS = 3154 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc); 3155 3156 // Expand the binary operator. 3157 Result = (this->*Func)(OpInfo); 3158 3159 // Convert the result back to the LHS type, 3160 // potentially with Implicit Conversion sanitizer check. 3161 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy, 3162 Loc, ScalarConversionOpts(CGF.SanOpts)); 3163 3164 if (atomicPHI) { 3165 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 3166 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 3167 auto Pair = CGF.EmitAtomicCompareExchange( 3168 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc()); 3169 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy); 3170 llvm::Value *success = Pair.second; 3171 atomicPHI->addIncoming(old, curBlock); 3172 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 3173 Builder.SetInsertPoint(contBB); 3174 return LHSLV; 3175 } 3176 3177 // Store the result value into the LHS lvalue. Bit-fields are handled 3178 // specially because the result is altered by the store, i.e., [C99 6.5.16p1] 3179 // 'An assignment expression has the value of the left operand after the 3180 // assignment...'. 3181 if (LHSLV.isBitField()) 3182 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result); 3183 else 3184 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV); 3185 3186 if (CGF.getLangOpts().OpenMP) 3187 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, 3188 E->getLHS()); 3189 return LHSLV; 3190 } 3191 3192 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, 3193 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) { 3194 bool Ignore = TestAndClearIgnoreResultAssign(); 3195 Value *RHS = nullptr; 3196 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS); 3197 3198 // If the result is clearly ignored, return now. 3199 if (Ignore) 3200 return nullptr; 3201 3202 // The result of an assignment in C is the assigned r-value. 3203 if (!CGF.getLangOpts().CPlusPlus) 3204 return RHS; 3205 3206 // If the lvalue is non-volatile, return the computed value of the assignment. 3207 if (!LHS.isVolatileQualified()) 3208 return RHS; 3209 3210 // Otherwise, reload the value. 
3211 return EmitLoadOfLValue(LHS, E->getExprLoc()); 3212 } 3213 3214 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck( 3215 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) { 3216 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks; 3217 3218 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) { 3219 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero), 3220 SanitizerKind::IntegerDivideByZero)); 3221 } 3222 3223 const auto *BO = cast<BinaryOperator>(Ops.E); 3224 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) && 3225 Ops.Ty->hasSignedIntegerRepresentation() && 3226 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) && 3227 Ops.mayHaveIntegerOverflow()) { 3228 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType()); 3229 3230 llvm::Value *IntMin = 3231 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth())); 3232 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty); 3233 3234 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin); 3235 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne); 3236 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or"); 3237 Checks.push_back( 3238 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow)); 3239 } 3240 3241 if (Checks.size() > 0) 3242 EmitBinOpCheck(Checks, Ops); 3243 } 3244 3245 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) { 3246 { 3247 CodeGenFunction::SanitizerScope SanScope(&CGF); 3248 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || 3249 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && 3250 Ops.Ty->isIntegerType() && 3251 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) { 3252 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3253 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true); 3254 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) && 3255 Ops.Ty->isRealFloatingType() && 3256 Ops.mayHaveFloatDivisionByZero()) { 3257 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3258 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero); 3259 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero), 3260 Ops); 3261 } 3262 } 3263 3264 if (Ops.Ty->isConstantMatrixType()) { 3265 llvm::MatrixBuilder MB(Builder); 3266 // We need to check the types of the operands of the operator to get the 3267 // correct matrix dimensions. 
3268 auto *BO = cast<BinaryOperator>(Ops.E); 3269 (void)BO; 3270 assert( 3271 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) && 3272 "first operand must be a matrix"); 3273 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() && 3274 "second operand must be an arithmetic type"); 3275 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); 3276 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS, 3277 Ops.Ty->hasUnsignedIntegerRepresentation()); 3278 } 3279 3280 if (Ops.LHS->getType()->isFPOrFPVectorTy()) { 3281 llvm::Value *Val; 3282 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); 3283 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div"); 3284 if ((CGF.getLangOpts().OpenCL && 3285 !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) || 3286 (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice && 3287 !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) { 3288 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp 3289 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt 3290 // build option allows an application to specify that single precision 3291 // floating-point divide (x/y and 1/x) and sqrt used in the program 3292 // source are correctly rounded. 3293 llvm::Type *ValTy = Val->getType(); 3294 if (ValTy->isFloatTy() || 3295 (isa<llvm::VectorType>(ValTy) && 3296 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy())) 3297 CGF.SetFPAccuracy(Val, 2.5); 3298 } 3299 return Val; 3300 } 3301 else if (Ops.isFixedPointOp()) 3302 return EmitFixedPointBinOp(Ops); 3303 else if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3304 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div"); 3305 else 3306 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div"); 3307 } 3308 3309 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) { 3310 // Rem in C can't be a floating point type: C99 6.5.5p2. 3311 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || 3312 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && 3313 Ops.Ty->isIntegerType() && 3314 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) { 3315 CodeGenFunction::SanitizerScope SanScope(&CGF); 3316 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3317 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false); 3318 } 3319 3320 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3321 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem"); 3322 else 3323 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem"); 3324 } 3325 3326 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { 3327 unsigned IID; 3328 unsigned OpID = 0; 3329 SanitizerHandler OverflowKind; 3330 3331 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType(); 3332 switch (Ops.Opcode) { 3333 case BO_Add: 3334 case BO_AddAssign: 3335 OpID = 1; 3336 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow : 3337 llvm::Intrinsic::uadd_with_overflow; 3338 OverflowKind = SanitizerHandler::AddOverflow; 3339 break; 3340 case BO_Sub: 3341 case BO_SubAssign: 3342 OpID = 2; 3343 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow : 3344 llvm::Intrinsic::usub_with_overflow; 3345 OverflowKind = SanitizerHandler::SubOverflow; 3346 break; 3347 case BO_Mul: 3348 case BO_MulAssign: 3349 OpID = 3; 3350 IID = isSigned ? 
llvm::Intrinsic::smul_with_overflow : 3351 llvm::Intrinsic::umul_with_overflow; 3352 OverflowKind = SanitizerHandler::MulOverflow; 3353 break; 3354 default: 3355 llvm_unreachable("Unsupported operation for overflow detection"); 3356 } 3357 OpID <<= 1; 3358 if (isSigned) 3359 OpID |= 1; 3360 3361 CodeGenFunction::SanitizerScope SanScope(&CGF); 3362 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty); 3363 3364 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy); 3365 3366 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS}); 3367 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0); 3368 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1); 3369 3370 // Handle overflow with llvm.trap if no custom handler has been specified. 3371 const std::string *handlerName = 3372 &CGF.getLangOpts().OverflowHandler; 3373 if (handlerName->empty()) { 3374 // If the signed-integer-overflow sanitizer is enabled, emit a call to its 3375 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap. 3376 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) { 3377 llvm::Value *NotOverflow = Builder.CreateNot(overflow); 3378 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow 3379 : SanitizerKind::UnsignedIntegerOverflow; 3380 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops); 3381 } else 3382 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind); 3383 return result; 3384 } 3385 3386 // Branch in case of overflow. 3387 llvm::BasicBlock *initialBB = Builder.GetInsertBlock(); 3388 llvm::BasicBlock *continueBB = 3389 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode()); 3390 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn); 3391 3392 Builder.CreateCondBr(overflow, overflowBB, continueBB); 3393 3394 // If an overflow handler is set, then we want to call it and then use its 3395 // result, if it returns. 3396 Builder.SetInsertPoint(overflowBB); 3397 3398 // Get the overflow handler. 3399 llvm::Type *Int8Ty = CGF.Int8Ty; 3400 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty }; 3401 llvm::FunctionType *handlerTy = 3402 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true); 3403 llvm::FunctionCallee handler = 3404 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName); 3405 3406 // Sign extend the args to 64-bit, so that we can use the same handler for 3407 // all types of overflow. 3408 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty); 3409 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty); 3410 3411 // Call the handler with the two arguments, the operation, and the size of 3412 // the result. 3413 llvm::Value *handlerArgs[] = { 3414 lhs, 3415 rhs, 3416 Builder.getInt8(OpID), 3417 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()) 3418 }; 3419 llvm::Value *handlerResult = 3420 CGF.EmitNounwindRuntimeCall(handler, handlerArgs); 3421 3422 // Truncate the result back to the desired size. 3423 handlerResult = Builder.CreateTrunc(handlerResult, opTy); 3424 Builder.CreateBr(continueBB); 3425 3426 Builder.SetInsertPoint(continueBB); 3427 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2); 3428 phi->addIncoming(result, initialBB); 3429 phi->addIncoming(handlerResult, overflowBB); 3430 3431 return phi; 3432 } 3433 3434 /// Emit pointer + index arithmetic. 3435 static Value *emitPointerArithmetic(CodeGenFunction &CGF, 3436 const BinOpInfo &op, 3437 bool isSubtraction) { 3438 // Must have binary (not unary) expr here. 
Unary pointer 3439 // increment/decrement doesn't use this path. 3440 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3441 3442 Value *pointer = op.LHS; 3443 Expr *pointerOperand = expr->getLHS(); 3444 Value *index = op.RHS; 3445 Expr *indexOperand = expr->getRHS(); 3446 3447 // In a subtraction, the LHS is always the pointer. 3448 if (!isSubtraction && !pointer->getType()->isPointerTy()) { 3449 std::swap(pointer, index); 3450 std::swap(pointerOperand, indexOperand); 3451 } 3452 3453 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); 3454 3455 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth(); 3456 auto &DL = CGF.CGM.getDataLayout(); 3457 auto PtrTy = cast<llvm::PointerType>(pointer->getType()); 3458 3459 // Some versions of glibc and gcc use idioms (particularly in their malloc 3460 // routines) that add a pointer-sized integer (known to be a pointer value) 3461 // to a null pointer in order to cast the value back to an integer or as 3462 // part of a pointer alignment algorithm. This is undefined behavior, but 3463 // we'd like to be able to compile programs that use it. 3464 // 3465 // Normally, we'd generate a GEP with a null-pointer base here in response 3466 // to that code, but it's also UB to dereference a pointer created that 3467 // way. Instead (as an acknowledged hack to tolerate the idiom) we will 3468 // generate a direct cast of the integer value to a pointer. 3469 // 3470 // The idiom (p = nullptr + N) is not met if any of the following are true: 3471 // 3472 // The operation is subtraction. 3473 // The index is not pointer-sized. 3474 // The pointer type is not byte-sized. 3475 // 3476 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(), 3477 op.Opcode, 3478 expr->getLHS(), 3479 expr->getRHS())) 3480 return CGF.Builder.CreateIntToPtr(index, pointer->getType()); 3481 3482 if (width != DL.getIndexTypeSizeInBits(PtrTy)) { 3483 // Zero-extend or sign-extend the pointer value according to 3484 // whether the index is signed or not. 3485 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned, 3486 "idx.ext"); 3487 } 3488 3489 // If this is subtraction, negate the index. 3490 if (isSubtraction) 3491 index = CGF.Builder.CreateNeg(index, "idx.neg"); 3492 3493 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds)) 3494 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(), 3495 /*Accessed*/ false); 3496 3497 const PointerType *pointerType 3498 = pointerOperand->getType()->getAs<PointerType>(); 3499 if (!pointerType) { 3500 QualType objectType = pointerOperand->getType() 3501 ->castAs<ObjCObjectPointerType>() 3502 ->getPointeeType(); 3503 llvm::Value *objectSize 3504 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType)); 3505 3506 index = CGF.Builder.CreateMul(index, objectSize); 3507 3508 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy); 3509 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr"); 3510 return CGF.Builder.CreateBitCast(result, pointer->getType()); 3511 } 3512 3513 QualType elementType = pointerType->getPointeeType(); 3514 if (const VariableArrayType *vla 3515 = CGF.getContext().getAsVariableArrayType(elementType)) { 3516 // The element count here is the total number of non-VLA elements. 3517 llvm::Value *numElements = CGF.getVLASize(vla).NumElts; 3518 3519 // Effectively, the multiply by the VLA size is part of the GEP. 
3520 // GEP indexes are signed, and scaling an index isn't permitted to 3521 // signed-overflow, so we use the same semantics for our explicit 3522 // multiply. We suppress this if overflow is not undefined behavior. 3523 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType()); 3524 if (CGF.getLangOpts().isSignedOverflowDefined()) { 3525 index = CGF.Builder.CreateMul(index, numElements, "vla.index"); 3526 pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr"); 3527 } else { 3528 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index"); 3529 pointer = CGF.EmitCheckedInBoundsGEP( 3530 elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(), 3531 "add.ptr"); 3532 } 3533 return pointer; 3534 } 3535 3536 // Explicitly handle GNU void* and function pointer arithmetic extensions. The 3537 // GNU void* casts amount to no-ops since our void* type is i8*, but this is 3538 // future proof. 3539 if (elementType->isVoidType() || elementType->isFunctionType()) { 3540 Value *result = CGF.EmitCastToVoidPtr(pointer); 3541 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr"); 3542 return CGF.Builder.CreateBitCast(result, pointer->getType()); 3543 } 3544 3545 llvm::Type *elemTy = CGF.ConvertTypeForMem(elementType); 3546 if (CGF.getLangOpts().isSignedOverflowDefined()) 3547 return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr"); 3548 3549 return CGF.EmitCheckedInBoundsGEP( 3550 elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(), 3551 "add.ptr"); 3552 } 3553 3554 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and 3555 // Addend. Use negMul and negAdd to negate the first operand of the Mul or 3556 // the add operand respectively. This allows fmuladd to represent a*b-c, or 3557 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to 3558 // efficient operations. 3559 static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, 3560 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3561 bool negMul, bool negAdd) { 3562 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set."); 3563 3564 Value *MulOp0 = MulOp->getOperand(0); 3565 Value *MulOp1 = MulOp->getOperand(1); 3566 if (negMul) 3567 MulOp0 = Builder.CreateFNeg(MulOp0, "neg"); 3568 if (negAdd) 3569 Addend = Builder.CreateFNeg(Addend, "neg"); 3570 3571 Value *FMulAdd = nullptr; 3572 if (Builder.getIsFPConstrained()) { 3573 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) && 3574 "Only constrained operation should be created when Builder is in FP " 3575 "constrained mode"); 3576 FMulAdd = Builder.CreateConstrainedFPCall( 3577 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd, 3578 Addend->getType()), 3579 {MulOp0, MulOp1, Addend}); 3580 } else { 3581 FMulAdd = Builder.CreateCall( 3582 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()), 3583 {MulOp0, MulOp1, Addend}); 3584 } 3585 MulOp->eraseFromParent(); 3586 3587 return FMulAdd; 3588 } 3589 3590 // Check whether it would be legal to emit an fmuladd intrinsic call to 3591 // represent op and if so, build the fmuladd. 3592 // 3593 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on. 3594 // Does NOT check the type of the operation - it's assumed that this function 3595 // will be called from contexts where it's known that the type is contractable. 
3596 static Value* tryEmitFMulAdd(const BinOpInfo &op, 3597 const CodeGenFunction &CGF, CGBuilderTy &Builder, 3598 bool isSub=false) { 3599 3600 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || 3601 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && 3602 "Only fadd/fsub can be the root of an fmuladd."); 3603 3604 // Check whether this op is marked as fusable. 3605 if (!op.FPFeatures.allowFPContractWithinStatement()) 3606 return nullptr; 3607 3608 // We have a potentially fusable op. Look for a mul on one of the operands. 3609 // Also, make sure that the mul result isn't used directly. In that case, 3610 // there's no point creating a muladd operation. 3611 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) { 3612 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul && 3613 LHSBinOp->use_empty()) 3614 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3615 } 3616 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) { 3617 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul && 3618 RHSBinOp->use_empty()) 3619 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3620 } 3621 3622 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) { 3623 if (LHSBinOp->getIntrinsicID() == 3624 llvm::Intrinsic::experimental_constrained_fmul && 3625 LHSBinOp->use_empty()) 3626 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); 3627 } 3628 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) { 3629 if (RHSBinOp->getIntrinsicID() == 3630 llvm::Intrinsic::experimental_constrained_fmul && 3631 RHSBinOp->use_empty()) 3632 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); 3633 } 3634 3635 return nullptr; 3636 } 3637 3638 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) { 3639 if (op.LHS->getType()->isPointerTy() || 3640 op.RHS->getType()->isPointerTy()) 3641 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction); 3642 3643 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3644 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3645 case LangOptions::SOB_Defined: 3646 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3647 case LangOptions::SOB_Undefined: 3648 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3649 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3650 LLVM_FALLTHROUGH; 3651 case LangOptions::SOB_Trapping: 3652 if (CanElideOverflowCheck(CGF.getContext(), op)) 3653 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 3654 return EmitOverflowCheckedBinOp(op); 3655 } 3656 } 3657 3658 if (op.Ty->isConstantMatrixType()) { 3659 llvm::MatrixBuilder MB(Builder); 3660 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3661 return MB.CreateAdd(op.LHS, op.RHS); 3662 } 3663 3664 if (op.Ty->isUnsignedIntegerType() && 3665 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3666 !CanElideOverflowCheck(CGF.getContext(), op)) 3667 return EmitOverflowCheckedBinOp(op); 3668 3669 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3670 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3671 // Try to form an fmuladd. 3672 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder)) 3673 return FMulAdd; 3674 3675 return Builder.CreateFAdd(op.LHS, op.RHS, "add"); 3676 } 3677 3678 if (op.isFixedPointOp()) 3679 return EmitFixedPointBinOp(op); 3680 3681 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 3682 } 3683 3684 /// The resulting value must be calculated with exact precision, so the operands 3685 /// may not be the same type. 
3686 Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) { 3687 using llvm::APSInt; 3688 using llvm::ConstantInt; 3689 3690 // This is either a binary operation where at least one of the operands is 3691 // a fixed-point type, or a unary operation where the operand is a fixed-point 3692 // type. The result type of a binary operation is determined by 3693 // Sema::handleFixedPointConversions(). 3694 QualType ResultTy = op.Ty; 3695 QualType LHSTy, RHSTy; 3696 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) { 3697 RHSTy = BinOp->getRHS()->getType(); 3698 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) { 3699 // For compound assignment, the effective type of the LHS at this point 3700 // is the computation LHS type, not the actual LHS type, and the final 3701 // result type is not the type of the expression but rather the 3702 // computation result type. 3703 LHSTy = CAO->getComputationLHSType(); 3704 ResultTy = CAO->getComputationResultType(); 3705 } else 3706 LHSTy = BinOp->getLHS()->getType(); 3707 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) { 3708 LHSTy = UnOp->getSubExpr()->getType(); 3709 RHSTy = UnOp->getSubExpr()->getType(); 3710 } 3711 ASTContext &Ctx = CGF.getContext(); 3712 Value *LHS = op.LHS; 3713 Value *RHS = op.RHS; 3714 3715 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy); 3716 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy); 3717 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy); 3718 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema); 3719 3720 // Perform the actual operation. 3721 Value *Result; 3722 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 3723 switch (op.Opcode) { 3724 case BO_AddAssign: 3725 case BO_Add: 3726 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema); 3727 break; 3728 case BO_SubAssign: 3729 case BO_Sub: 3730 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema); 3731 break; 3732 case BO_MulAssign: 3733 case BO_Mul: 3734 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema); 3735 break; 3736 case BO_DivAssign: 3737 case BO_Div: 3738 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema); 3739 break; 3740 case BO_ShlAssign: 3741 case BO_Shl: 3742 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS); 3743 break; 3744 case BO_ShrAssign: 3745 case BO_Shr: 3746 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS); 3747 break; 3748 case BO_LT: 3749 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema); 3750 case BO_GT: 3751 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema); 3752 case BO_LE: 3753 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3754 case BO_GE: 3755 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3756 case BO_EQ: 3757 // For equality operations, we assume any padding bits on unsigned types are 3758 // zero'd out. They could be overwritten through non-saturating operations 3759 // that cause overflow, but this leads to undefined behavior. 
3760 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema); 3761 case BO_NE: 3762 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema); 3763 case BO_Cmp: 3764 case BO_LAnd: 3765 case BO_LOr: 3766 llvm_unreachable("Found unimplemented fixed point binary operation"); 3767 case BO_PtrMemD: 3768 case BO_PtrMemI: 3769 case BO_Rem: 3770 case BO_Xor: 3771 case BO_And: 3772 case BO_Or: 3773 case BO_Assign: 3774 case BO_RemAssign: 3775 case BO_AndAssign: 3776 case BO_XorAssign: 3777 case BO_OrAssign: 3778 case BO_Comma: 3779 llvm_unreachable("Found unsupported binary operation for fixed point types."); 3780 } 3781 3782 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) || 3783 BinaryOperator::isShiftAssignOp(op.Opcode); 3784 // Convert to the result type. 3785 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema 3786 : CommonFixedSema, 3787 ResultFixedSema); 3788 } 3789 3790 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) { 3791 // The LHS is always a pointer if either side is. 3792 if (!op.LHS->getType()->isPointerTy()) { 3793 if (op.Ty->isSignedIntegerOrEnumerationType()) { 3794 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 3795 case LangOptions::SOB_Defined: 3796 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3797 case LangOptions::SOB_Undefined: 3798 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 3799 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3800 LLVM_FALLTHROUGH; 3801 case LangOptions::SOB_Trapping: 3802 if (CanElideOverflowCheck(CGF.getContext(), op)) 3803 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); 3804 return EmitOverflowCheckedBinOp(op); 3805 } 3806 } 3807 3808 if (op.Ty->isConstantMatrixType()) { 3809 llvm::MatrixBuilder MB(Builder); 3810 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3811 return MB.CreateSub(op.LHS, op.RHS); 3812 } 3813 3814 if (op.Ty->isUnsignedIntegerType() && 3815 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3816 !CanElideOverflowCheck(CGF.getContext(), op)) 3817 return EmitOverflowCheckedBinOp(op); 3818 3819 if (op.LHS->getType()->isFPOrFPVectorTy()) { 3820 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 3821 // Try to form an fmuladd. 3822 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true)) 3823 return FMulAdd; 3824 return Builder.CreateFSub(op.LHS, op.RHS, "sub"); 3825 } 3826 3827 if (op.isFixedPointOp()) 3828 return EmitFixedPointBinOp(op); 3829 3830 return Builder.CreateSub(op.LHS, op.RHS, "sub"); 3831 } 3832 3833 // If the RHS is not a pointer, then we have normal pointer 3834 // arithmetic. 3835 if (!op.RHS->getType()->isPointerTy()) 3836 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction); 3837 3838 // Otherwise, this is a pointer subtraction. 3839 3840 // Do the raw subtraction part. 3841 llvm::Value *LHS 3842 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast"); 3843 llvm::Value *RHS 3844 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast"); 3845 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); 3846 3847 // Okay, figure out the element size. 3848 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3849 QualType elementType = expr->getLHS()->getType()->getPointeeType(); 3850 3851 llvm::Value *divisor = nullptr; 3852 3853 // For a variable-length array, this is going to be non-constant. 
3854 if (const VariableArrayType *vla 3855 = CGF.getContext().getAsVariableArrayType(elementType)) { 3856 auto VlaSize = CGF.getVLASize(vla); 3857 elementType = VlaSize.Type; 3858 divisor = VlaSize.NumElts; 3859 3860 // Scale the number of non-VLA elements by the non-VLA element size. 3861 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType); 3862 if (!eltSize.isOne()) 3863 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor); 3864 3865 // For everything else, we can just compute it, safe in the 3866 // assumption that Sema won't let anything through that we can't 3867 // safely compute the size of. 3868 } else { 3869 CharUnits elementSize; 3870 // Handle GCC extension for pointer arithmetic on void* and 3871 // function pointer types. 3872 if (elementType->isVoidType() || elementType->isFunctionType()) 3873 elementSize = CharUnits::One(); 3874 else 3875 elementSize = CGF.getContext().getTypeSizeInChars(elementType); 3876 3877 // Don't even emit the divide for element size of 1. 3878 if (elementSize.isOne()) 3879 return diffInChars; 3880 3881 divisor = CGF.CGM.getSize(elementSize); 3882 } 3883 3884 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since 3885 // pointer difference in C is only defined in the case where both operands 3886 // are pointing to elements of an array. 3887 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div"); 3888 } 3889 3890 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) { 3891 llvm::IntegerType *Ty; 3892 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType())) 3893 Ty = cast<llvm::IntegerType>(VT->getElementType()); 3894 else 3895 Ty = cast<llvm::IntegerType>(LHS->getType()); 3896 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1); 3897 } 3898 3899 Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS, 3900 const Twine &Name) { 3901 llvm::IntegerType *Ty; 3902 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType())) 3903 Ty = cast<llvm::IntegerType>(VT->getElementType()); 3904 else 3905 Ty = cast<llvm::IntegerType>(LHS->getType()); 3906 3907 if (llvm::isPowerOf2_64(Ty->getBitWidth())) 3908 return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name); 3909 3910 return Builder.CreateURem( 3911 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name); 3912 } 3913 3914 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { 3915 // TODO: This misses out on the sanitizer check below. 3916 if (Ops.isFixedPointOp()) 3917 return EmitFixedPointBinOp(Ops); 3918 3919 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 3920 // RHS to the same size as the LHS. 3921 Value *RHS = Ops.RHS; 3922 if (Ops.LHS->getType() != RHS->getType()) 3923 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 3924 3925 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && 3926 Ops.Ty->hasSignedIntegerRepresentation() && 3927 !CGF.getLangOpts().isSignedOverflowDefined() && 3928 !CGF.getLangOpts().CPlusPlus20; 3929 bool SanitizeUnsignedBase = 3930 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) && 3931 Ops.Ty->hasUnsignedIntegerRepresentation(); 3932 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase; 3933 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); 3934 // OpenCL 6.3j: shift values are effectively % word size of LHS.
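// For example, with a 32-bit LHS, a shift amount of 33 is reduced to
// 33 & 31 == 1 by ConstrainShiftValue below.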
3935 if (CGF.getLangOpts().OpenCL) 3936 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask"); 3937 else if ((SanitizeBase || SanitizeExponent) && 3938 isa<llvm::IntegerType>(Ops.LHS->getType())) { 3939 CodeGenFunction::SanitizerScope SanScope(&CGF); 3940 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks; 3941 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS); 3942 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne); 3943 3944 if (SanitizeExponent) { 3945 Checks.push_back( 3946 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent)); 3947 } 3948 3949 if (SanitizeBase) { 3950 // Check whether we are shifting any non-zero bits off the top of the 3951 // integer. We only emit this check if exponent is valid - otherwise 3952 // instructions below will have undefined behavior themselves. 3953 llvm::BasicBlock *Orig = Builder.GetInsertBlock(); 3954 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 3955 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check"); 3956 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont); 3957 llvm::Value *PromotedWidthMinusOne = 3958 (RHS == Ops.RHS) ? WidthMinusOne 3959 : GetWidthMinusOneValue(Ops.LHS, RHS); 3960 CGF.EmitBlock(CheckShiftBase); 3961 llvm::Value *BitsShiftedOff = Builder.CreateLShr( 3962 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros", 3963 /*NUW*/ true, /*NSW*/ true), 3964 "shl.check"); 3965 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) { 3966 // In C99, we are not permitted to shift a 1 bit into the sign bit. 3967 // Under C++11's rules, shifting a 1 bit into the sign bit is 3968 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't 3969 // define signed left shifts, so we use the C99 and C++11 rules there). 3970 // Unsigned shifts can always shift into the top bit. 3971 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1); 3972 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One); 3973 } 3974 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0); 3975 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero); 3976 CGF.EmitBlock(Cont); 3977 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2); 3978 BaseCheck->addIncoming(Builder.getTrue(), Orig); 3979 BaseCheck->addIncoming(ValidBase, CheckShiftBase); 3980 Checks.push_back(std::make_pair( 3981 BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase 3982 : SanitizerKind::UnsignedShiftBase)); 3983 } 3984 3985 assert(!Checks.empty()); 3986 EmitBinOpCheck(Checks, Ops); 3987 } 3988 3989 return Builder.CreateShl(Ops.LHS, RHS, "shl"); 3990 } 3991 3992 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { 3993 // TODO: This misses out on the sanitizer check below. 3994 if (Ops.isFixedPointOp()) 3995 return EmitFixedPointBinOp(Ops); 3996 3997 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 3998 // RHS to the same size as the LHS. 3999 Value *RHS = Ops.RHS; 4000 if (Ops.LHS->getType() != RHS->getType()) 4001 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 4002 4003 // OpenCL 6.3j: shift values are effectively % word size of LHS. 
4004 if (CGF.getLangOpts().OpenCL) 4005 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask"); 4006 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && 4007 isa<llvm::IntegerType>(Ops.LHS->getType())) { 4008 CodeGenFunction::SanitizerScope SanScope(&CGF); 4009 llvm::Value *Valid = 4010 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)); 4011 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops); 4012 } 4013 4014 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 4015 return Builder.CreateLShr(Ops.LHS, RHS, "shr"); 4016 return Builder.CreateAShr(Ops.LHS, RHS, "shr"); 4017 } 4018 4019 enum IntrinsicType { VCMPEQ, VCMPGT }; 4020 // return corresponding comparison intrinsic for given vector type 4021 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, 4022 BuiltinType::Kind ElemKind) { 4023 switch (ElemKind) { 4024 default: llvm_unreachable("unexpected element type"); 4025 case BuiltinType::Char_U: 4026 case BuiltinType::UChar: 4027 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 4028 llvm::Intrinsic::ppc_altivec_vcmpgtub_p; 4029 case BuiltinType::Char_S: 4030 case BuiltinType::SChar: 4031 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 4032 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p; 4033 case BuiltinType::UShort: 4034 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 4035 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p; 4036 case BuiltinType::Short: 4037 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 4038 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p; 4039 case BuiltinType::UInt: 4040 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 4041 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p; 4042 case BuiltinType::Int: 4043 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 4044 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p; 4045 case BuiltinType::ULong: 4046 case BuiltinType::ULongLong: 4047 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 4048 llvm::Intrinsic::ppc_altivec_vcmpgtud_p; 4049 case BuiltinType::Long: 4050 case BuiltinType::LongLong: 4051 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 4052 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p; 4053 case BuiltinType::Float: 4054 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p : 4055 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p; 4056 case BuiltinType::Double: 4057 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p : 4058 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p; 4059 case BuiltinType::UInt128: 4060 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p 4061 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p; 4062 case BuiltinType::Int128: 4063 return (IT == VCMPEQ) ? 
llvm::Intrinsic::ppc_altivec_vcmpequq_p 4064 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p; 4065 } 4066 } 4067 4068 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E, 4069 llvm::CmpInst::Predicate UICmpOpc, 4070 llvm::CmpInst::Predicate SICmpOpc, 4071 llvm::CmpInst::Predicate FCmpOpc, 4072 bool IsSignaling) { 4073 TestAndClearIgnoreResultAssign(); 4074 Value *Result; 4075 QualType LHSTy = E->getLHS()->getType(); 4076 QualType RHSTy = E->getRHS()->getType(); 4077 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) { 4078 assert(E->getOpcode() == BO_EQ || 4079 E->getOpcode() == BO_NE); 4080 Value *LHS = CGF.EmitScalarExpr(E->getLHS()); 4081 Value *RHS = CGF.EmitScalarExpr(E->getRHS()); 4082 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison( 4083 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE); 4084 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { 4085 BinOpInfo BOInfo = EmitBinOps(E); 4086 Value *LHS = BOInfo.LHS; 4087 Value *RHS = BOInfo.RHS; 4088 4089 // If AltiVec, the comparison results in a numeric type, so we use 4090 // intrinsics that compare vectors and give 0 or 1 as a result. 4091 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) { 4092 // Constants for mapping CR6 register bits to the predicate result. 4093 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6; 4094 4095 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic; 4096 4097 // In several cases the vector argument order will be reversed. 4098 Value *FirstVecArg = LHS, 4099 *SecondVecArg = RHS; 4100 4101 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType(); 4102 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind(); 4103 4104 switch(E->getOpcode()) { 4105 default: llvm_unreachable("is not a comparison operation"); 4106 case BO_EQ: 4107 CR6 = CR6_LT; 4108 ID = GetIntrinsic(VCMPEQ, ElementKind); 4109 break; 4110 case BO_NE: 4111 CR6 = CR6_EQ; 4112 ID = GetIntrinsic(VCMPEQ, ElementKind); 4113 break; 4114 case BO_LT: 4115 CR6 = CR6_LT; 4116 ID = GetIntrinsic(VCMPGT, ElementKind); 4117 std::swap(FirstVecArg, SecondVecArg); 4118 break; 4119 case BO_GT: 4120 CR6 = CR6_LT; 4121 ID = GetIntrinsic(VCMPGT, ElementKind); 4122 break; 4123 case BO_LE: 4124 if (ElementKind == BuiltinType::Float) { 4125 CR6 = CR6_LT; 4126 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4127 std::swap(FirstVecArg, SecondVecArg); 4128 } 4129 else { 4130 CR6 = CR6_EQ; 4131 ID = GetIntrinsic(VCMPGT, ElementKind); 4132 } 4133 break; 4134 case BO_GE: 4135 if (ElementKind == BuiltinType::Float) { 4136 CR6 = CR6_LT; 4137 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4138 } 4139 else { 4140 CR6 = CR6_EQ; 4141 ID = GetIntrinsic(VCMPGT, ElementKind); 4142 std::swap(FirstVecArg, SecondVecArg); 4143 } 4144 break; 4145 } 4146 4147 Value *CR6Param = Builder.getInt32(CR6); 4148 llvm::Function *F = CGF.CGM.getIntrinsic(ID); 4149 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg}); 4150 4151 // The result type of the intrinsic may not be the same as E->getType(). 4152 // If E->getType() is not BoolTy, EmitScalarConversion will do the 4153 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will 4154 // do nothing; in that case, if ResultTy is not i1, it would cause a 4155 // crash later.
4156 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType()); 4157 if (ResultTy->getBitWidth() > 1 && 4158 E->getType() == CGF.getContext().BoolTy) 4159 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty()); 4160 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4161 E->getExprLoc()); 4162 } 4163 4164 if (BOInfo.isFixedPointOp()) { 4165 Result = EmitFixedPointBinOp(BOInfo); 4166 } else if (LHS->getType()->isFPOrFPVectorTy()) { 4167 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures); 4168 if (!IsSignaling) 4169 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp"); 4170 else 4171 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp"); 4172 } else if (LHSTy->hasSignedIntegerRepresentation()) { 4173 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp"); 4174 } else { 4175 // Unsigned integers and pointers. 4176 4177 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers && 4178 !isa<llvm::ConstantPointerNull>(LHS) && 4179 !isa<llvm::ConstantPointerNull>(RHS)) { 4180 4181 // Dynamic information is required to be stripped for comparisons, 4182 // because it could leak the dynamic information. Based on comparisons 4183 // of pointers to dynamic objects, the optimizer can replace one pointer 4184 // with another, which might be incorrect in presence of invariant 4185 // groups. Comparison with null is safe because null does not carry any 4186 // dynamic information. 4187 if (LHSTy.mayBeDynamicClass()) 4188 LHS = Builder.CreateStripInvariantGroup(LHS); 4189 if (RHSTy.mayBeDynamicClass()) 4190 RHS = Builder.CreateStripInvariantGroup(RHS); 4191 } 4192 4193 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp"); 4194 } 4195 4196 // If this is a vector comparison, sign extend the result to the appropriate 4197 // vector integer type and return it (don't convert to bool). 4198 if (LHSTy->isVectorType()) 4199 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 4200 4201 } else { 4202 // Complex Comparison: can only be an equality comparison. 4203 CodeGenFunction::ComplexPairTy LHS, RHS; 4204 QualType CETy; 4205 if (auto *CTy = LHSTy->getAs<ComplexType>()) { 4206 LHS = CGF.EmitComplexExpr(E->getLHS()); 4207 CETy = CTy->getElementType(); 4208 } else { 4209 LHS.first = Visit(E->getLHS()); 4210 LHS.second = llvm::Constant::getNullValue(LHS.first->getType()); 4211 CETy = LHSTy; 4212 } 4213 if (auto *CTy = RHSTy->getAs<ComplexType>()) { 4214 RHS = CGF.EmitComplexExpr(E->getRHS()); 4215 assert(CGF.getContext().hasSameUnqualifiedType(CETy, 4216 CTy->getElementType()) && 4217 "The element types must always match."); 4218 (void)CTy; 4219 } else { 4220 RHS.first = Visit(E->getRHS()); 4221 RHS.second = llvm::Constant::getNullValue(RHS.first->getType()); 4222 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) && 4223 "The element types must always match."); 4224 } 4225 4226 Value *ResultR, *ResultI; 4227 if (CETy->isRealFloatingType()) { 4228 // As complex comparisons can only be equality comparisons, they 4229 // are never signaling comparisons. 4230 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r"); 4231 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i"); 4232 } else { 4233 // Complex comparisons can only be equality comparisons. As such, signed 4234 // and unsigned opcodes are the same. 
4235 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r"); 4236 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i"); 4237 } 4238 4239 if (E->getOpcode() == BO_EQ) { 4240 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri"); 4241 } else { 4242 assert(E->getOpcode() == BO_NE && 4243 "Complex comparison other than == or != ?"); 4244 Result = Builder.CreateOr(ResultR, ResultI, "or.ri"); 4245 } 4246 } 4247 4248 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(), 4249 E->getExprLoc()); 4250 } 4251 4252 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { 4253 bool Ignore = TestAndClearIgnoreResultAssign(); 4254 4255 Value *RHS; 4256 LValue LHS; 4257 4258 switch (E->getLHS()->getType().getObjCLifetime()) { 4259 case Qualifiers::OCL_Strong: 4260 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore); 4261 break; 4262 4263 case Qualifiers::OCL_Autoreleasing: 4264 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E); 4265 break; 4266 4267 case Qualifiers::OCL_ExplicitNone: 4268 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore); 4269 break; 4270 4271 case Qualifiers::OCL_Weak: 4272 RHS = Visit(E->getRHS()); 4273 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4274 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore); 4275 break; 4276 4277 case Qualifiers::OCL_None: 4278 // __block variables need to have the rhs evaluated first, plus 4279 // this should improve codegen just a little. 4280 RHS = Visit(E->getRHS()); 4281 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 4282 4283 // Store the value into the LHS. Bit-fields are handled specially 4284 // because the result is altered by the store, i.e., [C99 6.5.16p1] 4285 // 'An assignment expression has the value of the left operand after 4286 // the assignment...'. 4287 if (LHS.isBitField()) { 4288 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS); 4289 } else { 4290 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc()); 4291 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS); 4292 } 4293 } 4294 4295 // If the result is clearly ignored, return now. 4296 if (Ignore) 4297 return nullptr; 4298 4299 // The result of an assignment in C is the assigned r-value. 4300 if (!CGF.getLangOpts().CPlusPlus) 4301 return RHS; 4302 4303 // If the lvalue is non-volatile, return the computed value of the assignment. 4304 if (!LHS.isVolatileQualified()) 4305 return RHS; 4306 4307 // Otherwise, reload the value. 4308 return EmitLoadOfLValue(LHS, E->getExprLoc()); 4309 } 4310 4311 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { 4312 // Perform vector logical and on comparisons with zero vectors. 
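// Note that the vector form is evaluated eagerly (no short-circuit): both
// operands are emitted, compared against zero, and the masks are ANDed before
// being sign-extended to the result vector type.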
4313 if (E->getType()->isVectorType()) {
4314 CGF.incrementProfileCounter(E);
4315
4316 Value *LHS = Visit(E->getLHS());
4317 Value *RHS = Visit(E->getRHS());
4318 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4319 if (LHS->getType()->isFPOrFPVectorTy()) {
4320 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4321 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4322 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4323 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4324 } else {
4325 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4326 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4327 }
4328 Value *And = Builder.CreateAnd(LHS, RHS);
4329 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4330 }
4331
4332 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4333 llvm::Type *ResTy = ConvertType(E->getType());
4334
4335 // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
4336 // If we have 1 && X, just emit X without inserting the control flow.
4337 bool LHSCondVal;
4338 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4339 if (LHSCondVal) { // If we have 1 && X, just emit X.
4340 CGF.incrementProfileCounter(E);
4341
4342 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4343
4344 // If we're generating for profiling or coverage, generate a branch to a
4345 // block that increments the RHS counter needed to track branch condition
4346 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4347 // "FalseBlock" after the increment is done.
4348 if (InstrumentRegions &&
4349 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4350 llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
4351 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4352 Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
4353 CGF.EmitBlock(RHSBlockCnt);
4354 CGF.incrementProfileCounter(E->getRHS());
4355 CGF.EmitBranch(FBlock);
4356 CGF.EmitBlock(FBlock);
4357 }
4358
4359 // ZExt result to int or bool.
4360 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4361 }
4362
4363 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4364 if (!CGF.ContainsLabel(E->getRHS()))
4365 return llvm::Constant::getNullValue(ResTy);
4366 }
4367
4368 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4369 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
4370
4371 CodeGenFunction::ConditionalEvaluation eval(CGF);
4372
4373 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4374 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4375 CGF.getProfileCount(E->getRHS()));
4376
4377 // Any edges into the ContBlock are now from an (indeterminate number of)
4378 // branches out of this first condition's evaluation. All of these values
4379 // will be false. Start setting up the PHI node in the Cont Block for this.
4380 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4381 "", ContBlock);
4382 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4383 PI != PE; ++PI)
4384 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4385
4386 eval.begin(CGF);
4387 CGF.EmitBlock(RHSBlock);
4388 CGF.incrementProfileCounter(E);
4389 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4390 eval.end(CGF);
4391
4392 // Reacquire the RHS block, as there may be subblocks inserted.
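// (EvaluateExprAsBool may have opened new blocks, so the block that currently
// holds the insertion point is the one whose edge feeds RHSCond into the phi.)
// For reference, a scalar `a && b` with a non-constant LHS ends up roughly as:
//   entry:     br i1 %a.cond, label %land.rhs, label %land.end
//   land.rhs:  %b.cond = ...            ; br label %land.end
//   land.end:  %r = phi i1 [ false, %entry ], [ %b.cond, %land.rhs ]
// (Illustrative only; the exact block structure varies with profile
// instrumentation and any control flow inside the operands.)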
4393 RHSBlock = Builder.GetInsertBlock();
4394
4395 // If we're generating for profiling or coverage, generate a branch on the
4396 // RHS to a block that increments the RHS true counter needed to track branch
4397 // condition coverage.
4398 if (InstrumentRegions &&
4399 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4400 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4401 Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
4402 CGF.EmitBlock(RHSBlockCnt);
4403 CGF.incrementProfileCounter(E->getRHS());
4404 CGF.EmitBranch(ContBlock);
4405 PN->addIncoming(RHSCond, RHSBlockCnt);
4406 }
4407
4408 // Emit an unconditional branch from this block to ContBlock.
4409 {
4410 // There is no need to emit a line number for an unconditional branch.
4411 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4412 CGF.EmitBlock(ContBlock);
4413 }
4414 // Insert an entry into the phi node for the edge with the value of RHSCond.
4415 PN->addIncoming(RHSCond, RHSBlock);
4416
4417 // Artificial location to preserve the scope information.
4418 {
4419 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4420 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4421 }
4422
4423 // ZExt result to int.
4424 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4425 }
4426
4427 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4428 // Perform vector logical or on comparisons with zero vectors.
4429 if (E->getType()->isVectorType()) {
4430 CGF.incrementProfileCounter(E);
4431
4432 Value *LHS = Visit(E->getLHS());
4433 Value *RHS = Visit(E->getRHS());
4434 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4435 if (LHS->getType()->isFPOrFPVectorTy()) {
4436 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4437 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4438 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4439 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4440 } else {
4441 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4442 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4443 }
4444 Value *Or = Builder.CreateOr(LHS, RHS);
4445 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4446 }
4447
4448 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4449 llvm::Type *ResTy = ConvertType(E->getType());
4450
4451 // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
4452 // If we have 0 || X, just emit X without inserting the control flow.
4453 bool LHSCondVal;
4454 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4455 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4456 CGF.incrementProfileCounter(E);
4457
4458 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4459
4460 // If we're generating for profiling or coverage, generate a branch to a
4461 // block that increments the RHS counter needed to track branch condition
4462 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4463 // "FalseBlock" after the increment is done.
4464 if (InstrumentRegions &&
4465 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4466 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
4467 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4468 Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
4469 CGF.EmitBlock(RHSBlockCnt);
4470 CGF.incrementProfileCounter(E->getRHS());
4471 CGF.EmitBranch(FBlock);
4472 CGF.EmitBlock(FBlock);
4473 }
4474
4475 // ZExt result to int or bool.
4476 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4477 }
4478
4479 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4480 if (!CGF.ContainsLabel(E->getRHS()))
4481 return llvm::ConstantInt::get(ResTy, 1);
4482 }
4483
4484 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4485 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4486
4487 CodeGenFunction::ConditionalEvaluation eval(CGF);
4488
4489 // Branch on the LHS first. If it is true, go to the success (cont) block.
4490 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4491 CGF.getCurrentProfileCount() -
4492 CGF.getProfileCount(E->getRHS()));
4493
4494 // Any edges into the ContBlock are now from an (indeterminate number of)
4495 // branches out of this first condition's evaluation. All of these values
4496 // will be true. Start setting up the PHI node in the Cont Block for this.
4497 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4498 "", ContBlock);
4499 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4500 PI != PE; ++PI)
4501 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4502
4503 eval.begin(CGF);
4504
4505 // Emit the RHS condition as a bool value.
4506 CGF.EmitBlock(RHSBlock);
4507 CGF.incrementProfileCounter(E);
4508 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4509
4510 eval.end(CGF);
4511
4512 // Reacquire the RHS block, as there may be subblocks inserted.
4513 RHSBlock = Builder.GetInsertBlock();
4514
4515 // If we're generating for profiling or coverage, generate a branch on the
4516 // RHS to a block that increments the RHS true counter needed to track branch
4517 // condition coverage.
4518 if (InstrumentRegions &&
4519 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4520 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4521 Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
4522 CGF.EmitBlock(RHSBlockCnt);
4523 CGF.incrementProfileCounter(E->getRHS());
4524 CGF.EmitBranch(ContBlock);
4525 PN->addIncoming(RHSCond, RHSBlockCnt);
4526 }
4527
4528 // Emit an unconditional branch from this block to ContBlock. Insert an entry
4529 // into the phi node for the edge with the value of RHSCond.
4530 CGF.EmitBlock(ContBlock);
4531 PN->addIncoming(RHSCond, RHSBlock);
4532
4533 // ZExt result to int.
4534 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4535 }
4536
4537 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4538 CGF.EmitIgnoredExpr(E->getLHS());
4539 CGF.EnsureInsertPoint();
4540 return Visit(E->getRHS());
4541 }
4542
4543 //===----------------------------------------------------------------------===//
4544 // Other Operators
4545 //===----------------------------------------------------------------------===//
4546
4547 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4548 /// expression is cheap enough and side-effect-free enough to evaluate
4549 /// unconditionally instead of conditionally. This is used to convert control
4550 /// flow into selects in some cases.
4551 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4552 CodeGenFunction &CGF) {
4553 // Anything that is an integer or floating point constant is fine.
4554 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4555
4556 // Even non-volatile automatic variables can't be evaluated unconditionally.
4557 // Referencing a thread_local may cause non-trivial initialization work to
4558 // occur. If we're inside a lambda and one of the variables is from the scope
4559 // outside the lambda, that function may have returned already. Reading its
4560 // locals is a bad idea. Also, these reads may introduce races that didn't
4561 // exist in the source-level program.
4562 }
4563
4564
4565 Value *ScalarExprEmitter::
4566 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4567 TestAndClearIgnoreResultAssign();
4568
4569 // Bind the common expression if necessary.
4570 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4571
4572 Expr *condExpr = E->getCond();
4573 Expr *lhsExpr = E->getTrueExpr();
4574 Expr *rhsExpr = E->getFalseExpr();
4575
4576 // If the condition constant folds and can be elided, try to avoid emitting
4577 // the condition and the dead arm.
4578 bool CondExprBool;
4579 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4580 Expr *live = lhsExpr, *dead = rhsExpr;
4581 if (!CondExprBool) std::swap(live, dead);
4582
4583 // If the dead side doesn't have labels we need, just emit the live part.
4584 if (!CGF.ContainsLabel(dead)) {
4585 if (CondExprBool)
4586 CGF.incrementProfileCounter(E);
4587 Value *Result = Visit(live);
4588
4589 // If the live part is a throw expression, it acts like it has a void
4590 // type, so evaluating it returns a null Value*. However, a conditional
4591 // with non-void type must return a non-null Value*.
4592 if (!Result && !E->getType()->isVoidType())
4593 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4594
4595 return Result;
4596 }
4597 }
4598
4599 // OpenCL: If the condition is a vector, we can treat this condition like
4600 // the select function.
4601 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
4602 condExpr->getType()->isExtVectorType()) {
4603 CGF.incrementProfileCounter(E);
4604
4605 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4606 llvm::Value *LHS = Visit(lhsExpr);
4607 llvm::Value *RHS = Visit(rhsExpr);
4608
4609 llvm::Type *condType = ConvertType(condExpr->getType());
4610 auto *vecTy = cast<llvm::FixedVectorType>(condType);
4611
4612 unsigned numElem = vecTy->getNumElements();
4613 llvm::Type *elemType = vecTy->getElementType();
4614
4615 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4616 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4617 llvm::Value *tmp = Builder.CreateSExt(
4618 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
4619 llvm::Value *tmp2 = Builder.CreateNot(tmp);
4620
4621 // Cast float to int to perform ANDs if necessary.
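// (Bitwise AND/OR are only defined on integer vectors, so floating-point
// operands are bitcast to the mask's integer vector type here, blended, and
// bitcast back to the original type afterwards.)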
4622 llvm::Value *RHSTmp = RHS;
4623 llvm::Value *LHSTmp = LHS;
4624 bool wasCast = false;
4625 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4626 if (rhsVTy->getElementType()->isFloatingPointTy()) {
4627 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4628 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4629 wasCast = true;
4630 }
4631
4632 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4633 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4634 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4635 if (wasCast)
4636 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4637
4638 return tmp5;
4639 }
4640
4641 if (condExpr->getType()->isVectorType()) {
4642 CGF.incrementProfileCounter(E);
4643
4644 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4645 llvm::Value *LHS = Visit(lhsExpr);
4646 llvm::Value *RHS = Visit(rhsExpr);
4647
4648 llvm::Type *CondType = ConvertType(condExpr->getType());
4649 auto *VecTy = cast<llvm::VectorType>(CondType);
4650 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
4651
4652 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
4653 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
4654 }
4655
4656 // If this is a really simple expression (like x ? 4 : 5), emit this as a
4657 // select instead of as control flow. We can only do this if it is cheap and
4658 // safe to evaluate the LHS and RHS unconditionally.
4659 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4660 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4661 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4662 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4663
4664 CGF.incrementProfileCounter(E, StepV);
4665
4666 llvm::Value *LHS = Visit(lhsExpr);
4667 llvm::Value *RHS = Visit(rhsExpr);
4668 if (!LHS) {
4669 // If the conditional has void type, make sure we return a null Value*.
4670 assert(!RHS && "LHS and RHS types must match");
4671 return nullptr;
4672 }
4673 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4674 }
4675
4676 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4677 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4678 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4679
4680 CodeGenFunction::ConditionalEvaluation eval(CGF);
4681 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4682 CGF.getProfileCount(lhsExpr));
4683
4684 CGF.EmitBlock(LHSBlock);
4685 CGF.incrementProfileCounter(E);
4686 eval.begin(CGF);
4687 Value *LHS = Visit(lhsExpr);
4688 eval.end(CGF);
4689
4690 LHSBlock = Builder.GetInsertBlock();
4691 Builder.CreateBr(ContBlock);
4692
4693 CGF.EmitBlock(RHSBlock);
4694 eval.begin(CGF);
4695 Value *RHS = Visit(rhsExpr);
4696 eval.end(CGF);
4697
4698 RHSBlock = Builder.GetInsertBlock();
4699 CGF.EmitBlock(ContBlock);
4700
4701 // If the LHS or RHS is a throw expression, it will be legitimately null.
4702 if (!LHS)
4703 return RHS;
4704 if (!RHS)
4705 return LHS;
4706
4707 // Create a PHI node for the result.
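// For reference, `c ? a : b` emitted with control flow looks roughly like:
//   entry:      br i1 %c, label %cond.true, label %cond.false
//   cond.true:  %a = ... ; br label %cond.end
//   cond.false: %b = ... ; br label %cond.end
//   cond.end:   %r = phi [ %a, %cond.true ], [ %b, %cond.false ]
// (Illustrative only; the incoming blocks below are whatever blocks are
// current after emitting each arm, since the arms may contain control flow.)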
4708 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond"); 4709 PN->addIncoming(LHS, LHSBlock); 4710 PN->addIncoming(RHS, RHSBlock); 4711 return PN; 4712 } 4713 4714 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) { 4715 return Visit(E->getChosenSubExpr()); 4716 } 4717 4718 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { 4719 QualType Ty = VE->getType(); 4720 4721 if (Ty->isVariablyModifiedType()) 4722 CGF.EmitVariablyModifiedType(Ty); 4723 4724 Address ArgValue = Address::invalid(); 4725 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue); 4726 4727 llvm::Type *ArgTy = ConvertType(VE->getType()); 4728 4729 // If EmitVAArg fails, emit an error. 4730 if (!ArgPtr.isValid()) { 4731 CGF.ErrorUnsupported(VE, "va_arg expression"); 4732 return llvm::UndefValue::get(ArgTy); 4733 } 4734 4735 // FIXME Volatility. 4736 llvm::Value *Val = Builder.CreateLoad(ArgPtr); 4737 4738 // If EmitVAArg promoted the type, we must truncate it. 4739 if (ArgTy != Val->getType()) { 4740 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy()) 4741 Val = Builder.CreateIntToPtr(Val, ArgTy); 4742 else 4743 Val = Builder.CreateTrunc(Val, ArgTy); 4744 } 4745 4746 return Val; 4747 } 4748 4749 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) { 4750 return CGF.EmitBlockLiteral(block); 4751 } 4752 4753 // Convert a vec3 to vec4, or vice versa. 4754 static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, 4755 Value *Src, unsigned NumElementsDst) { 4756 static constexpr int Mask[] = {0, 1, 2, -1}; 4757 return Builder.CreateShuffleVector(Src, 4758 llvm::makeArrayRef(Mask, NumElementsDst)); 4759 } 4760 4761 // Create cast instructions for converting LLVM value \p Src to LLVM type \p 4762 // DstTy. \p Src has the same size as \p DstTy. Both are single value types 4763 // but could be scalar or vectors of different lengths, and either can be 4764 // pointer. 4765 // There are 4 cases: 4766 // 1. non-pointer -> non-pointer : needs 1 bitcast 4767 // 2. pointer -> pointer : needs 1 bitcast or addrspacecast 4768 // 3. pointer -> non-pointer 4769 // a) pointer -> intptr_t : needs 1 ptrtoint 4770 // b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast 4771 // 4. non-pointer -> pointer 4772 // a) intptr_t -> pointer : needs 1 inttoptr 4773 // b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr 4774 // Note: for cases 3b and 4b two casts are required since LLVM casts do not 4775 // allow casting directly between pointer types and non-integer non-pointer 4776 // types. 4777 static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder, 4778 const llvm::DataLayout &DL, 4779 Value *Src, llvm::Type *DstTy, 4780 StringRef Name = "") { 4781 auto SrcTy = Src->getType(); 4782 4783 // Case 1. 4784 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy()) 4785 return Builder.CreateBitCast(Src, DstTy, Name); 4786 4787 // Case 2. 4788 if (SrcTy->isPointerTy() && DstTy->isPointerTy()) 4789 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name); 4790 4791 // Case 3. 4792 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) { 4793 // Case 3b. 4794 if (!DstTy->isIntegerTy()) 4795 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy)); 4796 // Cases 3a and 3b. 4797 return Builder.CreateBitOrPointerCast(Src, DstTy, Name); 4798 } 4799 4800 // Case 4b. 4801 if (!SrcTy->isIntegerTy()) 4802 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy)); 4803 // Cases 4a and 4b. 
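// For example, on a target where intptr_t is i64, converting a <2 x float>
// to a pointer (case 4b) is emitted as a bitcast to i64 followed by an
// inttoptr, since LLVM has no direct vector-to-pointer bitcast.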
4804 return Builder.CreateIntToPtr(Src, DstTy, Name); 4805 } 4806 4807 Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) { 4808 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); 4809 llvm::Type *DstTy = ConvertType(E->getType()); 4810 4811 llvm::Type *SrcTy = Src->getType(); 4812 unsigned NumElementsSrc = 4813 isa<llvm::VectorType>(SrcTy) 4814 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements() 4815 : 0; 4816 unsigned NumElementsDst = 4817 isa<llvm::VectorType>(DstTy) 4818 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements() 4819 : 0; 4820 4821 // Use bit vector expansion for ext_vector_type boolean vectors. 4822 if (E->getType()->isExtVectorBoolType()) 4823 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype"); 4824 4825 // Going from vec3 to non-vec3 is a special case and requires a shuffle 4826 // vector to get a vec4, then a bitcast if the target type is different. 4827 if (NumElementsSrc == 3 && NumElementsDst != 3) { 4828 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4); 4829 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src, 4830 DstTy); 4831 4832 Src->setName("astype"); 4833 return Src; 4834 } 4835 4836 // Going from non-vec3 to vec3 is a special case and requires a bitcast 4837 // to vec4 if the original type is not vec4, then a shuffle vector to 4838 // get a vec3. 4839 if (NumElementsSrc != 3 && NumElementsDst == 3) { 4840 auto *Vec4Ty = llvm::FixedVectorType::get( 4841 cast<llvm::VectorType>(DstTy)->getElementType(), 4); 4842 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src, 4843 Vec4Ty); 4844 4845 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3); 4846 Src->setName("astype"); 4847 return Src; 4848 } 4849 4850 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), 4851 Src, DstTy, "astype"); 4852 } 4853 4854 Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) { 4855 return CGF.EmitAtomicExpr(E).getScalarVal(); 4856 } 4857 4858 //===----------------------------------------------------------------------===// 4859 // Entry Point into this File 4860 //===----------------------------------------------------------------------===// 4861 4862 /// Emit the computation of the specified expression of scalar type, ignoring 4863 /// the result. 4864 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { 4865 assert(E && hasScalarEvaluationKind(E->getType()) && 4866 "Invalid scalar expression to emit"); 4867 4868 return ScalarExprEmitter(*this, IgnoreResultAssign) 4869 .Visit(const_cast<Expr *>(E)); 4870 } 4871 4872 /// Emit a conversion from the specified type to the specified destination type, 4873 /// both of which are LLVM scalar types. 4874 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, 4875 QualType DstTy, 4876 SourceLocation Loc) { 4877 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) && 4878 "Invalid scalar expression to emit"); 4879 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc); 4880 } 4881 4882 /// Emit a conversion from the specified complex type to the specified 4883 /// destination type, where the destination type is an LLVM scalar type. 
4884 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4885 QualType SrcTy,
4886 QualType DstTy,
4887 SourceLocation Loc) {
4888 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4889 "Invalid complex -> scalar conversion");
4890 return ScalarExprEmitter(*this)
4891 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4892 }
4893
4894
4895 llvm::Value *CodeGenFunction::
4896 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4897 bool isInc, bool isPre) {
4898 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4899 }
4900
4901 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4902 // object->isa or (*object).isa
4903 // Generate code as for: *(Class*)object
4904
4905 Expr *BaseExpr = E->getBase();
4906 Address Addr = Address::invalid();
4907 if (BaseExpr->isPRValue()) {
4908 llvm::Type *BaseTy =
4909 ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
4910 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
4911 } else {
4912 Addr = EmitLValue(BaseExpr).getAddress(*this);
4913 }
4914
4915 // Cast the address to Class*.
4916 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4917 return MakeAddrLValue(Addr, E->getType());
4918 }
4919
4920
4921 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4922 const CompoundAssignOperator *E) {
4923 ScalarExprEmitter Scalar(*this);
4924 Value *Result = nullptr;
4925 switch (E->getOpcode()) {
4926 #define COMPOUND_OP(Op) \
4927 case BO_##Op##Assign: \
4928 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4929 Result)
4930 COMPOUND_OP(Mul);
4931 COMPOUND_OP(Div);
4932 COMPOUND_OP(Rem);
4933 COMPOUND_OP(Add);
4934 COMPOUND_OP(Sub);
4935 COMPOUND_OP(Shl);
4936 COMPOUND_OP(Shr);
4937 COMPOUND_OP(And);
4938 COMPOUND_OP(Xor);
4939 COMPOUND_OP(Or);
4940 #undef COMPOUND_OP
4941
4942 case BO_PtrMemD:
4943 case BO_PtrMemI:
4944 case BO_Mul:
4945 case BO_Div:
4946 case BO_Rem:
4947 case BO_Add:
4948 case BO_Sub:
4949 case BO_Shl:
4950 case BO_Shr:
4951 case BO_LT:
4952 case BO_GT:
4953 case BO_LE:
4954 case BO_GE:
4955 case BO_EQ:
4956 case BO_NE:
4957 case BO_Cmp:
4958 case BO_And:
4959 case BO_Xor:
4960 case BO_Or:
4961 case BO_LAnd:
4962 case BO_LOr:
4963 case BO_Assign:
4964 case BO_Comma:
4965 llvm_unreachable("Not valid compound assignment operators");
4966 }
4967
4968 llvm_unreachable("Unhandled compound assignment operator");
4969 }
4970
4971 struct GEPOffsetAndOverflow {
4972 // The total (signed) byte offset for the GEP.
4973 llvm::Value *TotalOffset;
4974 // The offset overflow flag - true if the total offset overflows.
4975 llvm::Value *OffsetOverflows;
4976 };
4977
4978 /// Evaluate the given GEPVal, which is either an inbounds GEP or a constant,
4979 /// and compute the total offset it applies from its base pointer BasePtr.
4980 /// Returns the offset in bytes and a boolean flag indicating whether an
4981 /// overflow happened during evaluation.
4982 static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
4983 llvm::LLVMContext &VMContext,
4984 CodeGenModule &CGM,
4985 CGBuilderTy &Builder) {
4986 const auto &DL = CGM.getDataLayout();
4987
4988 // The total (signed) byte offset for the GEP.
4989 llvm::Value *TotalOffset = nullptr;
4990
4991 // Was the GEP already reduced to a constant?
4992 if (isa<llvm::Constant>(GEPVal)) { 4993 // Compute the offset by casting both pointers to integers and subtracting: 4994 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr) 4995 Value *BasePtr_int = 4996 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType())); 4997 Value *GEPVal_int = 4998 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType())); 4999 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int); 5000 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()}; 5001 } 5002 5003 auto *GEP = cast<llvm::GEPOperator>(GEPVal); 5004 assert(GEP->getPointerOperand() == BasePtr && 5005 "BasePtr must be the base of the GEP."); 5006 assert(GEP->isInBounds() && "Expected inbounds GEP"); 5007 5008 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType()); 5009 5010 // Grab references to the signed add/mul overflow intrinsics for intptr_t. 5011 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy); 5012 auto *SAddIntrinsic = 5013 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy); 5014 auto *SMulIntrinsic = 5015 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy); 5016 5017 // The offset overflow flag - true if the total offset overflows. 5018 llvm::Value *OffsetOverflows = Builder.getFalse(); 5019 5020 /// Return the result of the given binary operation. 5021 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS, 5022 llvm::Value *RHS) -> llvm::Value * { 5023 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop"); 5024 5025 // If the operands are constants, return a constant result. 5026 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) { 5027 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) { 5028 llvm::APInt N; 5029 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode, 5030 /*Signed=*/true, N); 5031 if (HasOverflow) 5032 OffsetOverflows = Builder.getTrue(); 5033 return llvm::ConstantInt::get(VMContext, N); 5034 } 5035 } 5036 5037 // Otherwise, compute the result with checked arithmetic. 5038 auto *ResultAndOverflow = Builder.CreateCall( 5039 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS}); 5040 OffsetOverflows = Builder.CreateOr( 5041 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows); 5042 return Builder.CreateExtractValue(ResultAndOverflow, 0); 5043 }; 5044 5045 // Determine the total byte offset by looking at each GEP operand. 5046 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP); 5047 GTI != GTE; ++GTI) { 5048 llvm::Value *LocalOffset; 5049 auto *Index = GTI.getOperand(); 5050 // Compute the local offset contributed by this indexing step: 5051 if (auto *STy = GTI.getStructTypeOrNull()) { 5052 // For struct indexing, the local offset is the byte position of the 5053 // specified field. 5054 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue(); 5055 LocalOffset = llvm::ConstantInt::get( 5056 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo)); 5057 } else { 5058 // Otherwise this is array-like indexing. The local offset is the index 5059 // multiplied by the element size. 5060 auto *ElementSize = llvm::ConstantInt::get( 5061 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType())); 5062 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true); 5063 LocalOffset = eval(BO_Mul, ElementSize, IndexS); 5064 } 5065 5066 // If this is the first offset, set it as the total offset. Otherwise, add 5067 // the local offset into the running total. 
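// For example, for `getelementptr inbounds %struct.S, ptr %p, i64 %i, i32 2`
// the total offset works out to roughly `%i * sizeof(struct S) + <byte offset
// of field 2>`, with the multiply and add going through the checked
// intrinsics above whenever the operands are not compile-time constants.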
5068 if (!TotalOffset || TotalOffset == Zero) 5069 TotalOffset = LocalOffset; 5070 else 5071 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset); 5072 } 5073 5074 return {TotalOffset, OffsetOverflows}; 5075 } 5076 5077 Value * 5078 CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr, 5079 ArrayRef<Value *> IdxList, 5080 bool SignedIndices, bool IsSubtraction, 5081 SourceLocation Loc, const Twine &Name) { 5082 llvm::Type *PtrTy = Ptr->getType(); 5083 Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name); 5084 5085 // If the pointer overflow sanitizer isn't enabled, do nothing. 5086 if (!SanOpts.has(SanitizerKind::PointerOverflow)) 5087 return GEPVal; 5088 5089 // Perform nullptr-and-offset check unless the nullptr is defined. 5090 bool PerformNullCheck = !NullPointerIsDefined( 5091 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace()); 5092 // Check for overflows unless the GEP got constant-folded, 5093 // and only in the default address space 5094 bool PerformOverflowCheck = 5095 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0; 5096 5097 if (!(PerformNullCheck || PerformOverflowCheck)) 5098 return GEPVal; 5099 5100 const auto &DL = CGM.getDataLayout(); 5101 5102 SanitizerScope SanScope(this); 5103 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy); 5104 5105 GEPOffsetAndOverflow EvaluatedGEP = 5106 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder); 5107 5108 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || 5109 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && 5110 "If the offset got constant-folded, we don't expect that there was an " 5111 "overflow."); 5112 5113 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy); 5114 5115 // Common case: if the total offset is zero, and we are using C++ semantics, 5116 // where nullptr+0 is defined, don't emit a check. 5117 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus) 5118 return GEPVal; 5119 5120 // Now that we've computed the total offset, add it to the base pointer (with 5121 // wrapping semantics). 5122 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy); 5123 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset); 5124 5125 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks; 5126 5127 if (PerformNullCheck) { 5128 // In C++, if the base pointer evaluates to a null pointer value, 5129 // the only valid pointer this inbounds GEP can produce is also 5130 // a null pointer, so the offset must also evaluate to zero. 5131 // Likewise, if we have non-zero base pointer, we can not get null pointer 5132 // as a result, so the offset can not be -intptr_t(BasePtr). 5133 // In other words, both pointers are either null, or both are non-null, 5134 // or the behaviour is undefined. 5135 // 5136 // C, however, is more strict in this regard, and gives more 5137 // optimization opportunities: in C, additionally, nullptr+0 is undefined. 5138 // So both the input to the 'gep inbounds' AND the output must not be null. 5139 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr); 5140 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP); 5141 auto *Valid = 5142 CGM.getLangOpts().CPlusPlus 5143 ? 
Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
5144 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
5145 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
5146 }
5147
5148 if (PerformOverflowCheck) {
5149 // The GEP is valid if:
5150 // 1) The total offset doesn't overflow, and
5151 // 2) The sign of the difference between the computed address and the base
5152 // pointer matches the sign of the total offset.
5153 llvm::Value *ValidGEP;
5154 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
5155 if (SignedIndices) {
5156 // GEP is computed as `unsigned base + signed offset`, therefore:
5157 // * If the offset was positive, then the computed pointer cannot be
5158 // [unsigned] less than the base pointer, unless it overflowed.
5159 // * If the offset was negative, then the computed pointer cannot be
5160 // [unsigned] greater than the base pointer, unless it overflowed.
5161 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5162 auto *PosOrZeroOffset =
5163 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
5164 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
5165 ValidGEP =
5166 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
5167 } else if (!IsSubtraction) {
5168 // GEP is computed as `unsigned base + unsigned offset`, therefore the
5169 // computed pointer cannot be [unsigned] less than the base pointer,
5170 // unless there was an overflow.
5171 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5172 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5173 } else {
5174 // GEP is computed as `unsigned base - unsigned offset`, therefore the
5175 // computed pointer cannot be [unsigned] greater than the base pointer,
5176 // unless there was an overflow.
5177 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5178 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
5179 }
5180 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
5181 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
5182 }
5183
5184 assert(!Checks.empty() && "Should have produced some checks.");
5185
5186 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5187 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5188 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5189 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5190
5191 return GEPVal;
5192 }
5193
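// For reference, under -fsanitize=pointer-overflow a plain `p + i` on an
// `int *p` therefore emits the inbounds GEP itself plus a parallel integer
// computation of `ptrtoint(p) + i * sizeof(int)`; if the null/overflow checks
// above fail at run time, control branches to the
// __ubsan_handle_pointer_overflow runtime handler. (Illustrative summary; the
// exact checks depend on language mode, address space, and whether the GEP
// constant-folds.)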