1 //===-- ConstantFolding.cpp - Fold instructions into constants ------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines routines for folding instructions into constants. 10 // 11 // Also, to supplement the basic IR ConstantExpr simplifications, 12 // this file defines some additional folding routines that can make use of 13 // DataLayout information. These functions cannot go in IR due to library 14 // dependency issues. 15 // 16 //===----------------------------------------------------------------------===// 17 18 #include "llvm/Analysis/ConstantFolding.h" 19 #include "llvm/ADT/APFloat.h" 20 #include "llvm/ADT/APInt.h" 21 #include "llvm/ADT/APSInt.h" 22 #include "llvm/ADT/ArrayRef.h" 23 #include "llvm/ADT/DenseMap.h" 24 #include "llvm/ADT/STLExtras.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/StringRef.h" 27 #include "llvm/Analysis/TargetFolder.h" 28 #include "llvm/Analysis/TargetLibraryInfo.h" 29 #include "llvm/Analysis/ValueTracking.h" 30 #include "llvm/Analysis/VectorUtils.h" 31 #include "llvm/Config/config.h" 32 #include "llvm/IR/Constant.h" 33 #include "llvm/IR/Constants.h" 34 #include "llvm/IR/DataLayout.h" 35 #include "llvm/IR/DerivedTypes.h" 36 #include "llvm/IR/Function.h" 37 #include "llvm/IR/GlobalValue.h" 38 #include "llvm/IR/GlobalVariable.h" 39 #include "llvm/IR/InstrTypes.h" 40 #include "llvm/IR/Instruction.h" 41 #include "llvm/IR/Instructions.h" 42 #include "llvm/IR/IntrinsicInst.h" 43 #include "llvm/IR/Intrinsics.h" 44 #include "llvm/IR/IntrinsicsAArch64.h" 45 #include "llvm/IR/IntrinsicsAMDGPU.h" 46 #include "llvm/IR/IntrinsicsARM.h" 47 #include "llvm/IR/IntrinsicsWebAssembly.h" 48 #include "llvm/IR/IntrinsicsX86.h" 49 #include "llvm/IR/Operator.h" 50 #include "llvm/IR/Type.h" 51 #include "llvm/IR/Value.h" 52 #include "llvm/Support/Casting.h" 53 #include "llvm/Support/ErrorHandling.h" 54 #include "llvm/Support/KnownBits.h" 55 #include "llvm/Support/MathExtras.h" 56 #include <cassert> 57 #include <cerrno> 58 #include <cfenv> 59 #include <cmath> 60 #include <cstddef> 61 #include <cstdint> 62 63 using namespace llvm; 64 65 namespace { 66 Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP, 67 ArrayRef<Constant *> Ops, 68 const DataLayout &DL, 69 const TargetLibraryInfo *TLI, 70 bool ForLoadOperand); 71 72 //===----------------------------------------------------------------------===// 73 // Constant Folding internal helper functions 74 //===----------------------------------------------------------------------===// 75 76 static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy, 77 Constant *C, Type *SrcEltTy, 78 unsigned NumSrcElts, 79 const DataLayout &DL) { 80 // Now that we know that the input value is a vector of integers, just shift 81 // and insert them into our result. 82 unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy); 83 for (unsigned i = 0; i != NumSrcElts; ++i) { 84 Constant *Element; 85 if (DL.isLittleEndian()) 86 Element = C->getAggregateElement(NumSrcElts - i - 1); 87 else 88 Element = C->getAggregateElement(i); 89 90 if (Element && isa<UndefValue>(Element)) { 91 Result <<= BitShift; 92 continue; 93 } 94 95 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element); 96 if (!ElementCI) 97 return ConstantExpr::getBitCast(C, DestTy); 98 99 Result <<= BitShift; 100 Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth()); 101 } 102 103 return nullptr; 104 } 105 106 /// Constant fold bitcast, symbolically evaluating it with DataLayout. 107 /// This always returns a non-null constant, but it may be a 108 /// ConstantExpr if unfoldable. 109 Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) { 110 assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) && 111 "Invalid constantexpr bitcast!"); 112 113 // Catch the obvious splat cases. 114 if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy()) 115 return Constant::getNullValue(DestTy); 116 if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy() && 117 !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types! 118 return Constant::getAllOnesValue(DestTy); 119 120 if (auto *VTy = dyn_cast<VectorType>(C->getType())) { 121 // Handle a vector->scalar integer/fp cast. 122 if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) { 123 unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements(); 124 Type *SrcEltTy = VTy->getElementType(); 125 126 // If the vector is a vector of floating point, convert it to vector of int 127 // to simplify things. 128 if (SrcEltTy->isFloatingPointTy()) { 129 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); 130 auto *SrcIVTy = FixedVectorType::get( 131 IntegerType::get(C->getContext(), FPWidth), NumSrcElts); 132 // Ask IR to do the conversion now that #elts line up. 133 C = ConstantExpr::getBitCast(C, SrcIVTy); 134 } 135 136 APInt Result(DL.getTypeSizeInBits(DestTy), 0); 137 if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C, 138 SrcEltTy, NumSrcElts, DL)) 139 return CE; 140 141 if (isa<IntegerType>(DestTy)) 142 return ConstantInt::get(DestTy, Result); 143 144 APFloat FP(DestTy->getFltSemantics(), Result); 145 return ConstantFP::get(DestTy->getContext(), FP); 146 } 147 } 148 149 // The code below only handles casts to vectors currently. 150 auto *DestVTy = dyn_cast<VectorType>(DestTy); 151 if (!DestVTy) 152 return ConstantExpr::getBitCast(C, DestTy); 153 154 // If this is a scalar -> vector cast, convert the input into a <1 x scalar> 155 // vector so the code below can handle it uniformly. 156 if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) { 157 Constant *Ops = C; // don't take the address of C! 158 return FoldBitCast(ConstantVector::get(Ops), DestTy, DL); 159 } 160 161 // If this is a bitcast from constant vector -> vector, fold it. 162 if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C)) 163 return ConstantExpr::getBitCast(C, DestTy); 164 165 // If the element types match, IR can fold it. 166 unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements(); 167 unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements(); 168 if (NumDstElt == NumSrcElt) 169 return ConstantExpr::getBitCast(C, DestTy); 170 171 Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType(); 172 Type *DstEltTy = DestVTy->getElementType(); 173 174 // Otherwise, we're changing the number of elements in a vector, which 175 // requires endianness information to do the right thing. For example, 176 // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) 177 // folds to (little endian): 178 // <4 x i32> <i32 0, i32 0, i32 1, i32 0> 179 // and to (big endian): 180 // <4 x i32> <i32 0, i32 0, i32 0, i32 1> 181 182 // First thing is first. We only want to think about integer here, so if 183 // we have something in FP form, recast it as integer. 184 if (DstEltTy->isFloatingPointTy()) { 185 // Fold to an vector of integers with same size as our FP type. 186 unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits(); 187 auto *DestIVTy = FixedVectorType::get( 188 IntegerType::get(C->getContext(), FPWidth), NumDstElt); 189 // Recursively handle this integer conversion, if possible. 190 C = FoldBitCast(C, DestIVTy, DL); 191 192 // Finally, IR can handle this now that #elts line up. 193 return ConstantExpr::getBitCast(C, DestTy); 194 } 195 196 // Okay, we know the destination is integer, if the input is FP, convert 197 // it to integer first. 198 if (SrcEltTy->isFloatingPointTy()) { 199 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); 200 auto *SrcIVTy = FixedVectorType::get( 201 IntegerType::get(C->getContext(), FPWidth), NumSrcElt); 202 // Ask IR to do the conversion now that #elts line up. 203 C = ConstantExpr::getBitCast(C, SrcIVTy); 204 // If IR wasn't able to fold it, bail out. 205 if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector. 206 !isa<ConstantDataVector>(C)) 207 return C; 208 } 209 210 // Now we know that the input and output vectors are both integer vectors 211 // of the same size, and that their #elements is not the same. Do the 212 // conversion here, which depends on whether the input or output has 213 // more elements. 214 bool isLittleEndian = DL.isLittleEndian(); 215 216 SmallVector<Constant*, 32> Result; 217 if (NumDstElt < NumSrcElt) { 218 // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>) 219 Constant *Zero = Constant::getNullValue(DstEltTy); 220 unsigned Ratio = NumSrcElt/NumDstElt; 221 unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits(); 222 unsigned SrcElt = 0; 223 for (unsigned i = 0; i != NumDstElt; ++i) { 224 // Build each element of the result. 225 Constant *Elt = Zero; 226 unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1); 227 for (unsigned j = 0; j != Ratio; ++j) { 228 Constant *Src = C->getAggregateElement(SrcElt++); 229 if (Src && isa<UndefValue>(Src)) 230 Src = Constant::getNullValue( 231 cast<VectorType>(C->getType())->getElementType()); 232 else 233 Src = dyn_cast_or_null<ConstantInt>(Src); 234 if (!Src) // Reject constantexpr elements. 235 return ConstantExpr::getBitCast(C, DestTy); 236 237 // Zero extend the element to the right size. 238 Src = ConstantExpr::getZExt(Src, Elt->getType()); 239 240 // Shift it to the right place, depending on endianness. 241 Src = ConstantExpr::getShl(Src, 242 ConstantInt::get(Src->getType(), ShiftAmt)); 243 ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize; 244 245 // Mix it in. 246 Elt = ConstantExpr::getOr(Elt, Src); 247 } 248 Result.push_back(Elt); 249 } 250 return ConstantVector::get(Result); 251 } 252 253 // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>) 254 unsigned Ratio = NumDstElt/NumSrcElt; 255 unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy); 256 257 // Loop over each source value, expanding into multiple results. 258 for (unsigned i = 0; i != NumSrcElt; ++i) { 259 auto *Element = C->getAggregateElement(i); 260 261 if (!Element) // Reject constantexpr elements. 262 return ConstantExpr::getBitCast(C, DestTy); 263 264 if (isa<UndefValue>(Element)) { 265 // Correctly Propagate undef values. 266 Result.append(Ratio, UndefValue::get(DstEltTy)); 267 continue; 268 } 269 270 auto *Src = dyn_cast<ConstantInt>(Element); 271 if (!Src) 272 return ConstantExpr::getBitCast(C, DestTy); 273 274 unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1); 275 for (unsigned j = 0; j != Ratio; ++j) { 276 // Shift the piece of the value into the right place, depending on 277 // endianness. 278 Constant *Elt = ConstantExpr::getLShr(Src, 279 ConstantInt::get(Src->getType(), ShiftAmt)); 280 ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize; 281 282 // Truncate the element to an integer with the same pointer size and 283 // convert the element back to a pointer using a inttoptr. 284 if (DstEltTy->isPointerTy()) { 285 IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize); 286 Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy); 287 Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy)); 288 continue; 289 } 290 291 // Truncate and remember this piece. 292 Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy)); 293 } 294 } 295 296 return ConstantVector::get(Result); 297 } 298 299 } // end anonymous namespace 300 301 /// If this constant is a constant offset from a global, return the global and 302 /// the constant. Because of constantexprs, this function is recursive. 303 bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, 304 APInt &Offset, const DataLayout &DL, 305 DSOLocalEquivalent **DSOEquiv) { 306 if (DSOEquiv) 307 *DSOEquiv = nullptr; 308 309 // Trivial case, constant is the global. 310 if ((GV = dyn_cast<GlobalValue>(C))) { 311 unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType()); 312 Offset = APInt(BitWidth, 0); 313 return true; 314 } 315 316 if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) { 317 if (DSOEquiv) 318 *DSOEquiv = FoundDSOEquiv; 319 GV = FoundDSOEquiv->getGlobalValue(); 320 unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType()); 321 Offset = APInt(BitWidth, 0); 322 return true; 323 } 324 325 // Otherwise, if this isn't a constant expr, bail out. 326 auto *CE = dyn_cast<ConstantExpr>(C); 327 if (!CE) return false; 328 329 // Look through ptr->int and ptr->ptr casts. 330 if (CE->getOpcode() == Instruction::PtrToInt || 331 CE->getOpcode() == Instruction::BitCast) 332 return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL, 333 DSOEquiv); 334 335 // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5) 336 auto *GEP = dyn_cast<GEPOperator>(CE); 337 if (!GEP) 338 return false; 339 340 unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType()); 341 APInt TmpOffset(BitWidth, 0); 342 343 // If the base isn't a global+constant, we aren't either. 344 if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL, 345 DSOEquiv)) 346 return false; 347 348 // Otherwise, add any offset that our operands provide. 349 if (!GEP->accumulateConstantOffset(DL, TmpOffset)) 350 return false; 351 352 Offset = TmpOffset; 353 return true; 354 } 355 356 Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy, 357 const DataLayout &DL) { 358 do { 359 Type *SrcTy = C->getType(); 360 uint64_t DestSize = DL.getTypeSizeInBits(DestTy); 361 uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy); 362 if (SrcSize < DestSize) 363 return nullptr; 364 365 // Catch the obvious splat cases (since all-zeros can coerce non-integral 366 // pointers legally). 367 if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy()) 368 return Constant::getNullValue(DestTy); 369 if (C->isAllOnesValue() && 370 (DestTy->isIntegerTy() || DestTy->isFloatingPointTy() || 371 DestTy->isVectorTy()) && 372 !DestTy->isX86_AMXTy() && !DestTy->isX86_MMXTy() && 373 !DestTy->isPtrOrPtrVectorTy()) 374 // Get ones when the input is trivial, but 375 // only for supported types inside getAllOnesValue. 376 return Constant::getAllOnesValue(DestTy); 377 378 // If the type sizes are the same and a cast is legal, just directly 379 // cast the constant. 380 // But be careful not to coerce non-integral pointers illegally. 381 if (SrcSize == DestSize && 382 DL.isNonIntegralPointerType(SrcTy->getScalarType()) == 383 DL.isNonIntegralPointerType(DestTy->getScalarType())) { 384 Instruction::CastOps Cast = Instruction::BitCast; 385 // If we are going from a pointer to int or vice versa, we spell the cast 386 // differently. 387 if (SrcTy->isIntegerTy() && DestTy->isPointerTy()) 388 Cast = Instruction::IntToPtr; 389 else if (SrcTy->isPointerTy() && DestTy->isIntegerTy()) 390 Cast = Instruction::PtrToInt; 391 392 if (CastInst::castIsValid(Cast, C, DestTy)) 393 return ConstantExpr::getCast(Cast, C, DestTy); 394 } 395 396 // If this isn't an aggregate type, there is nothing we can do to drill down 397 // and find a bitcastable constant. 398 if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy()) 399 return nullptr; 400 401 // We're simulating a load through a pointer that was bitcast to point to 402 // a different type, so we can try to walk down through the initial 403 // elements of an aggregate to see if some part of the aggregate is 404 // castable to implement the "load" semantic model. 405 if (SrcTy->isStructTy()) { 406 // Struct types might have leading zero-length elements like [0 x i32], 407 // which are certainly not what we are looking for, so skip them. 408 unsigned Elem = 0; 409 Constant *ElemC; 410 do { 411 ElemC = C->getAggregateElement(Elem++); 412 } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero()); 413 C = ElemC; 414 } else { 415 C = C->getAggregateElement(0u); 416 } 417 } while (C); 418 419 return nullptr; 420 } 421 422 namespace { 423 424 /// Recursive helper to read bits out of global. C is the constant being copied 425 /// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy 426 /// results into and BytesLeft is the number of bytes left in 427 /// the CurPtr buffer. DL is the DataLayout. 428 bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr, 429 unsigned BytesLeft, const DataLayout &DL) { 430 assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) && 431 "Out of range access"); 432 433 // If this element is zero or undefined, we can just return since *CurPtr is 434 // zero initialized. 435 if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) 436 return true; 437 438 if (auto *CI = dyn_cast<ConstantInt>(C)) { 439 if (CI->getBitWidth() > 64 || 440 (CI->getBitWidth() & 7) != 0) 441 return false; 442 443 uint64_t Val = CI->getZExtValue(); 444 unsigned IntBytes = unsigned(CI->getBitWidth()/8); 445 446 for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) { 447 int n = ByteOffset; 448 if (!DL.isLittleEndian()) 449 n = IntBytes - n - 1; 450 CurPtr[i] = (unsigned char)(Val >> (n * 8)); 451 ++ByteOffset; 452 } 453 return true; 454 } 455 456 if (auto *CFP = dyn_cast<ConstantFP>(C)) { 457 if (CFP->getType()->isDoubleTy()) { 458 C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL); 459 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL); 460 } 461 if (CFP->getType()->isFloatTy()){ 462 C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL); 463 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL); 464 } 465 if (CFP->getType()->isHalfTy()){ 466 C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL); 467 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL); 468 } 469 return false; 470 } 471 472 if (auto *CS = dyn_cast<ConstantStruct>(C)) { 473 const StructLayout *SL = DL.getStructLayout(CS->getType()); 474 unsigned Index = SL->getElementContainingOffset(ByteOffset); 475 uint64_t CurEltOffset = SL->getElementOffset(Index); 476 ByteOffset -= CurEltOffset; 477 478 while (true) { 479 // If the element access is to the element itself and not to tail padding, 480 // read the bytes from the element. 481 uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType()); 482 483 if (ByteOffset < EltSize && 484 !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr, 485 BytesLeft, DL)) 486 return false; 487 488 ++Index; 489 490 // Check to see if we read from the last struct element, if so we're done. 491 if (Index == CS->getType()->getNumElements()) 492 return true; 493 494 // If we read all of the bytes we needed from this element we're done. 495 uint64_t NextEltOffset = SL->getElementOffset(Index); 496 497 if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset) 498 return true; 499 500 // Move to the next element of the struct. 501 CurPtr += NextEltOffset - CurEltOffset - ByteOffset; 502 BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset; 503 ByteOffset = 0; 504 CurEltOffset = NextEltOffset; 505 } 506 // not reached. 507 } 508 509 if (isa<ConstantArray>(C) || isa<ConstantVector>(C) || 510 isa<ConstantDataSequential>(C)) { 511 uint64_t NumElts; 512 Type *EltTy; 513 if (auto *AT = dyn_cast<ArrayType>(C->getType())) { 514 NumElts = AT->getNumElements(); 515 EltTy = AT->getElementType(); 516 } else { 517 NumElts = cast<FixedVectorType>(C->getType())->getNumElements(); 518 EltTy = cast<FixedVectorType>(C->getType())->getElementType(); 519 } 520 uint64_t EltSize = DL.getTypeAllocSize(EltTy); 521 uint64_t Index = ByteOffset / EltSize; 522 uint64_t Offset = ByteOffset - Index * EltSize; 523 524 for (; Index != NumElts; ++Index) { 525 if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr, 526 BytesLeft, DL)) 527 return false; 528 529 uint64_t BytesWritten = EltSize - Offset; 530 assert(BytesWritten <= EltSize && "Not indexing into this element?"); 531 if (BytesWritten >= BytesLeft) 532 return true; 533 534 Offset = 0; 535 BytesLeft -= BytesWritten; 536 CurPtr += BytesWritten; 537 } 538 return true; 539 } 540 541 if (auto *CE = dyn_cast<ConstantExpr>(C)) { 542 if (CE->getOpcode() == Instruction::IntToPtr && 543 CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) { 544 return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr, 545 BytesLeft, DL); 546 } 547 } 548 549 // Otherwise, unknown initializer type. 550 return false; 551 } 552 553 Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy, 554 const DataLayout &DL) { 555 // Bail out early. Not expect to load from scalable global variable. 556 if (isa<ScalableVectorType>(LoadTy)) 557 return nullptr; 558 559 auto *PTy = cast<PointerType>(C->getType()); 560 auto *IntType = dyn_cast<IntegerType>(LoadTy); 561 562 // If this isn't an integer load we can't fold it directly. 563 if (!IntType) { 564 unsigned AS = PTy->getAddressSpace(); 565 566 // If this is a float/double load, we can try folding it as an int32/64 load 567 // and then bitcast the result. This can be useful for union cases. Note 568 // that address spaces don't matter here since we're not going to result in 569 // an actual new load. 570 Type *MapTy; 571 if (LoadTy->isHalfTy()) 572 MapTy = Type::getInt16Ty(C->getContext()); 573 else if (LoadTy->isFloatTy()) 574 MapTy = Type::getInt32Ty(C->getContext()); 575 else if (LoadTy->isDoubleTy()) 576 MapTy = Type::getInt64Ty(C->getContext()); 577 else if (LoadTy->isVectorTy()) { 578 MapTy = PointerType::getIntNTy( 579 C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize()); 580 } else 581 return nullptr; 582 583 C = FoldBitCast(C, MapTy->getPointerTo(AS), DL); 584 if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) { 585 if (Res->isNullValue() && !LoadTy->isX86_MMXTy() && 586 !LoadTy->isX86_AMXTy()) 587 // Materializing a zero can be done trivially without a bitcast 588 return Constant::getNullValue(LoadTy); 589 Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy; 590 Res = FoldBitCast(Res, CastTy, DL); 591 if (LoadTy->isPtrOrPtrVectorTy()) { 592 // For vector of pointer, we needed to first convert to a vector of integer, then do vector inttoptr 593 if (Res->isNullValue() && !LoadTy->isX86_MMXTy() && 594 !LoadTy->isX86_AMXTy()) 595 return Constant::getNullValue(LoadTy); 596 if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) 597 // Be careful not to replace a load of an addrspace value with an inttoptr here 598 return nullptr; 599 Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy); 600 } 601 return Res; 602 } 603 return nullptr; 604 } 605 606 unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8; 607 if (BytesLoaded > 32 || BytesLoaded == 0) 608 return nullptr; 609 610 GlobalValue *GVal; 611 APInt OffsetAI; 612 if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL)) 613 return nullptr; 614 615 auto *GV = dyn_cast<GlobalVariable>(GVal); 616 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 617 !GV->getInitializer()->getType()->isSized()) 618 return nullptr; 619 620 int64_t Offset = OffsetAI.getSExtValue(); 621 int64_t InitializerSize = 622 DL.getTypeAllocSize(GV->getInitializer()->getType()).getFixedSize(); 623 624 // If we're not accessing anything in this constant, the result is undefined. 625 if (Offset <= -1 * static_cast<int64_t>(BytesLoaded)) 626 return UndefValue::get(IntType); 627 628 // If we're not accessing anything in this constant, the result is undefined. 629 if (Offset >= InitializerSize) 630 return UndefValue::get(IntType); 631 632 unsigned char RawBytes[32] = {0}; 633 unsigned char *CurPtr = RawBytes; 634 unsigned BytesLeft = BytesLoaded; 635 636 // If we're loading off the beginning of the global, some bytes may be valid. 637 if (Offset < 0) { 638 CurPtr += -Offset; 639 BytesLeft += Offset; 640 Offset = 0; 641 } 642 643 if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL)) 644 return nullptr; 645 646 APInt ResultVal = APInt(IntType->getBitWidth(), 0); 647 if (DL.isLittleEndian()) { 648 ResultVal = RawBytes[BytesLoaded - 1]; 649 for (unsigned i = 1; i != BytesLoaded; ++i) { 650 ResultVal <<= 8; 651 ResultVal |= RawBytes[BytesLoaded - 1 - i]; 652 } 653 } else { 654 ResultVal = RawBytes[0]; 655 for (unsigned i = 1; i != BytesLoaded; ++i) { 656 ResultVal <<= 8; 657 ResultVal |= RawBytes[i]; 658 } 659 } 660 661 return ConstantInt::get(IntType->getContext(), ResultVal); 662 } 663 664 Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy, 665 const DataLayout &DL) { 666 auto *SrcPtr = CE->getOperand(0); 667 if (!SrcPtr->getType()->isPointerTy()) 668 return nullptr; 669 670 return ConstantFoldLoadFromConstPtr(SrcPtr, DestTy, DL); 671 } 672 673 } // end anonymous namespace 674 675 Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, 676 const DataLayout &DL) { 677 // First, try the easy cases: 678 if (auto *GV = dyn_cast<GlobalVariable>(C)) 679 if (GV->isConstant() && GV->hasDefinitiveInitializer()) 680 return ConstantFoldLoadThroughBitcast(GV->getInitializer(), Ty, DL); 681 682 if (auto *GA = dyn_cast<GlobalAlias>(C)) 683 if (GA->getAliasee() && !GA->isInterposable()) 684 return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL); 685 686 // If the loaded value isn't a constant expr, we can't handle it. 687 auto *CE = dyn_cast<ConstantExpr>(C); 688 if (!CE) 689 return nullptr; 690 691 if (CE->getOpcode() == Instruction::GetElementPtr) { 692 if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) { 693 if (GV->isConstant() && GV->hasDefinitiveInitializer()) { 694 if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr( 695 GV->getInitializer(), CE, Ty, DL)) 696 return V; 697 } 698 } else { 699 // Try to simplify GEP if the pointer operand wasn't a GlobalVariable. 700 // SymbolicallyEvaluateGEP() with `ForLoadOperand = true` can potentially 701 // simplify the GEP more than it normally would have been, but should only 702 // be used for const folding loads. 703 SmallVector<Constant *> Ops; 704 for (unsigned I = 0, E = CE->getNumOperands(); I != E; ++I) 705 Ops.push_back(cast<Constant>(CE->getOperand(I))); 706 if (auto *Simplified = dyn_cast_or_null<ConstantExpr>( 707 SymbolicallyEvaluateGEP(cast<GEPOperator>(CE), Ops, DL, nullptr, 708 /*ForLoadOperand*/ true))) { 709 // If the symbolically evaluated GEP is another GEP, we can only const 710 // fold it if the resulting pointer operand is a GlobalValue. Otherwise 711 // there is nothing else to simplify since the GEP is already in the 712 // most simplified form. 713 if (isa<GEPOperator>(Simplified)) { 714 if (auto *GV = dyn_cast<GlobalVariable>(Simplified->getOperand(0))) { 715 if (GV->isConstant() && GV->hasDefinitiveInitializer()) { 716 if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr( 717 GV->getInitializer(), Simplified, Ty, DL)) 718 return V; 719 } 720 } 721 } else { 722 return ConstantFoldLoadFromConstPtr(Simplified, Ty, DL); 723 } 724 } 725 } 726 } 727 728 if (CE->getOpcode() == Instruction::BitCast) 729 if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL)) 730 return LoadedC; 731 732 // Instead of loading constant c string, use corresponding integer value 733 // directly if string length is small enough. 734 StringRef Str; 735 if (getConstantStringInfo(CE, Str) && !Str.empty()) { 736 size_t StrLen = Str.size(); 737 unsigned NumBits = Ty->getPrimitiveSizeInBits(); 738 // Replace load with immediate integer if the result is an integer or fp 739 // value. 740 if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 && 741 (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) { 742 APInt StrVal(NumBits, 0); 743 APInt SingleChar(NumBits, 0); 744 if (DL.isLittleEndian()) { 745 for (unsigned char C : reverse(Str.bytes())) { 746 SingleChar = static_cast<uint64_t>(C); 747 StrVal = (StrVal << 8) | SingleChar; 748 } 749 } else { 750 for (unsigned char C : Str.bytes()) { 751 SingleChar = static_cast<uint64_t>(C); 752 StrVal = (StrVal << 8) | SingleChar; 753 } 754 // Append NULL at the end. 755 SingleChar = 0; 756 StrVal = (StrVal << 8) | SingleChar; 757 } 758 759 Constant *Res = ConstantInt::get(CE->getContext(), StrVal); 760 if (Ty->isFloatingPointTy()) 761 Res = ConstantExpr::getBitCast(Res, Ty); 762 return Res; 763 } 764 } 765 766 // If this load comes from anywhere in a constant global, and if the global 767 // is all undef or zero, we know what it loads. 768 if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(CE))) { 769 if (GV->isConstant() && GV->hasDefinitiveInitializer()) { 770 if (GV->getInitializer()->isNullValue()) 771 return Constant::getNullValue(Ty); 772 if (isa<UndefValue>(GV->getInitializer())) 773 return UndefValue::get(Ty); 774 } 775 } 776 777 // Try hard to fold loads from bitcasted strange and non-type-safe things. 778 return FoldReinterpretLoadFromConstPtr(CE, Ty, DL); 779 } 780 781 namespace { 782 783 /// One of Op0/Op1 is a constant expression. 784 /// Attempt to symbolically evaluate the result of a binary operator merging 785 /// these together. If target data info is available, it is provided as DL, 786 /// otherwise DL is null. 787 Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1, 788 const DataLayout &DL) { 789 // SROA 790 791 // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl. 792 // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute 793 // bits. 794 795 if (Opc == Instruction::And) { 796 KnownBits Known0 = computeKnownBits(Op0, DL); 797 KnownBits Known1 = computeKnownBits(Op1, DL); 798 if ((Known1.One | Known0.Zero).isAllOnesValue()) { 799 // All the bits of Op0 that the 'and' could be masking are already zero. 800 return Op0; 801 } 802 if ((Known0.One | Known1.Zero).isAllOnesValue()) { 803 // All the bits of Op1 that the 'and' could be masking are already zero. 804 return Op1; 805 } 806 807 Known0 &= Known1; 808 if (Known0.isConstant()) 809 return ConstantInt::get(Op0->getType(), Known0.getConstant()); 810 } 811 812 // If the constant expr is something like &A[123] - &A[4].f, fold this into a 813 // constant. This happens frequently when iterating over a global array. 814 if (Opc == Instruction::Sub) { 815 GlobalValue *GV1, *GV2; 816 APInt Offs1, Offs2; 817 818 if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL)) 819 if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) { 820 unsigned OpSize = DL.getTypeSizeInBits(Op0->getType()); 821 822 // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow. 823 // PtrToInt may change the bitwidth so we have convert to the right size 824 // first. 825 return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) - 826 Offs2.zextOrTrunc(OpSize)); 827 } 828 } 829 830 return nullptr; 831 } 832 833 /// If array indices are not pointer-sized integers, explicitly cast them so 834 /// that they aren't implicitly casted by the getelementptr. 835 Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops, 836 Type *ResultTy, Optional<unsigned> InRangeIndex, 837 const DataLayout &DL, const TargetLibraryInfo *TLI) { 838 Type *IntIdxTy = DL.getIndexType(ResultTy); 839 Type *IntIdxScalarTy = IntIdxTy->getScalarType(); 840 841 bool Any = false; 842 SmallVector<Constant*, 32> NewIdxs; 843 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 844 if ((i == 1 || 845 !isa<StructType>(GetElementPtrInst::getIndexedType( 846 SrcElemTy, Ops.slice(1, i - 1)))) && 847 Ops[i]->getType()->getScalarType() != IntIdxScalarTy) { 848 Any = true; 849 Type *NewType = Ops[i]->getType()->isVectorTy() 850 ? IntIdxTy 851 : IntIdxScalarTy; 852 NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i], 853 true, 854 NewType, 855 true), 856 Ops[i], NewType)); 857 } else 858 NewIdxs.push_back(Ops[i]); 859 } 860 861 if (!Any) 862 return nullptr; 863 864 Constant *C = ConstantExpr::getGetElementPtr( 865 SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex); 866 return ConstantFoldConstant(C, DL, TLI); 867 } 868 869 /// Strip the pointer casts, but preserve the address space information. 870 Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy, 871 bool ForLoadOperand) { 872 assert(Ptr->getType()->isPointerTy() && "Not a pointer type"); 873 auto *OldPtrTy = cast<PointerType>(Ptr->getType()); 874 Ptr = cast<Constant>(Ptr->stripPointerCasts()); 875 if (ForLoadOperand) { 876 while (isa<GlobalAlias>(Ptr) && !cast<GlobalAlias>(Ptr)->isInterposable() && 877 !cast<GlobalAlias>(Ptr)->getBaseObject()->isInterposable()) { 878 Ptr = cast<GlobalAlias>(Ptr)->getAliasee(); 879 } 880 } 881 882 auto *NewPtrTy = cast<PointerType>(Ptr->getType()); 883 884 ElemTy = NewPtrTy->getPointerElementType(); 885 886 // Preserve the address space number of the pointer. 887 if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) { 888 Ptr = ConstantExpr::getPointerCast( 889 Ptr, PointerType::getWithSamePointeeType(NewPtrTy, 890 OldPtrTy->getAddressSpace())); 891 } 892 return Ptr; 893 } 894 895 /// If we can symbolically evaluate the GEP constant expression, do so. 896 Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP, 897 ArrayRef<Constant *> Ops, 898 const DataLayout &DL, 899 const TargetLibraryInfo *TLI, 900 bool ForLoadOperand) { 901 const GEPOperator *InnermostGEP = GEP; 902 bool InBounds = GEP->isInBounds(); 903 904 Type *SrcElemTy = GEP->getSourceElementType(); 905 Type *ResElemTy = GEP->getResultElementType(); 906 Type *ResTy = GEP->getType(); 907 if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy)) 908 return nullptr; 909 910 if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, 911 GEP->getInRangeIndex(), DL, TLI)) 912 return C; 913 914 Constant *Ptr = Ops[0]; 915 if (!Ptr->getType()->isPointerTy()) 916 return nullptr; 917 918 Type *IntIdxTy = DL.getIndexType(Ptr->getType()); 919 920 // If this is a constant expr gep that is effectively computing an 921 // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12' 922 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 923 if (!isa<ConstantInt>(Ops[i])) { 924 925 // If this is "gep i8* Ptr, (sub 0, V)", fold this as: 926 // "inttoptr (sub (ptrtoint Ptr), V)" 927 if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) { 928 auto *CE = dyn_cast<ConstantExpr>(Ops[1]); 929 assert((!CE || CE->getType() == IntIdxTy) && 930 "CastGEPIndices didn't canonicalize index types!"); 931 if (CE && CE->getOpcode() == Instruction::Sub && 932 CE->getOperand(0)->isNullValue()) { 933 Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType()); 934 Res = ConstantExpr::getSub(Res, CE->getOperand(1)); 935 Res = ConstantExpr::getIntToPtr(Res, ResTy); 936 return ConstantFoldConstant(Res, DL, TLI); 937 } 938 } 939 return nullptr; 940 } 941 942 unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy); 943 APInt Offset = 944 APInt(BitWidth, 945 DL.getIndexedOffsetInType( 946 SrcElemTy, 947 makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1))); 948 Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy, ForLoadOperand); 949 950 // If this is a GEP of a GEP, fold it all into a single GEP. 951 while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) { 952 InnermostGEP = GEP; 953 InBounds &= GEP->isInBounds(); 954 955 SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end()); 956 957 // Do not try the incorporate the sub-GEP if some index is not a number. 958 bool AllConstantInt = true; 959 for (Value *NestedOp : NestedOps) 960 if (!isa<ConstantInt>(NestedOp)) { 961 AllConstantInt = false; 962 break; 963 } 964 if (!AllConstantInt) 965 break; 966 967 Ptr = cast<Constant>(GEP->getOperand(0)); 968 SrcElemTy = GEP->getSourceElementType(); 969 Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps)); 970 Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy, ForLoadOperand); 971 } 972 973 // If the base value for this address is a literal integer value, fold the 974 // getelementptr to the resulting integer value casted to the pointer type. 975 APInt BasePtr(BitWidth, 0); 976 if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) { 977 if (CE->getOpcode() == Instruction::IntToPtr) { 978 if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) 979 BasePtr = Base->getValue().zextOrTrunc(BitWidth); 980 } 981 } 982 983 auto *PTy = cast<PointerType>(Ptr->getType()); 984 if ((Ptr->isNullValue() || BasePtr != 0) && 985 !DL.isNonIntegralPointerType(PTy)) { 986 Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr); 987 return ConstantExpr::getIntToPtr(C, ResTy); 988 } 989 990 // Otherwise form a regular getelementptr. Recompute the indices so that 991 // we eliminate over-indexing of the notional static type array bounds. 992 // This makes it easy to determine if the getelementptr is "inbounds". 993 // Also, this helps GlobalOpt do SROA on GlobalVariables. 994 Type *Ty = PTy; 995 SmallVector<Constant *, 32> NewIdxs; 996 997 do { 998 if (!Ty->isStructTy()) { 999 if (Ty->isPointerTy()) { 1000 // The only pointer indexing we'll do is on the first index of the GEP. 1001 if (!NewIdxs.empty()) 1002 break; 1003 1004 Ty = SrcElemTy; 1005 1006 // Only handle pointers to sized types, not pointers to functions. 1007 if (!Ty->isSized()) 1008 return nullptr; 1009 } else { 1010 Type *NextTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0); 1011 if (!NextTy) 1012 break; 1013 Ty = NextTy; 1014 } 1015 1016 // Determine which element of the array the offset points into. 1017 APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty)); 1018 if (ElemSize == 0) { 1019 // The element size is 0. This may be [0 x Ty]*, so just use a zero 1020 // index for this level and proceed to the next level to see if it can 1021 // accommodate the offset. 1022 NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0)); 1023 } else { 1024 // The element size is non-zero divide the offset by the element 1025 // size (rounding down), to compute the index at this level. 1026 bool Overflow; 1027 APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow); 1028 if (Overflow) 1029 break; 1030 Offset -= NewIdx * ElemSize; 1031 NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx)); 1032 } 1033 } else { 1034 auto *STy = cast<StructType>(Ty); 1035 // If we end up with an offset that isn't valid for this struct type, we 1036 // can't re-form this GEP in a regular form, so bail out. The pointer 1037 // operand likely went through casts that are necessary to make the GEP 1038 // sensible. 1039 const StructLayout &SL = *DL.getStructLayout(STy); 1040 if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes())) 1041 break; 1042 1043 // Determine which field of the struct the offset points into. The 1044 // getZExtValue is fine as we've already ensured that the offset is 1045 // within the range representable by the StructLayout API. 1046 unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue()); 1047 NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1048 ElIdx)); 1049 Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx)); 1050 Ty = STy->getTypeAtIndex(ElIdx); 1051 } 1052 } while (Ty != ResElemTy); 1053 1054 // If we haven't used up the entire offset by descending the static 1055 // type, then the offset is pointing into the middle of an indivisible 1056 // member, so we can't simplify it. 1057 if (Offset != 0) 1058 return nullptr; 1059 1060 // Preserve the inrange index from the innermost GEP if possible. We must 1061 // have calculated the same indices up to and including the inrange index. 1062 Optional<unsigned> InRangeIndex; 1063 if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex()) 1064 if (SrcElemTy == InnermostGEP->getSourceElementType() && 1065 NewIdxs.size() > *LastIRIndex) { 1066 InRangeIndex = LastIRIndex; 1067 for (unsigned I = 0; I <= *LastIRIndex; ++I) 1068 if (NewIdxs[I] != InnermostGEP->getOperand(I + 1)) 1069 return nullptr; 1070 } 1071 1072 // Create a GEP. 1073 Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs, 1074 InBounds, InRangeIndex); 1075 assert(C->getType()->getPointerElementType() == Ty && 1076 "Computed GetElementPtr has unexpected type!"); 1077 1078 // If we ended up indexing a member with a type that doesn't match 1079 // the type of what the original indices indexed, add a cast. 1080 if (Ty != ResElemTy) 1081 C = FoldBitCast(C, ResTy, DL); 1082 1083 return C; 1084 } 1085 1086 /// Attempt to constant fold an instruction with the 1087 /// specified opcode and operands. If successful, the constant result is 1088 /// returned, if not, null is returned. Note that this function can fail when 1089 /// attempting to fold instructions like loads and stores, which have no 1090 /// constant expression form. 1091 Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode, 1092 ArrayRef<Constant *> Ops, 1093 const DataLayout &DL, 1094 const TargetLibraryInfo *TLI) { 1095 Type *DestTy = InstOrCE->getType(); 1096 1097 if (Instruction::isUnaryOp(Opcode)) 1098 return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL); 1099 1100 if (Instruction::isBinaryOp(Opcode)) 1101 return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL); 1102 1103 if (Instruction::isCast(Opcode)) 1104 return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL); 1105 1106 if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) { 1107 if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI, 1108 /*ForLoadOperand*/ false)) 1109 return C; 1110 1111 return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0], 1112 Ops.slice(1), GEP->isInBounds(), 1113 GEP->getInRangeIndex()); 1114 } 1115 1116 if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) 1117 return CE->getWithOperands(Ops); 1118 1119 switch (Opcode) { 1120 default: return nullptr; 1121 case Instruction::ICmp: 1122 case Instruction::FCmp: llvm_unreachable("Invalid for compares"); 1123 case Instruction::Freeze: 1124 return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr; 1125 case Instruction::Call: 1126 if (auto *F = dyn_cast<Function>(Ops.back())) { 1127 const auto *Call = cast<CallBase>(InstOrCE); 1128 if (canConstantFoldCallTo(Call, F)) 1129 return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI); 1130 } 1131 return nullptr; 1132 case Instruction::Select: 1133 return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]); 1134 case Instruction::ExtractElement: 1135 return ConstantExpr::getExtractElement(Ops[0], Ops[1]); 1136 case Instruction::ExtractValue: 1137 return ConstantExpr::getExtractValue( 1138 Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices()); 1139 case Instruction::InsertElement: 1140 return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]); 1141 case Instruction::ShuffleVector: 1142 return ConstantExpr::getShuffleVector( 1143 Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask()); 1144 } 1145 } 1146 1147 } // end anonymous namespace 1148 1149 //===----------------------------------------------------------------------===// 1150 // Constant Folding public APIs 1151 //===----------------------------------------------------------------------===// 1152 1153 namespace { 1154 1155 Constant * 1156 ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL, 1157 const TargetLibraryInfo *TLI, 1158 SmallDenseMap<Constant *, Constant *> &FoldedOps) { 1159 if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C)) 1160 return const_cast<Constant *>(C); 1161 1162 SmallVector<Constant *, 8> Ops; 1163 for (const Use &OldU : C->operands()) { 1164 Constant *OldC = cast<Constant>(&OldU); 1165 Constant *NewC = OldC; 1166 // Recursively fold the ConstantExpr's operands. If we have already folded 1167 // a ConstantExpr, we don't have to process it again. 1168 if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) { 1169 auto It = FoldedOps.find(OldC); 1170 if (It == FoldedOps.end()) { 1171 NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps); 1172 FoldedOps.insert({OldC, NewC}); 1173 } else { 1174 NewC = It->second; 1175 } 1176 } 1177 Ops.push_back(NewC); 1178 } 1179 1180 if (auto *CE = dyn_cast<ConstantExpr>(C)) { 1181 if (CE->isCompare()) 1182 return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1], 1183 DL, TLI); 1184 1185 return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI); 1186 } 1187 1188 assert(isa<ConstantVector>(C)); 1189 return ConstantVector::get(Ops); 1190 } 1191 1192 } // end anonymous namespace 1193 1194 Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL, 1195 const TargetLibraryInfo *TLI) { 1196 // Handle PHI nodes quickly here... 1197 if (auto *PN = dyn_cast<PHINode>(I)) { 1198 Constant *CommonValue = nullptr; 1199 1200 SmallDenseMap<Constant *, Constant *> FoldedOps; 1201 for (Value *Incoming : PN->incoming_values()) { 1202 // If the incoming value is undef then skip it. Note that while we could 1203 // skip the value if it is equal to the phi node itself we choose not to 1204 // because that would break the rule that constant folding only applies if 1205 // all operands are constants. 1206 if (isa<UndefValue>(Incoming)) 1207 continue; 1208 // If the incoming value is not a constant, then give up. 1209 auto *C = dyn_cast<Constant>(Incoming); 1210 if (!C) 1211 return nullptr; 1212 // Fold the PHI's operands. 1213 C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps); 1214 // If the incoming value is a different constant to 1215 // the one we saw previously, then give up. 1216 if (CommonValue && C != CommonValue) 1217 return nullptr; 1218 CommonValue = C; 1219 } 1220 1221 // If we reach here, all incoming values are the same constant or undef. 1222 return CommonValue ? CommonValue : UndefValue::get(PN->getType()); 1223 } 1224 1225 // Scan the operand list, checking to see if they are all constants, if so, 1226 // hand off to ConstantFoldInstOperandsImpl. 1227 if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); })) 1228 return nullptr; 1229 1230 SmallDenseMap<Constant *, Constant *> FoldedOps; 1231 SmallVector<Constant *, 8> Ops; 1232 for (const Use &OpU : I->operands()) { 1233 auto *Op = cast<Constant>(&OpU); 1234 // Fold the Instruction's operands. 1235 Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps); 1236 Ops.push_back(Op); 1237 } 1238 1239 if (const auto *CI = dyn_cast<CmpInst>(I)) 1240 return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1], 1241 DL, TLI); 1242 1243 if (const auto *LI = dyn_cast<LoadInst>(I)) { 1244 if (LI->isVolatile()) 1245 return nullptr; 1246 return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL); 1247 } 1248 1249 if (auto *IVI = dyn_cast<InsertValueInst>(I)) 1250 return ConstantExpr::getInsertValue(Ops[0], Ops[1], IVI->getIndices()); 1251 1252 if (auto *EVI = dyn_cast<ExtractValueInst>(I)) 1253 return ConstantExpr::getExtractValue(Ops[0], EVI->getIndices()); 1254 1255 return ConstantFoldInstOperands(I, Ops, DL, TLI); 1256 } 1257 1258 Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL, 1259 const TargetLibraryInfo *TLI) { 1260 SmallDenseMap<Constant *, Constant *> FoldedOps; 1261 return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps); 1262 } 1263 1264 Constant *llvm::ConstantFoldInstOperands(Instruction *I, 1265 ArrayRef<Constant *> Ops, 1266 const DataLayout &DL, 1267 const TargetLibraryInfo *TLI) { 1268 return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI); 1269 } 1270 1271 Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, 1272 Constant *Ops0, Constant *Ops1, 1273 const DataLayout &DL, 1274 const TargetLibraryInfo *TLI) { 1275 // fold: icmp (inttoptr x), null -> icmp x, 0 1276 // fold: icmp null, (inttoptr x) -> icmp 0, x 1277 // fold: icmp (ptrtoint x), 0 -> icmp x, null 1278 // fold: icmp 0, (ptrtoint x) -> icmp null, x 1279 // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y 1280 // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y 1281 // 1282 // FIXME: The following comment is out of data and the DataLayout is here now. 1283 // ConstantExpr::getCompare cannot do this, because it doesn't have DL 1284 // around to know if bit truncation is happening. 1285 if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) { 1286 if (Ops1->isNullValue()) { 1287 if (CE0->getOpcode() == Instruction::IntToPtr) { 1288 Type *IntPtrTy = DL.getIntPtrType(CE0->getType()); 1289 // Convert the integer value to the right size to ensure we get the 1290 // proper extension or truncation. 1291 Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0), 1292 IntPtrTy, false); 1293 Constant *Null = Constant::getNullValue(C->getType()); 1294 return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI); 1295 } 1296 1297 // Only do this transformation if the int is intptrty in size, otherwise 1298 // there is a truncation or extension that we aren't modeling. 1299 if (CE0->getOpcode() == Instruction::PtrToInt) { 1300 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType()); 1301 if (CE0->getType() == IntPtrTy) { 1302 Constant *C = CE0->getOperand(0); 1303 Constant *Null = Constant::getNullValue(C->getType()); 1304 return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI); 1305 } 1306 } 1307 } 1308 1309 if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) { 1310 if (CE0->getOpcode() == CE1->getOpcode()) { 1311 if (CE0->getOpcode() == Instruction::IntToPtr) { 1312 Type *IntPtrTy = DL.getIntPtrType(CE0->getType()); 1313 1314 // Convert the integer value to the right size to ensure we get the 1315 // proper extension or truncation. 1316 Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0), 1317 IntPtrTy, false); 1318 Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0), 1319 IntPtrTy, false); 1320 return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI); 1321 } 1322 1323 // Only do this transformation if the int is intptrty in size, otherwise 1324 // there is a truncation or extension that we aren't modeling. 1325 if (CE0->getOpcode() == Instruction::PtrToInt) { 1326 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType()); 1327 if (CE0->getType() == IntPtrTy && 1328 CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) { 1329 return ConstantFoldCompareInstOperands( 1330 Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI); 1331 } 1332 } 1333 } 1334 } 1335 1336 // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0) 1337 // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0) 1338 if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) && 1339 CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) { 1340 Constant *LHS = ConstantFoldCompareInstOperands( 1341 Predicate, CE0->getOperand(0), Ops1, DL, TLI); 1342 Constant *RHS = ConstantFoldCompareInstOperands( 1343 Predicate, CE0->getOperand(1), Ops1, DL, TLI); 1344 unsigned OpC = 1345 Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1346 return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL); 1347 } 1348 } else if (isa<ConstantExpr>(Ops1)) { 1349 // If RHS is a constant expression, but the left side isn't, swap the 1350 // operands and try again. 1351 Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate); 1352 return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI); 1353 } 1354 1355 return ConstantExpr::getCompare(Predicate, Ops0, Ops1); 1356 } 1357 1358 Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, 1359 const DataLayout &DL) { 1360 assert(Instruction::isUnaryOp(Opcode)); 1361 1362 return ConstantExpr::get(Opcode, Op); 1363 } 1364 1365 Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, 1366 Constant *RHS, 1367 const DataLayout &DL) { 1368 assert(Instruction::isBinaryOp(Opcode)); 1369 if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS)) 1370 if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL)) 1371 return C; 1372 1373 return ConstantExpr::get(Opcode, LHS, RHS); 1374 } 1375 1376 Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C, 1377 Type *DestTy, const DataLayout &DL) { 1378 assert(Instruction::isCast(Opcode)); 1379 switch (Opcode) { 1380 default: 1381 llvm_unreachable("Missing case"); 1382 case Instruction::PtrToInt: 1383 // If the input is a inttoptr, eliminate the pair. This requires knowing 1384 // the width of a pointer, so it can't be done in ConstantExpr::getCast. 1385 if (auto *CE = dyn_cast<ConstantExpr>(C)) { 1386 if (CE->getOpcode() == Instruction::IntToPtr) { 1387 Constant *Input = CE->getOperand(0); 1388 unsigned InWidth = Input->getType()->getScalarSizeInBits(); 1389 unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType()); 1390 if (PtrWidth < InWidth) { 1391 Constant *Mask = 1392 ConstantInt::get(CE->getContext(), 1393 APInt::getLowBitsSet(InWidth, PtrWidth)); 1394 Input = ConstantExpr::getAnd(Input, Mask); 1395 } 1396 // Do a zext or trunc to get to the dest size. 1397 return ConstantExpr::getIntegerCast(Input, DestTy, false); 1398 } 1399 } 1400 return ConstantExpr::getCast(Opcode, C, DestTy); 1401 case Instruction::IntToPtr: 1402 // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if 1403 // the int size is >= the ptr size and the address spaces are the same. 1404 // This requires knowing the width of a pointer, so it can't be done in 1405 // ConstantExpr::getCast. 1406 if (auto *CE = dyn_cast<ConstantExpr>(C)) { 1407 if (CE->getOpcode() == Instruction::PtrToInt) { 1408 Constant *SrcPtr = CE->getOperand(0); 1409 unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType()); 1410 unsigned MidIntSize = CE->getType()->getScalarSizeInBits(); 1411 1412 if (MidIntSize >= SrcPtrSize) { 1413 unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace(); 1414 if (SrcAS == DestTy->getPointerAddressSpace()) 1415 return FoldBitCast(CE->getOperand(0), DestTy, DL); 1416 } 1417 } 1418 } 1419 1420 return ConstantExpr::getCast(Opcode, C, DestTy); 1421 case Instruction::Trunc: 1422 case Instruction::ZExt: 1423 case Instruction::SExt: 1424 case Instruction::FPTrunc: 1425 case Instruction::FPExt: 1426 case Instruction::UIToFP: 1427 case Instruction::SIToFP: 1428 case Instruction::FPToUI: 1429 case Instruction::FPToSI: 1430 case Instruction::AddrSpaceCast: 1431 return ConstantExpr::getCast(Opcode, C, DestTy); 1432 case Instruction::BitCast: 1433 return FoldBitCast(C, DestTy, DL); 1434 } 1435 } 1436 1437 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C, 1438 ConstantExpr *CE, 1439 Type *Ty, 1440 const DataLayout &DL) { 1441 if (!CE->getOperand(1)->isNullValue()) 1442 return nullptr; // Do not allow stepping over the value! 1443 1444 // Loop over all of the operands, tracking down which value we are 1445 // addressing. 1446 for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) { 1447 C = C->getAggregateElement(CE->getOperand(i)); 1448 if (!C) 1449 return nullptr; 1450 } 1451 return ConstantFoldLoadThroughBitcast(C, Ty, DL); 1452 } 1453 1454 Constant * 1455 llvm::ConstantFoldLoadThroughGEPIndices(Constant *C, 1456 ArrayRef<Constant *> Indices) { 1457 // Loop over all of the operands, tracking down which value we are 1458 // addressing. 1459 for (Constant *Index : Indices) { 1460 C = C->getAggregateElement(Index); 1461 if (!C) 1462 return nullptr; 1463 } 1464 return C; 1465 } 1466 1467 //===----------------------------------------------------------------------===// 1468 // Constant Folding for Calls 1469 // 1470 1471 bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) { 1472 if (Call->isNoBuiltin()) 1473 return false; 1474 switch (F->getIntrinsicID()) { 1475 // Operations that do not operate floating-point numbers and do not depend on 1476 // FP environment can be folded even in strictfp functions. 1477 case Intrinsic::bswap: 1478 case Intrinsic::ctpop: 1479 case Intrinsic::ctlz: 1480 case Intrinsic::cttz: 1481 case Intrinsic::fshl: 1482 case Intrinsic::fshr: 1483 case Intrinsic::launder_invariant_group: 1484 case Intrinsic::strip_invariant_group: 1485 case Intrinsic::masked_load: 1486 case Intrinsic::get_active_lane_mask: 1487 case Intrinsic::abs: 1488 case Intrinsic::smax: 1489 case Intrinsic::smin: 1490 case Intrinsic::umax: 1491 case Intrinsic::umin: 1492 case Intrinsic::sadd_with_overflow: 1493 case Intrinsic::uadd_with_overflow: 1494 case Intrinsic::ssub_with_overflow: 1495 case Intrinsic::usub_with_overflow: 1496 case Intrinsic::smul_with_overflow: 1497 case Intrinsic::umul_with_overflow: 1498 case Intrinsic::sadd_sat: 1499 case Intrinsic::uadd_sat: 1500 case Intrinsic::ssub_sat: 1501 case Intrinsic::usub_sat: 1502 case Intrinsic::smul_fix: 1503 case Intrinsic::smul_fix_sat: 1504 case Intrinsic::bitreverse: 1505 case Intrinsic::is_constant: 1506 case Intrinsic::vector_reduce_add: 1507 case Intrinsic::vector_reduce_mul: 1508 case Intrinsic::vector_reduce_and: 1509 case Intrinsic::vector_reduce_or: 1510 case Intrinsic::vector_reduce_xor: 1511 case Intrinsic::vector_reduce_smin: 1512 case Intrinsic::vector_reduce_smax: 1513 case Intrinsic::vector_reduce_umin: 1514 case Intrinsic::vector_reduce_umax: 1515 // Target intrinsics 1516 case Intrinsic::amdgcn_perm: 1517 case Intrinsic::arm_mve_vctp8: 1518 case Intrinsic::arm_mve_vctp16: 1519 case Intrinsic::arm_mve_vctp32: 1520 case Intrinsic::arm_mve_vctp64: 1521 case Intrinsic::aarch64_sve_convert_from_svbool: 1522 // WebAssembly float semantics are always known 1523 case Intrinsic::wasm_trunc_signed: 1524 case Intrinsic::wasm_trunc_unsigned: 1525 return true; 1526 1527 // Floating point operations cannot be folded in strictfp functions in 1528 // general case. They can be folded if FP environment is known to compiler. 1529 case Intrinsic::minnum: 1530 case Intrinsic::maxnum: 1531 case Intrinsic::minimum: 1532 case Intrinsic::maximum: 1533 case Intrinsic::log: 1534 case Intrinsic::log2: 1535 case Intrinsic::log10: 1536 case Intrinsic::exp: 1537 case Intrinsic::exp2: 1538 case Intrinsic::sqrt: 1539 case Intrinsic::sin: 1540 case Intrinsic::cos: 1541 case Intrinsic::pow: 1542 case Intrinsic::powi: 1543 case Intrinsic::fma: 1544 case Intrinsic::fmuladd: 1545 case Intrinsic::fptoui_sat: 1546 case Intrinsic::fptosi_sat: 1547 case Intrinsic::convert_from_fp16: 1548 case Intrinsic::convert_to_fp16: 1549 case Intrinsic::amdgcn_cos: 1550 case Intrinsic::amdgcn_cubeid: 1551 case Intrinsic::amdgcn_cubema: 1552 case Intrinsic::amdgcn_cubesc: 1553 case Intrinsic::amdgcn_cubetc: 1554 case Intrinsic::amdgcn_fmul_legacy: 1555 case Intrinsic::amdgcn_fma_legacy: 1556 case Intrinsic::amdgcn_fract: 1557 case Intrinsic::amdgcn_ldexp: 1558 case Intrinsic::amdgcn_sin: 1559 // The intrinsics below depend on rounding mode in MXCSR. 1560 case Intrinsic::x86_sse_cvtss2si: 1561 case Intrinsic::x86_sse_cvtss2si64: 1562 case Intrinsic::x86_sse_cvttss2si: 1563 case Intrinsic::x86_sse_cvttss2si64: 1564 case Intrinsic::x86_sse2_cvtsd2si: 1565 case Intrinsic::x86_sse2_cvtsd2si64: 1566 case Intrinsic::x86_sse2_cvttsd2si: 1567 case Intrinsic::x86_sse2_cvttsd2si64: 1568 case Intrinsic::x86_avx512_vcvtss2si32: 1569 case Intrinsic::x86_avx512_vcvtss2si64: 1570 case Intrinsic::x86_avx512_cvttss2si: 1571 case Intrinsic::x86_avx512_cvttss2si64: 1572 case Intrinsic::x86_avx512_vcvtsd2si32: 1573 case Intrinsic::x86_avx512_vcvtsd2si64: 1574 case Intrinsic::x86_avx512_cvttsd2si: 1575 case Intrinsic::x86_avx512_cvttsd2si64: 1576 case Intrinsic::x86_avx512_vcvtss2usi32: 1577 case Intrinsic::x86_avx512_vcvtss2usi64: 1578 case Intrinsic::x86_avx512_cvttss2usi: 1579 case Intrinsic::x86_avx512_cvttss2usi64: 1580 case Intrinsic::x86_avx512_vcvtsd2usi32: 1581 case Intrinsic::x86_avx512_vcvtsd2usi64: 1582 case Intrinsic::x86_avx512_cvttsd2usi: 1583 case Intrinsic::x86_avx512_cvttsd2usi64: 1584 return !Call->isStrictFP(); 1585 1586 // Sign operations are actually bitwise operations, they do not raise 1587 // exceptions even for SNANs. 1588 case Intrinsic::fabs: 1589 case Intrinsic::copysign: 1590 // Non-constrained variants of rounding operations means default FP 1591 // environment, they can be folded in any case. 1592 case Intrinsic::ceil: 1593 case Intrinsic::floor: 1594 case Intrinsic::round: 1595 case Intrinsic::roundeven: 1596 case Intrinsic::trunc: 1597 case Intrinsic::nearbyint: 1598 case Intrinsic::rint: 1599 // Constrained intrinsics can be folded if FP environment is known 1600 // to compiler. 1601 case Intrinsic::experimental_constrained_ceil: 1602 case Intrinsic::experimental_constrained_floor: 1603 case Intrinsic::experimental_constrained_round: 1604 case Intrinsic::experimental_constrained_roundeven: 1605 case Intrinsic::experimental_constrained_trunc: 1606 case Intrinsic::experimental_constrained_nearbyint: 1607 case Intrinsic::experimental_constrained_rint: 1608 return true; 1609 default: 1610 return false; 1611 case Intrinsic::not_intrinsic: break; 1612 } 1613 1614 if (!F->hasName() || Call->isStrictFP()) 1615 return false; 1616 1617 // In these cases, the check of the length is required. We don't want to 1618 // return true for a name like "cos\0blah" which strcmp would return equal to 1619 // "cos", but has length 8. 1620 StringRef Name = F->getName(); 1621 switch (Name[0]) { 1622 default: 1623 return false; 1624 case 'a': 1625 return Name == "acos" || Name == "acosf" || 1626 Name == "asin" || Name == "asinf" || 1627 Name == "atan" || Name == "atanf" || 1628 Name == "atan2" || Name == "atan2f"; 1629 case 'c': 1630 return Name == "ceil" || Name == "ceilf" || 1631 Name == "cos" || Name == "cosf" || 1632 Name == "cosh" || Name == "coshf"; 1633 case 'e': 1634 return Name == "exp" || Name == "expf" || 1635 Name == "exp2" || Name == "exp2f"; 1636 case 'f': 1637 return Name == "fabs" || Name == "fabsf" || 1638 Name == "floor" || Name == "floorf" || 1639 Name == "fmod" || Name == "fmodf"; 1640 case 'l': 1641 return Name == "log" || Name == "logf" || 1642 Name == "log2" || Name == "log2f" || 1643 Name == "log10" || Name == "log10f"; 1644 case 'n': 1645 return Name == "nearbyint" || Name == "nearbyintf"; 1646 case 'p': 1647 return Name == "pow" || Name == "powf"; 1648 case 'r': 1649 return Name == "remainder" || Name == "remainderf" || 1650 Name == "rint" || Name == "rintf" || 1651 Name == "round" || Name == "roundf"; 1652 case 's': 1653 return Name == "sin" || Name == "sinf" || 1654 Name == "sinh" || Name == "sinhf" || 1655 Name == "sqrt" || Name == "sqrtf"; 1656 case 't': 1657 return Name == "tan" || Name == "tanf" || 1658 Name == "tanh" || Name == "tanhf" || 1659 Name == "trunc" || Name == "truncf"; 1660 case '_': 1661 // Check for various function names that get used for the math functions 1662 // when the header files are preprocessed with the macro 1663 // __FINITE_MATH_ONLY__ enabled. 1664 // The '12' here is the length of the shortest name that can match. 1665 // We need to check the size before looking at Name[1] and Name[2] 1666 // so we may as well check a limit that will eliminate mismatches. 1667 if (Name.size() < 12 || Name[1] != '_') 1668 return false; 1669 switch (Name[2]) { 1670 default: 1671 return false; 1672 case 'a': 1673 return Name == "__acos_finite" || Name == "__acosf_finite" || 1674 Name == "__asin_finite" || Name == "__asinf_finite" || 1675 Name == "__atan2_finite" || Name == "__atan2f_finite"; 1676 case 'c': 1677 return Name == "__cosh_finite" || Name == "__coshf_finite"; 1678 case 'e': 1679 return Name == "__exp_finite" || Name == "__expf_finite" || 1680 Name == "__exp2_finite" || Name == "__exp2f_finite"; 1681 case 'l': 1682 return Name == "__log_finite" || Name == "__logf_finite" || 1683 Name == "__log10_finite" || Name == "__log10f_finite"; 1684 case 'p': 1685 return Name == "__pow_finite" || Name == "__powf_finite"; 1686 case 's': 1687 return Name == "__sinh_finite" || Name == "__sinhf_finite"; 1688 } 1689 } 1690 } 1691 1692 namespace { 1693 1694 Constant *GetConstantFoldFPValue(double V, Type *Ty) { 1695 if (Ty->isHalfTy() || Ty->isFloatTy()) { 1696 APFloat APF(V); 1697 bool unused; 1698 APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused); 1699 return ConstantFP::get(Ty->getContext(), APF); 1700 } 1701 if (Ty->isDoubleTy()) 1702 return ConstantFP::get(Ty->getContext(), APFloat(V)); 1703 llvm_unreachable("Can only constant fold half/float/double"); 1704 } 1705 1706 /// Clear the floating-point exception state. 1707 inline void llvm_fenv_clearexcept() { 1708 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT 1709 feclearexcept(FE_ALL_EXCEPT); 1710 #endif 1711 errno = 0; 1712 } 1713 1714 /// Test if a floating-point exception was raised. 1715 inline bool llvm_fenv_testexcept() { 1716 int errno_val = errno; 1717 if (errno_val == ERANGE || errno_val == EDOM) 1718 return true; 1719 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT 1720 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT)) 1721 return true; 1722 #endif 1723 return false; 1724 } 1725 1726 Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V, 1727 Type *Ty) { 1728 llvm_fenv_clearexcept(); 1729 double Result = NativeFP(V.convertToDouble()); 1730 if (llvm_fenv_testexcept()) { 1731 llvm_fenv_clearexcept(); 1732 return nullptr; 1733 } 1734 1735 return GetConstantFoldFPValue(Result, Ty); 1736 } 1737 1738 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), 1739 const APFloat &V, const APFloat &W, Type *Ty) { 1740 llvm_fenv_clearexcept(); 1741 double Result = NativeFP(V.convertToDouble(), W.convertToDouble()); 1742 if (llvm_fenv_testexcept()) { 1743 llvm_fenv_clearexcept(); 1744 return nullptr; 1745 } 1746 1747 return GetConstantFoldFPValue(Result, Ty); 1748 } 1749 1750 Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) { 1751 FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType()); 1752 if (!VT) 1753 return nullptr; 1754 1755 // This isn't strictly necessary, but handle the special/common case of zero: 1756 // all integer reductions of a zero input produce zero. 1757 if (isa<ConstantAggregateZero>(Op)) 1758 return ConstantInt::get(VT->getElementType(), 0); 1759 1760 // This is the same as the underlying binops - poison propagates. 1761 if (isa<PoisonValue>(Op) || Op->containsPoisonElement()) 1762 return PoisonValue::get(VT->getElementType()); 1763 1764 // TODO: Handle undef. 1765 if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op)) 1766 return nullptr; 1767 1768 auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U)); 1769 if (!EltC) 1770 return nullptr; 1771 1772 APInt Acc = EltC->getValue(); 1773 for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) { 1774 if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I)))) 1775 return nullptr; 1776 const APInt &X = EltC->getValue(); 1777 switch (IID) { 1778 case Intrinsic::vector_reduce_add: 1779 Acc = Acc + X; 1780 break; 1781 case Intrinsic::vector_reduce_mul: 1782 Acc = Acc * X; 1783 break; 1784 case Intrinsic::vector_reduce_and: 1785 Acc = Acc & X; 1786 break; 1787 case Intrinsic::vector_reduce_or: 1788 Acc = Acc | X; 1789 break; 1790 case Intrinsic::vector_reduce_xor: 1791 Acc = Acc ^ X; 1792 break; 1793 case Intrinsic::vector_reduce_smin: 1794 Acc = APIntOps::smin(Acc, X); 1795 break; 1796 case Intrinsic::vector_reduce_smax: 1797 Acc = APIntOps::smax(Acc, X); 1798 break; 1799 case Intrinsic::vector_reduce_umin: 1800 Acc = APIntOps::umin(Acc, X); 1801 break; 1802 case Intrinsic::vector_reduce_umax: 1803 Acc = APIntOps::umax(Acc, X); 1804 break; 1805 } 1806 } 1807 1808 return ConstantInt::get(Op->getContext(), Acc); 1809 } 1810 1811 /// Attempt to fold an SSE floating point to integer conversion of a constant 1812 /// floating point. If roundTowardZero is false, the default IEEE rounding is 1813 /// used (toward nearest, ties to even). This matches the behavior of the 1814 /// non-truncating SSE instructions in the default rounding mode. The desired 1815 /// integer type Ty is used to select how many bits are available for the 1816 /// result. Returns null if the conversion cannot be performed, otherwise 1817 /// returns the Constant value resulting from the conversion. 1818 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero, 1819 Type *Ty, bool IsSigned) { 1820 // All of these conversion intrinsics form an integer of at most 64bits. 1821 unsigned ResultWidth = Ty->getIntegerBitWidth(); 1822 assert(ResultWidth <= 64 && 1823 "Can only constant fold conversions to 64 and 32 bit ints"); 1824 1825 uint64_t UIntVal; 1826 bool isExact = false; 1827 APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero 1828 : APFloat::rmNearestTiesToEven; 1829 APFloat::opStatus status = 1830 Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth, 1831 IsSigned, mode, &isExact); 1832 if (status != APFloat::opOK && 1833 (!roundTowardZero || status != APFloat::opInexact)) 1834 return nullptr; 1835 return ConstantInt::get(Ty, UIntVal, IsSigned); 1836 } 1837 1838 double getValueAsDouble(ConstantFP *Op) { 1839 Type *Ty = Op->getType(); 1840 1841 if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) 1842 return Op->getValueAPF().convertToDouble(); 1843 1844 bool unused; 1845 APFloat APF = Op->getValueAPF(); 1846 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused); 1847 return APF.convertToDouble(); 1848 } 1849 1850 static bool getConstIntOrUndef(Value *Op, const APInt *&C) { 1851 if (auto *CI = dyn_cast<ConstantInt>(Op)) { 1852 C = &CI->getValue(); 1853 return true; 1854 } 1855 if (isa<UndefValue>(Op)) { 1856 C = nullptr; 1857 return true; 1858 } 1859 return false; 1860 } 1861 1862 static Constant *ConstantFoldScalarCall1(StringRef Name, 1863 Intrinsic::ID IntrinsicID, 1864 Type *Ty, 1865 ArrayRef<Constant *> Operands, 1866 const TargetLibraryInfo *TLI, 1867 const CallBase *Call) { 1868 assert(Operands.size() == 1 && "Wrong number of operands."); 1869 1870 if (IntrinsicID == Intrinsic::is_constant) { 1871 // We know we have a "Constant" argument. But we want to only 1872 // return true for manifest constants, not those that depend on 1873 // constants with unknowable values, e.g. GlobalValue or BlockAddress. 1874 if (Operands[0]->isManifestConstant()) 1875 return ConstantInt::getTrue(Ty->getContext()); 1876 return nullptr; 1877 } 1878 if (isa<UndefValue>(Operands[0])) { 1879 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN. 1880 // ctpop() is between 0 and bitwidth, pick 0 for undef. 1881 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input). 1882 if (IntrinsicID == Intrinsic::cos || 1883 IntrinsicID == Intrinsic::ctpop || 1884 IntrinsicID == Intrinsic::fptoui_sat || 1885 IntrinsicID == Intrinsic::fptosi_sat) 1886 return Constant::getNullValue(Ty); 1887 if (IntrinsicID == Intrinsic::bswap || 1888 IntrinsicID == Intrinsic::bitreverse || 1889 IntrinsicID == Intrinsic::launder_invariant_group || 1890 IntrinsicID == Intrinsic::strip_invariant_group) 1891 return Operands[0]; 1892 } 1893 1894 if (isa<ConstantPointerNull>(Operands[0])) { 1895 // launder(null) == null == strip(null) iff in addrspace 0 1896 if (IntrinsicID == Intrinsic::launder_invariant_group || 1897 IntrinsicID == Intrinsic::strip_invariant_group) { 1898 // If instruction is not yet put in a basic block (e.g. when cloning 1899 // a function during inlining), Call's caller may not be available. 1900 // So check Call's BB first before querying Call->getCaller. 1901 const Function *Caller = 1902 Call->getParent() ? Call->getCaller() : nullptr; 1903 if (Caller && 1904 !NullPointerIsDefined( 1905 Caller, Operands[0]->getType()->getPointerAddressSpace())) { 1906 return Operands[0]; 1907 } 1908 return nullptr; 1909 } 1910 } 1911 1912 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) { 1913 if (IntrinsicID == Intrinsic::convert_to_fp16) { 1914 APFloat Val(Op->getValueAPF()); 1915 1916 bool lost = false; 1917 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost); 1918 1919 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt()); 1920 } 1921 1922 APFloat U = Op->getValueAPF(); 1923 1924 if (IntrinsicID == Intrinsic::wasm_trunc_signed || 1925 IntrinsicID == Intrinsic::wasm_trunc_unsigned) { 1926 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed; 1927 1928 if (U.isNaN()) 1929 return nullptr; 1930 1931 unsigned Width = Ty->getIntegerBitWidth(); 1932 APSInt Int(Width, !Signed); 1933 bool IsExact = false; 1934 APFloat::opStatus Status = 1935 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact); 1936 1937 if (Status == APFloat::opOK || Status == APFloat::opInexact) 1938 return ConstantInt::get(Ty, Int); 1939 1940 return nullptr; 1941 } 1942 1943 if (IntrinsicID == Intrinsic::fptoui_sat || 1944 IntrinsicID == Intrinsic::fptosi_sat) { 1945 // convertToInteger() already has the desired saturation semantics. 1946 APSInt Int(Ty->getIntegerBitWidth(), 1947 IntrinsicID == Intrinsic::fptoui_sat); 1948 bool IsExact; 1949 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact); 1950 return ConstantInt::get(Ty, Int); 1951 } 1952 1953 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) 1954 return nullptr; 1955 1956 // Use internal versions of these intrinsics. 1957 1958 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) { 1959 U.roundToIntegral(APFloat::rmNearestTiesToEven); 1960 return ConstantFP::get(Ty->getContext(), U); 1961 } 1962 1963 if (IntrinsicID == Intrinsic::round) { 1964 U.roundToIntegral(APFloat::rmNearestTiesToAway); 1965 return ConstantFP::get(Ty->getContext(), U); 1966 } 1967 1968 if (IntrinsicID == Intrinsic::roundeven) { 1969 U.roundToIntegral(APFloat::rmNearestTiesToEven); 1970 return ConstantFP::get(Ty->getContext(), U); 1971 } 1972 1973 if (IntrinsicID == Intrinsic::ceil) { 1974 U.roundToIntegral(APFloat::rmTowardPositive); 1975 return ConstantFP::get(Ty->getContext(), U); 1976 } 1977 1978 if (IntrinsicID == Intrinsic::floor) { 1979 U.roundToIntegral(APFloat::rmTowardNegative); 1980 return ConstantFP::get(Ty->getContext(), U); 1981 } 1982 1983 if (IntrinsicID == Intrinsic::trunc) { 1984 U.roundToIntegral(APFloat::rmTowardZero); 1985 return ConstantFP::get(Ty->getContext(), U); 1986 } 1987 1988 if (IntrinsicID == Intrinsic::fabs) { 1989 U.clearSign(); 1990 return ConstantFP::get(Ty->getContext(), U); 1991 } 1992 1993 if (IntrinsicID == Intrinsic::amdgcn_fract) { 1994 // The v_fract instruction behaves like the OpenCL spec, which defines 1995 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is 1996 // there to prevent fract(-small) from returning 1.0. It returns the 1997 // largest positive floating-point number less than 1.0." 1998 APFloat FloorU(U); 1999 FloorU.roundToIntegral(APFloat::rmTowardNegative); 2000 APFloat FractU(U - FloorU); 2001 APFloat AlmostOne(U.getSemantics(), 1); 2002 AlmostOne.next(/*nextDown*/ true); 2003 return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne)); 2004 } 2005 2006 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not 2007 // raise FP exceptions, unless the argument is signaling NaN. 2008 2009 Optional<APFloat::roundingMode> RM; 2010 switch (IntrinsicID) { 2011 default: 2012 break; 2013 case Intrinsic::experimental_constrained_nearbyint: 2014 case Intrinsic::experimental_constrained_rint: { 2015 auto CI = cast<ConstrainedFPIntrinsic>(Call); 2016 RM = CI->getRoundingMode(); 2017 if (!RM || RM.getValue() == RoundingMode::Dynamic) 2018 return nullptr; 2019 break; 2020 } 2021 case Intrinsic::experimental_constrained_round: 2022 RM = APFloat::rmNearestTiesToAway; 2023 break; 2024 case Intrinsic::experimental_constrained_ceil: 2025 RM = APFloat::rmTowardPositive; 2026 break; 2027 case Intrinsic::experimental_constrained_floor: 2028 RM = APFloat::rmTowardNegative; 2029 break; 2030 case Intrinsic::experimental_constrained_trunc: 2031 RM = APFloat::rmTowardZero; 2032 break; 2033 } 2034 if (RM) { 2035 auto CI = cast<ConstrainedFPIntrinsic>(Call); 2036 if (U.isFinite()) { 2037 APFloat::opStatus St = U.roundToIntegral(*RM); 2038 if (IntrinsicID == Intrinsic::experimental_constrained_rint && 2039 St == APFloat::opInexact) { 2040 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior(); 2041 if (EB && *EB == fp::ebStrict) 2042 return nullptr; 2043 } 2044 } else if (U.isSignaling()) { 2045 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior(); 2046 if (EB && *EB != fp::ebIgnore) 2047 return nullptr; 2048 U = APFloat::getQNaN(U.getSemantics()); 2049 } 2050 return ConstantFP::get(Ty->getContext(), U); 2051 } 2052 2053 /// We only fold functions with finite arguments. Folding NaN and inf is 2054 /// likely to be aborted with an exception anyway, and some host libms 2055 /// have known errors raising exceptions. 2056 if (!U.isFinite()) 2057 return nullptr; 2058 2059 /// Currently APFloat versions of these functions do not exist, so we use 2060 /// the host native double versions. Float versions are not called 2061 /// directly but for all these it is true (float)(f((double)arg)) == 2062 /// f(arg). Long double not supported yet. 2063 APFloat APF = Op->getValueAPF(); 2064 2065 switch (IntrinsicID) { 2066 default: break; 2067 case Intrinsic::log: 2068 return ConstantFoldFP(log, APF, Ty); 2069 case Intrinsic::log2: 2070 // TODO: What about hosts that lack a C99 library? 2071 return ConstantFoldFP(Log2, APF, Ty); 2072 case Intrinsic::log10: 2073 // TODO: What about hosts that lack a C99 library? 2074 return ConstantFoldFP(log10, APF, Ty); 2075 case Intrinsic::exp: 2076 return ConstantFoldFP(exp, APF, Ty); 2077 case Intrinsic::exp2: 2078 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library. 2079 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty); 2080 case Intrinsic::sin: 2081 return ConstantFoldFP(sin, APF, Ty); 2082 case Intrinsic::cos: 2083 return ConstantFoldFP(cos, APF, Ty); 2084 case Intrinsic::sqrt: 2085 return ConstantFoldFP(sqrt, APF, Ty); 2086 case Intrinsic::amdgcn_cos: 2087 case Intrinsic::amdgcn_sin: { 2088 double V = getValueAsDouble(Op); 2089 if (V < -256.0 || V > 256.0) 2090 // The gfx8 and gfx9 architectures handle arguments outside the range 2091 // [-256, 256] differently. This should be a rare case so bail out 2092 // rather than trying to handle the difference. 2093 return nullptr; 2094 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos; 2095 double V4 = V * 4.0; 2096 if (V4 == floor(V4)) { 2097 // Force exact results for quarter-integer inputs. 2098 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 }; 2099 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3]; 2100 } else { 2101 if (IsCos) 2102 V = cos(V * 2.0 * numbers::pi); 2103 else 2104 V = sin(V * 2.0 * numbers::pi); 2105 } 2106 return GetConstantFoldFPValue(V, Ty); 2107 } 2108 } 2109 2110 if (!TLI) 2111 return nullptr; 2112 2113 LibFunc Func = NotLibFunc; 2114 TLI->getLibFunc(Name, Func); 2115 switch (Func) { 2116 default: 2117 break; 2118 case LibFunc_acos: 2119 case LibFunc_acosf: 2120 case LibFunc_acos_finite: 2121 case LibFunc_acosf_finite: 2122 if (TLI->has(Func)) 2123 return ConstantFoldFP(acos, APF, Ty); 2124 break; 2125 case LibFunc_asin: 2126 case LibFunc_asinf: 2127 case LibFunc_asin_finite: 2128 case LibFunc_asinf_finite: 2129 if (TLI->has(Func)) 2130 return ConstantFoldFP(asin, APF, Ty); 2131 break; 2132 case LibFunc_atan: 2133 case LibFunc_atanf: 2134 if (TLI->has(Func)) 2135 return ConstantFoldFP(atan, APF, Ty); 2136 break; 2137 case LibFunc_ceil: 2138 case LibFunc_ceilf: 2139 if (TLI->has(Func)) { 2140 U.roundToIntegral(APFloat::rmTowardPositive); 2141 return ConstantFP::get(Ty->getContext(), U); 2142 } 2143 break; 2144 case LibFunc_cos: 2145 case LibFunc_cosf: 2146 if (TLI->has(Func)) 2147 return ConstantFoldFP(cos, APF, Ty); 2148 break; 2149 case LibFunc_cosh: 2150 case LibFunc_coshf: 2151 case LibFunc_cosh_finite: 2152 case LibFunc_coshf_finite: 2153 if (TLI->has(Func)) 2154 return ConstantFoldFP(cosh, APF, Ty); 2155 break; 2156 case LibFunc_exp: 2157 case LibFunc_expf: 2158 case LibFunc_exp_finite: 2159 case LibFunc_expf_finite: 2160 if (TLI->has(Func)) 2161 return ConstantFoldFP(exp, APF, Ty); 2162 break; 2163 case LibFunc_exp2: 2164 case LibFunc_exp2f: 2165 case LibFunc_exp2_finite: 2166 case LibFunc_exp2f_finite: 2167 if (TLI->has(Func)) 2168 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library. 2169 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty); 2170 break; 2171 case LibFunc_fabs: 2172 case LibFunc_fabsf: 2173 if (TLI->has(Func)) { 2174 U.clearSign(); 2175 return ConstantFP::get(Ty->getContext(), U); 2176 } 2177 break; 2178 case LibFunc_floor: 2179 case LibFunc_floorf: 2180 if (TLI->has(Func)) { 2181 U.roundToIntegral(APFloat::rmTowardNegative); 2182 return ConstantFP::get(Ty->getContext(), U); 2183 } 2184 break; 2185 case LibFunc_log: 2186 case LibFunc_logf: 2187 case LibFunc_log_finite: 2188 case LibFunc_logf_finite: 2189 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func)) 2190 return ConstantFoldFP(log, APF, Ty); 2191 break; 2192 case LibFunc_log2: 2193 case LibFunc_log2f: 2194 case LibFunc_log2_finite: 2195 case LibFunc_log2f_finite: 2196 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func)) 2197 // TODO: What about hosts that lack a C99 library? 2198 return ConstantFoldFP(Log2, APF, Ty); 2199 break; 2200 case LibFunc_log10: 2201 case LibFunc_log10f: 2202 case LibFunc_log10_finite: 2203 case LibFunc_log10f_finite: 2204 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func)) 2205 // TODO: What about hosts that lack a C99 library? 2206 return ConstantFoldFP(log10, APF, Ty); 2207 break; 2208 case LibFunc_nearbyint: 2209 case LibFunc_nearbyintf: 2210 case LibFunc_rint: 2211 case LibFunc_rintf: 2212 if (TLI->has(Func)) { 2213 U.roundToIntegral(APFloat::rmNearestTiesToEven); 2214 return ConstantFP::get(Ty->getContext(), U); 2215 } 2216 break; 2217 case LibFunc_round: 2218 case LibFunc_roundf: 2219 if (TLI->has(Func)) { 2220 U.roundToIntegral(APFloat::rmNearestTiesToAway); 2221 return ConstantFP::get(Ty->getContext(), U); 2222 } 2223 break; 2224 case LibFunc_sin: 2225 case LibFunc_sinf: 2226 if (TLI->has(Func)) 2227 return ConstantFoldFP(sin, APF, Ty); 2228 break; 2229 case LibFunc_sinh: 2230 case LibFunc_sinhf: 2231 case LibFunc_sinh_finite: 2232 case LibFunc_sinhf_finite: 2233 if (TLI->has(Func)) 2234 return ConstantFoldFP(sinh, APF, Ty); 2235 break; 2236 case LibFunc_sqrt: 2237 case LibFunc_sqrtf: 2238 if (!APF.isNegative() && TLI->has(Func)) 2239 return ConstantFoldFP(sqrt, APF, Ty); 2240 break; 2241 case LibFunc_tan: 2242 case LibFunc_tanf: 2243 if (TLI->has(Func)) 2244 return ConstantFoldFP(tan, APF, Ty); 2245 break; 2246 case LibFunc_tanh: 2247 case LibFunc_tanhf: 2248 if (TLI->has(Func)) 2249 return ConstantFoldFP(tanh, APF, Ty); 2250 break; 2251 case LibFunc_trunc: 2252 case LibFunc_truncf: 2253 if (TLI->has(Func)) { 2254 U.roundToIntegral(APFloat::rmTowardZero); 2255 return ConstantFP::get(Ty->getContext(), U); 2256 } 2257 break; 2258 } 2259 return nullptr; 2260 } 2261 2262 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) { 2263 switch (IntrinsicID) { 2264 case Intrinsic::bswap: 2265 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap()); 2266 case Intrinsic::ctpop: 2267 return ConstantInt::get(Ty, Op->getValue().countPopulation()); 2268 case Intrinsic::bitreverse: 2269 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits()); 2270 case Intrinsic::convert_from_fp16: { 2271 APFloat Val(APFloat::IEEEhalf(), Op->getValue()); 2272 2273 bool lost = false; 2274 APFloat::opStatus status = Val.convert( 2275 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost); 2276 2277 // Conversion is always precise. 2278 (void)status; 2279 assert(status == APFloat::opOK && !lost && 2280 "Precision lost during fp16 constfolding"); 2281 2282 return ConstantFP::get(Ty->getContext(), Val); 2283 } 2284 default: 2285 return nullptr; 2286 } 2287 } 2288 2289 switch (IntrinsicID) { 2290 default: break; 2291 case Intrinsic::vector_reduce_add: 2292 case Intrinsic::vector_reduce_mul: 2293 case Intrinsic::vector_reduce_and: 2294 case Intrinsic::vector_reduce_or: 2295 case Intrinsic::vector_reduce_xor: 2296 case Intrinsic::vector_reduce_smin: 2297 case Intrinsic::vector_reduce_smax: 2298 case Intrinsic::vector_reduce_umin: 2299 case Intrinsic::vector_reduce_umax: 2300 if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0])) 2301 return C; 2302 break; 2303 } 2304 2305 // Support ConstantVector in case we have an Undef in the top. 2306 if (isa<ConstantVector>(Operands[0]) || 2307 isa<ConstantDataVector>(Operands[0])) { 2308 auto *Op = cast<Constant>(Operands[0]); 2309 switch (IntrinsicID) { 2310 default: break; 2311 case Intrinsic::x86_sse_cvtss2si: 2312 case Intrinsic::x86_sse_cvtss2si64: 2313 case Intrinsic::x86_sse2_cvtsd2si: 2314 case Intrinsic::x86_sse2_cvtsd2si64: 2315 if (ConstantFP *FPOp = 2316 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2317 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2318 /*roundTowardZero=*/false, Ty, 2319 /*IsSigned*/true); 2320 break; 2321 case Intrinsic::x86_sse_cvttss2si: 2322 case Intrinsic::x86_sse_cvttss2si64: 2323 case Intrinsic::x86_sse2_cvttsd2si: 2324 case Intrinsic::x86_sse2_cvttsd2si64: 2325 if (ConstantFP *FPOp = 2326 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2327 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2328 /*roundTowardZero=*/true, Ty, 2329 /*IsSigned*/true); 2330 break; 2331 } 2332 } 2333 2334 return nullptr; 2335 } 2336 2337 static Constant *ConstantFoldScalarCall2(StringRef Name, 2338 Intrinsic::ID IntrinsicID, 2339 Type *Ty, 2340 ArrayRef<Constant *> Operands, 2341 const TargetLibraryInfo *TLI, 2342 const CallBase *Call) { 2343 assert(Operands.size() == 2 && "Wrong number of operands."); 2344 2345 if (Ty->isFloatingPointTy()) { 2346 // TODO: We should have undef handling for all of the FP intrinsics that 2347 // are attempted to be folded in this function. 2348 bool IsOp0Undef = isa<UndefValue>(Operands[0]); 2349 bool IsOp1Undef = isa<UndefValue>(Operands[1]); 2350 switch (IntrinsicID) { 2351 case Intrinsic::maxnum: 2352 case Intrinsic::minnum: 2353 case Intrinsic::maximum: 2354 case Intrinsic::minimum: 2355 // If one argument is undef, return the other argument. 2356 if (IsOp0Undef) 2357 return Operands[1]; 2358 if (IsOp1Undef) 2359 return Operands[0]; 2360 break; 2361 } 2362 } 2363 2364 if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) { 2365 if (!Ty->isFloatingPointTy()) 2366 return nullptr; 2367 APFloat Op1V = Op1->getValueAPF(); 2368 2369 if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) { 2370 if (Op2->getType() != Op1->getType()) 2371 return nullptr; 2372 APFloat Op2V = Op2->getValueAPF(); 2373 2374 switch (IntrinsicID) { 2375 default: 2376 break; 2377 case Intrinsic::copysign: 2378 return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V)); 2379 case Intrinsic::minnum: 2380 return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V)); 2381 case Intrinsic::maxnum: 2382 return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V)); 2383 case Intrinsic::minimum: 2384 return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V)); 2385 case Intrinsic::maximum: 2386 return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V)); 2387 } 2388 2389 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) 2390 return nullptr; 2391 2392 switch (IntrinsicID) { 2393 default: 2394 break; 2395 case Intrinsic::pow: 2396 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); 2397 case Intrinsic::amdgcn_fmul_legacy: 2398 // The legacy behaviour is that multiplying +/- 0.0 by anything, even 2399 // NaN or infinity, gives +0.0. 2400 if (Op1V.isZero() || Op2V.isZero()) 2401 return ConstantFP::getNullValue(Ty); 2402 return ConstantFP::get(Ty->getContext(), Op1V * Op2V); 2403 } 2404 2405 if (!TLI) 2406 return nullptr; 2407 2408 LibFunc Func = NotLibFunc; 2409 TLI->getLibFunc(Name, Func); 2410 switch (Func) { 2411 default: 2412 break; 2413 case LibFunc_pow: 2414 case LibFunc_powf: 2415 case LibFunc_pow_finite: 2416 case LibFunc_powf_finite: 2417 if (TLI->has(Func)) 2418 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); 2419 break; 2420 case LibFunc_fmod: 2421 case LibFunc_fmodf: 2422 if (TLI->has(Func)) { 2423 APFloat V = Op1->getValueAPF(); 2424 if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF())) 2425 return ConstantFP::get(Ty->getContext(), V); 2426 } 2427 break; 2428 case LibFunc_remainder: 2429 case LibFunc_remainderf: 2430 if (TLI->has(Func)) { 2431 APFloat V = Op1->getValueAPF(); 2432 if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF())) 2433 return ConstantFP::get(Ty->getContext(), V); 2434 } 2435 break; 2436 case LibFunc_atan2: 2437 case LibFunc_atan2f: 2438 case LibFunc_atan2_finite: 2439 case LibFunc_atan2f_finite: 2440 if (TLI->has(Func)) 2441 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty); 2442 break; 2443 } 2444 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) { 2445 if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy()) 2446 return ConstantFP::get( 2447 Ty->getContext(), 2448 APFloat((float)std::pow((float)Op1V.convertToDouble(), 2449 (int)Op2C->getZExtValue()))); 2450 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy()) 2451 return ConstantFP::get( 2452 Ty->getContext(), 2453 APFloat((float)std::pow((float)Op1V.convertToDouble(), 2454 (int)Op2C->getZExtValue()))); 2455 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy()) 2456 return ConstantFP::get( 2457 Ty->getContext(), 2458 APFloat((double)std::pow(Op1V.convertToDouble(), 2459 (int)Op2C->getZExtValue()))); 2460 2461 if (IntrinsicID == Intrinsic::amdgcn_ldexp) { 2462 // FIXME: Should flush denorms depending on FP mode, but that's ignored 2463 // everywhere else. 2464 2465 // scalbn is equivalent to ldexp with float radix 2 2466 APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(), 2467 APFloat::rmNearestTiesToEven); 2468 return ConstantFP::get(Ty->getContext(), Result); 2469 } 2470 } 2471 return nullptr; 2472 } 2473 2474 if (Operands[0]->getType()->isIntegerTy() && 2475 Operands[1]->getType()->isIntegerTy()) { 2476 const APInt *C0, *C1; 2477 if (!getConstIntOrUndef(Operands[0], C0) || 2478 !getConstIntOrUndef(Operands[1], C1)) 2479 return nullptr; 2480 2481 unsigned BitWidth = Ty->getScalarSizeInBits(); 2482 switch (IntrinsicID) { 2483 default: break; 2484 case Intrinsic::smax: 2485 if (!C0 && !C1) 2486 return UndefValue::get(Ty); 2487 if (!C0 || !C1) 2488 return ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth)); 2489 return ConstantInt::get(Ty, C0->sgt(*C1) ? *C0 : *C1); 2490 2491 case Intrinsic::smin: 2492 if (!C0 && !C1) 2493 return UndefValue::get(Ty); 2494 if (!C0 || !C1) 2495 return ConstantInt::get(Ty, APInt::getSignedMinValue(BitWidth)); 2496 return ConstantInt::get(Ty, C0->slt(*C1) ? *C0 : *C1); 2497 2498 case Intrinsic::umax: 2499 if (!C0 && !C1) 2500 return UndefValue::get(Ty); 2501 if (!C0 || !C1) 2502 return ConstantInt::get(Ty, APInt::getMaxValue(BitWidth)); 2503 return ConstantInt::get(Ty, C0->ugt(*C1) ? *C0 : *C1); 2504 2505 case Intrinsic::umin: 2506 if (!C0 && !C1) 2507 return UndefValue::get(Ty); 2508 if (!C0 || !C1) 2509 return ConstantInt::get(Ty, APInt::getMinValue(BitWidth)); 2510 return ConstantInt::get(Ty, C0->ult(*C1) ? *C0 : *C1); 2511 2512 case Intrinsic::usub_with_overflow: 2513 case Intrinsic::ssub_with_overflow: 2514 // X - undef -> { 0, false } 2515 // undef - X -> { 0, false } 2516 if (!C0 || !C1) 2517 return Constant::getNullValue(Ty); 2518 LLVM_FALLTHROUGH; 2519 case Intrinsic::uadd_with_overflow: 2520 case Intrinsic::sadd_with_overflow: 2521 // X + undef -> { -1, false } 2522 // undef + x -> { -1, false } 2523 if (!C0 || !C1) { 2524 return ConstantStruct::get( 2525 cast<StructType>(Ty), 2526 {Constant::getAllOnesValue(Ty->getStructElementType(0)), 2527 Constant::getNullValue(Ty->getStructElementType(1))}); 2528 } 2529 LLVM_FALLTHROUGH; 2530 case Intrinsic::smul_with_overflow: 2531 case Intrinsic::umul_with_overflow: { 2532 // undef * X -> { 0, false } 2533 // X * undef -> { 0, false } 2534 if (!C0 || !C1) 2535 return Constant::getNullValue(Ty); 2536 2537 APInt Res; 2538 bool Overflow; 2539 switch (IntrinsicID) { 2540 default: llvm_unreachable("Invalid case"); 2541 case Intrinsic::sadd_with_overflow: 2542 Res = C0->sadd_ov(*C1, Overflow); 2543 break; 2544 case Intrinsic::uadd_with_overflow: 2545 Res = C0->uadd_ov(*C1, Overflow); 2546 break; 2547 case Intrinsic::ssub_with_overflow: 2548 Res = C0->ssub_ov(*C1, Overflow); 2549 break; 2550 case Intrinsic::usub_with_overflow: 2551 Res = C0->usub_ov(*C1, Overflow); 2552 break; 2553 case Intrinsic::smul_with_overflow: 2554 Res = C0->smul_ov(*C1, Overflow); 2555 break; 2556 case Intrinsic::umul_with_overflow: 2557 Res = C0->umul_ov(*C1, Overflow); 2558 break; 2559 } 2560 Constant *Ops[] = { 2561 ConstantInt::get(Ty->getContext(), Res), 2562 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow) 2563 }; 2564 return ConstantStruct::get(cast<StructType>(Ty), Ops); 2565 } 2566 case Intrinsic::uadd_sat: 2567 case Intrinsic::sadd_sat: 2568 if (!C0 && !C1) 2569 return UndefValue::get(Ty); 2570 if (!C0 || !C1) 2571 return Constant::getAllOnesValue(Ty); 2572 if (IntrinsicID == Intrinsic::uadd_sat) 2573 return ConstantInt::get(Ty, C0->uadd_sat(*C1)); 2574 else 2575 return ConstantInt::get(Ty, C0->sadd_sat(*C1)); 2576 case Intrinsic::usub_sat: 2577 case Intrinsic::ssub_sat: 2578 if (!C0 && !C1) 2579 return UndefValue::get(Ty); 2580 if (!C0 || !C1) 2581 return Constant::getNullValue(Ty); 2582 if (IntrinsicID == Intrinsic::usub_sat) 2583 return ConstantInt::get(Ty, C0->usub_sat(*C1)); 2584 else 2585 return ConstantInt::get(Ty, C0->ssub_sat(*C1)); 2586 case Intrinsic::cttz: 2587 case Intrinsic::ctlz: 2588 assert(C1 && "Must be constant int"); 2589 2590 // cttz(0, 1) and ctlz(0, 1) are undef. 2591 if (C1->isOneValue() && (!C0 || C0->isNullValue())) 2592 return UndefValue::get(Ty); 2593 if (!C0) 2594 return Constant::getNullValue(Ty); 2595 if (IntrinsicID == Intrinsic::cttz) 2596 return ConstantInt::get(Ty, C0->countTrailingZeros()); 2597 else 2598 return ConstantInt::get(Ty, C0->countLeadingZeros()); 2599 2600 case Intrinsic::abs: 2601 // Undef or minimum val operand with poison min --> undef 2602 assert(C1 && "Must be constant int"); 2603 if (C1->isOneValue() && (!C0 || C0->isMinSignedValue())) 2604 return UndefValue::get(Ty); 2605 2606 // Undef operand with no poison min --> 0 (sign bit must be clear) 2607 if (C1->isNullValue() && !C0) 2608 return Constant::getNullValue(Ty); 2609 2610 return ConstantInt::get(Ty, C0->abs()); 2611 } 2612 2613 return nullptr; 2614 } 2615 2616 // Support ConstantVector in case we have an Undef in the top. 2617 if ((isa<ConstantVector>(Operands[0]) || 2618 isa<ConstantDataVector>(Operands[0])) && 2619 // Check for default rounding mode. 2620 // FIXME: Support other rounding modes? 2621 isa<ConstantInt>(Operands[1]) && 2622 cast<ConstantInt>(Operands[1])->getValue() == 4) { 2623 auto *Op = cast<Constant>(Operands[0]); 2624 switch (IntrinsicID) { 2625 default: break; 2626 case Intrinsic::x86_avx512_vcvtss2si32: 2627 case Intrinsic::x86_avx512_vcvtss2si64: 2628 case Intrinsic::x86_avx512_vcvtsd2si32: 2629 case Intrinsic::x86_avx512_vcvtsd2si64: 2630 if (ConstantFP *FPOp = 2631 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2632 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2633 /*roundTowardZero=*/false, Ty, 2634 /*IsSigned*/true); 2635 break; 2636 case Intrinsic::x86_avx512_vcvtss2usi32: 2637 case Intrinsic::x86_avx512_vcvtss2usi64: 2638 case Intrinsic::x86_avx512_vcvtsd2usi32: 2639 case Intrinsic::x86_avx512_vcvtsd2usi64: 2640 if (ConstantFP *FPOp = 2641 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2642 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2643 /*roundTowardZero=*/false, Ty, 2644 /*IsSigned*/false); 2645 break; 2646 case Intrinsic::x86_avx512_cvttss2si: 2647 case Intrinsic::x86_avx512_cvttss2si64: 2648 case Intrinsic::x86_avx512_cvttsd2si: 2649 case Intrinsic::x86_avx512_cvttsd2si64: 2650 if (ConstantFP *FPOp = 2651 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2652 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2653 /*roundTowardZero=*/true, Ty, 2654 /*IsSigned*/true); 2655 break; 2656 case Intrinsic::x86_avx512_cvttss2usi: 2657 case Intrinsic::x86_avx512_cvttss2usi64: 2658 case Intrinsic::x86_avx512_cvttsd2usi: 2659 case Intrinsic::x86_avx512_cvttsd2usi64: 2660 if (ConstantFP *FPOp = 2661 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2662 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2663 /*roundTowardZero=*/true, Ty, 2664 /*IsSigned*/false); 2665 break; 2666 } 2667 } 2668 return nullptr; 2669 } 2670 2671 static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID, 2672 const APFloat &S0, 2673 const APFloat &S1, 2674 const APFloat &S2) { 2675 unsigned ID; 2676 const fltSemantics &Sem = S0.getSemantics(); 2677 APFloat MA(Sem), SC(Sem), TC(Sem); 2678 if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) { 2679 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) { 2680 // S2 < 0 2681 ID = 5; 2682 SC = -S0; 2683 } else { 2684 ID = 4; 2685 SC = S0; 2686 } 2687 MA = S2; 2688 TC = -S1; 2689 } else if (abs(S1) >= abs(S0)) { 2690 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) { 2691 // S1 < 0 2692 ID = 3; 2693 TC = -S2; 2694 } else { 2695 ID = 2; 2696 TC = S2; 2697 } 2698 MA = S1; 2699 SC = S0; 2700 } else { 2701 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) { 2702 // S0 < 0 2703 ID = 1; 2704 SC = S2; 2705 } else { 2706 ID = 0; 2707 SC = -S2; 2708 } 2709 MA = S0; 2710 TC = -S1; 2711 } 2712 switch (IntrinsicID) { 2713 default: 2714 llvm_unreachable("unhandled amdgcn cube intrinsic"); 2715 case Intrinsic::amdgcn_cubeid: 2716 return APFloat(Sem, ID); 2717 case Intrinsic::amdgcn_cubema: 2718 return MA + MA; 2719 case Intrinsic::amdgcn_cubesc: 2720 return SC; 2721 case Intrinsic::amdgcn_cubetc: 2722 return TC; 2723 } 2724 } 2725 2726 static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands, 2727 Type *Ty) { 2728 const APInt *C0, *C1, *C2; 2729 if (!getConstIntOrUndef(Operands[0], C0) || 2730 !getConstIntOrUndef(Operands[1], C1) || 2731 !getConstIntOrUndef(Operands[2], C2)) 2732 return nullptr; 2733 2734 if (!C2) 2735 return UndefValue::get(Ty); 2736 2737 APInt Val(32, 0); 2738 unsigned NumUndefBytes = 0; 2739 for (unsigned I = 0; I < 32; I += 8) { 2740 unsigned Sel = C2->extractBitsAsZExtValue(8, I); 2741 unsigned B = 0; 2742 2743 if (Sel >= 13) 2744 B = 0xff; 2745 else if (Sel == 12) 2746 B = 0x00; 2747 else { 2748 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1; 2749 if (!Src) 2750 ++NumUndefBytes; 2751 else if (Sel < 8) 2752 B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8); 2753 else 2754 B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff; 2755 } 2756 2757 Val.insertBits(B, I, 8); 2758 } 2759 2760 if (NumUndefBytes == 4) 2761 return UndefValue::get(Ty); 2762 2763 return ConstantInt::get(Ty, Val); 2764 } 2765 2766 static Constant *ConstantFoldScalarCall3(StringRef Name, 2767 Intrinsic::ID IntrinsicID, 2768 Type *Ty, 2769 ArrayRef<Constant *> Operands, 2770 const TargetLibraryInfo *TLI, 2771 const CallBase *Call) { 2772 assert(Operands.size() == 3 && "Wrong number of operands."); 2773 2774 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) { 2775 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) { 2776 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) { 2777 const APFloat &C1 = Op1->getValueAPF(); 2778 const APFloat &C2 = Op2->getValueAPF(); 2779 const APFloat &C3 = Op3->getValueAPF(); 2780 switch (IntrinsicID) { 2781 default: break; 2782 case Intrinsic::amdgcn_fma_legacy: { 2783 // The legacy behaviour is that multiplying +/- 0.0 by anything, even 2784 // NaN or infinity, gives +0.0. 2785 if (C1.isZero() || C2.isZero()) { 2786 // It's tempting to just return C3 here, but that would give the 2787 // wrong result if C3 was -0.0. 2788 return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3); 2789 } 2790 LLVM_FALLTHROUGH; 2791 } 2792 case Intrinsic::fma: 2793 case Intrinsic::fmuladd: { 2794 APFloat V = C1; 2795 V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven); 2796 return ConstantFP::get(Ty->getContext(), V); 2797 } 2798 case Intrinsic::amdgcn_cubeid: 2799 case Intrinsic::amdgcn_cubema: 2800 case Intrinsic::amdgcn_cubesc: 2801 case Intrinsic::amdgcn_cubetc: { 2802 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3); 2803 return ConstantFP::get(Ty->getContext(), V); 2804 } 2805 } 2806 } 2807 } 2808 } 2809 2810 if (IntrinsicID == Intrinsic::smul_fix || 2811 IntrinsicID == Intrinsic::smul_fix_sat) { 2812 // poison * C -> poison 2813 // C * poison -> poison 2814 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1])) 2815 return PoisonValue::get(Ty); 2816 2817 const APInt *C0, *C1; 2818 if (!getConstIntOrUndef(Operands[0], C0) || 2819 !getConstIntOrUndef(Operands[1], C1)) 2820 return nullptr; 2821 2822 // undef * C -> 0 2823 // C * undef -> 0 2824 if (!C0 || !C1) 2825 return Constant::getNullValue(Ty); 2826 2827 // This code performs rounding towards negative infinity in case the result 2828 // cannot be represented exactly for the given scale. Targets that do care 2829 // about rounding should use a target hook for specifying how rounding 2830 // should be done, and provide their own folding to be consistent with 2831 // rounding. This is the same approach as used by 2832 // DAGTypeLegalizer::ExpandIntRes_MULFIX. 2833 unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue(); 2834 unsigned Width = C0->getBitWidth(); 2835 assert(Scale < Width && "Illegal scale."); 2836 unsigned ExtendedWidth = Width * 2; 2837 APInt Product = (C0->sextOrSelf(ExtendedWidth) * 2838 C1->sextOrSelf(ExtendedWidth)).ashr(Scale); 2839 if (IntrinsicID == Intrinsic::smul_fix_sat) { 2840 APInt Max = APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth); 2841 APInt Min = APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth); 2842 Product = APIntOps::smin(Product, Max); 2843 Product = APIntOps::smax(Product, Min); 2844 } 2845 return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width)); 2846 } 2847 2848 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) { 2849 const APInt *C0, *C1, *C2; 2850 if (!getConstIntOrUndef(Operands[0], C0) || 2851 !getConstIntOrUndef(Operands[1], C1) || 2852 !getConstIntOrUndef(Operands[2], C2)) 2853 return nullptr; 2854 2855 bool IsRight = IntrinsicID == Intrinsic::fshr; 2856 if (!C2) 2857 return Operands[IsRight ? 1 : 0]; 2858 if (!C0 && !C1) 2859 return UndefValue::get(Ty); 2860 2861 // The shift amount is interpreted as modulo the bitwidth. If the shift 2862 // amount is effectively 0, avoid UB due to oversized inverse shift below. 2863 unsigned BitWidth = C2->getBitWidth(); 2864 unsigned ShAmt = C2->urem(BitWidth); 2865 if (!ShAmt) 2866 return Operands[IsRight ? 1 : 0]; 2867 2868 // (C0 << ShlAmt) | (C1 >> LshrAmt) 2869 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt; 2870 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt; 2871 if (!C0) 2872 return ConstantInt::get(Ty, C1->lshr(LshrAmt)); 2873 if (!C1) 2874 return ConstantInt::get(Ty, C0->shl(ShlAmt)); 2875 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt)); 2876 } 2877 2878 if (IntrinsicID == Intrinsic::amdgcn_perm) 2879 return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty); 2880 2881 return nullptr; 2882 } 2883 2884 static Constant *ConstantFoldScalarCall(StringRef Name, 2885 Intrinsic::ID IntrinsicID, 2886 Type *Ty, 2887 ArrayRef<Constant *> Operands, 2888 const TargetLibraryInfo *TLI, 2889 const CallBase *Call) { 2890 if (Operands.size() == 1) 2891 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call); 2892 2893 if (Operands.size() == 2) 2894 return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call); 2895 2896 if (Operands.size() == 3) 2897 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call); 2898 2899 return nullptr; 2900 } 2901 2902 static Constant *ConstantFoldFixedVectorCall( 2903 StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy, 2904 ArrayRef<Constant *> Operands, const DataLayout &DL, 2905 const TargetLibraryInfo *TLI, const CallBase *Call) { 2906 SmallVector<Constant *, 4> Result(FVTy->getNumElements()); 2907 SmallVector<Constant *, 4> Lane(Operands.size()); 2908 Type *Ty = FVTy->getElementType(); 2909 2910 switch (IntrinsicID) { 2911 case Intrinsic::masked_load: { 2912 auto *SrcPtr = Operands[0]; 2913 auto *Mask = Operands[2]; 2914 auto *Passthru = Operands[3]; 2915 2916 Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL); 2917 2918 SmallVector<Constant *, 32> NewElements; 2919 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) { 2920 auto *MaskElt = Mask->getAggregateElement(I); 2921 if (!MaskElt) 2922 break; 2923 auto *PassthruElt = Passthru->getAggregateElement(I); 2924 auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr; 2925 if (isa<UndefValue>(MaskElt)) { 2926 if (PassthruElt) 2927 NewElements.push_back(PassthruElt); 2928 else if (VecElt) 2929 NewElements.push_back(VecElt); 2930 else 2931 return nullptr; 2932 } 2933 if (MaskElt->isNullValue()) { 2934 if (!PassthruElt) 2935 return nullptr; 2936 NewElements.push_back(PassthruElt); 2937 } else if (MaskElt->isOneValue()) { 2938 if (!VecElt) 2939 return nullptr; 2940 NewElements.push_back(VecElt); 2941 } else { 2942 return nullptr; 2943 } 2944 } 2945 if (NewElements.size() != FVTy->getNumElements()) 2946 return nullptr; 2947 return ConstantVector::get(NewElements); 2948 } 2949 case Intrinsic::arm_mve_vctp8: 2950 case Intrinsic::arm_mve_vctp16: 2951 case Intrinsic::arm_mve_vctp32: 2952 case Intrinsic::arm_mve_vctp64: { 2953 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) { 2954 unsigned Lanes = FVTy->getNumElements(); 2955 uint64_t Limit = Op->getZExtValue(); 2956 // vctp64 are currently modelled as returning a v4i1, not a v2i1. Make 2957 // sure we get the limit right in that case and set all relevant lanes. 2958 if (IntrinsicID == Intrinsic::arm_mve_vctp64) 2959 Limit *= 2; 2960 2961 SmallVector<Constant *, 16> NCs; 2962 for (unsigned i = 0; i < Lanes; i++) { 2963 if (i < Limit) 2964 NCs.push_back(ConstantInt::getTrue(Ty)); 2965 else 2966 NCs.push_back(ConstantInt::getFalse(Ty)); 2967 } 2968 return ConstantVector::get(NCs); 2969 } 2970 break; 2971 } 2972 case Intrinsic::get_active_lane_mask: { 2973 auto *Op0 = dyn_cast<ConstantInt>(Operands[0]); 2974 auto *Op1 = dyn_cast<ConstantInt>(Operands[1]); 2975 if (Op0 && Op1) { 2976 unsigned Lanes = FVTy->getNumElements(); 2977 uint64_t Base = Op0->getZExtValue(); 2978 uint64_t Limit = Op1->getZExtValue(); 2979 2980 SmallVector<Constant *, 16> NCs; 2981 for (unsigned i = 0; i < Lanes; i++) { 2982 if (Base + i < Limit) 2983 NCs.push_back(ConstantInt::getTrue(Ty)); 2984 else 2985 NCs.push_back(ConstantInt::getFalse(Ty)); 2986 } 2987 return ConstantVector::get(NCs); 2988 } 2989 break; 2990 } 2991 default: 2992 break; 2993 } 2994 2995 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) { 2996 // Gather a column of constants. 2997 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) { 2998 // Some intrinsics use a scalar type for certain arguments. 2999 if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) { 3000 Lane[J] = Operands[J]; 3001 continue; 3002 } 3003 3004 Constant *Agg = Operands[J]->getAggregateElement(I); 3005 if (!Agg) 3006 return nullptr; 3007 3008 Lane[J] = Agg; 3009 } 3010 3011 // Use the regular scalar folding to simplify this column. 3012 Constant *Folded = 3013 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call); 3014 if (!Folded) 3015 return nullptr; 3016 Result[I] = Folded; 3017 } 3018 3019 return ConstantVector::get(Result); 3020 } 3021 3022 static Constant *ConstantFoldScalableVectorCall( 3023 StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy, 3024 ArrayRef<Constant *> Operands, const DataLayout &DL, 3025 const TargetLibraryInfo *TLI, const CallBase *Call) { 3026 switch (IntrinsicID) { 3027 case Intrinsic::aarch64_sve_convert_from_svbool: { 3028 auto *Src = dyn_cast<Constant>(Operands[0]); 3029 if (!Src || !Src->isNullValue()) 3030 break; 3031 3032 return ConstantInt::getFalse(SVTy); 3033 } 3034 default: 3035 break; 3036 } 3037 return nullptr; 3038 } 3039 3040 } // end anonymous namespace 3041 3042 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F, 3043 ArrayRef<Constant *> Operands, 3044 const TargetLibraryInfo *TLI) { 3045 if (Call->isNoBuiltin()) 3046 return nullptr; 3047 if (!F->hasName()) 3048 return nullptr; 3049 StringRef Name = F->getName(); 3050 3051 Type *Ty = F->getReturnType(); 3052 3053 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) 3054 return ConstantFoldFixedVectorCall( 3055 Name, F->getIntrinsicID(), FVTy, Operands, 3056 F->getParent()->getDataLayout(), TLI, Call); 3057 3058 if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty)) 3059 return ConstantFoldScalableVectorCall( 3060 Name, F->getIntrinsicID(), SVTy, Operands, 3061 F->getParent()->getDataLayout(), TLI, Call); 3062 3063 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI, 3064 Call); 3065 } 3066 3067 bool llvm::isMathLibCallNoop(const CallBase *Call, 3068 const TargetLibraryInfo *TLI) { 3069 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap 3070 // (and to some extent ConstantFoldScalarCall). 3071 if (Call->isNoBuiltin() || Call->isStrictFP()) 3072 return false; 3073 Function *F = Call->getCalledFunction(); 3074 if (!F) 3075 return false; 3076 3077 LibFunc Func; 3078 if (!TLI || !TLI->getLibFunc(*F, Func)) 3079 return false; 3080 3081 if (Call->getNumArgOperands() == 1) { 3082 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) { 3083 const APFloat &Op = OpC->getValueAPF(); 3084 switch (Func) { 3085 case LibFunc_logl: 3086 case LibFunc_log: 3087 case LibFunc_logf: 3088 case LibFunc_log2l: 3089 case LibFunc_log2: 3090 case LibFunc_log2f: 3091 case LibFunc_log10l: 3092 case LibFunc_log10: 3093 case LibFunc_log10f: 3094 return Op.isNaN() || (!Op.isZero() && !Op.isNegative()); 3095 3096 case LibFunc_expl: 3097 case LibFunc_exp: 3098 case LibFunc_expf: 3099 // FIXME: These boundaries are slightly conservative. 3100 if (OpC->getType()->isDoubleTy()) 3101 return !(Op < APFloat(-745.0) || Op > APFloat(709.0)); 3102 if (OpC->getType()->isFloatTy()) 3103 return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f)); 3104 break; 3105 3106 case LibFunc_exp2l: 3107 case LibFunc_exp2: 3108 case LibFunc_exp2f: 3109 // FIXME: These boundaries are slightly conservative. 3110 if (OpC->getType()->isDoubleTy()) 3111 return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0)); 3112 if (OpC->getType()->isFloatTy()) 3113 return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f)); 3114 break; 3115 3116 case LibFunc_sinl: 3117 case LibFunc_sin: 3118 case LibFunc_sinf: 3119 case LibFunc_cosl: 3120 case LibFunc_cos: 3121 case LibFunc_cosf: 3122 return !Op.isInfinity(); 3123 3124 case LibFunc_tanl: 3125 case LibFunc_tan: 3126 case LibFunc_tanf: { 3127 // FIXME: Stop using the host math library. 3128 // FIXME: The computation isn't done in the right precision. 3129 Type *Ty = OpC->getType(); 3130 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) 3131 return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr; 3132 break; 3133 } 3134 3135 case LibFunc_asinl: 3136 case LibFunc_asin: 3137 case LibFunc_asinf: 3138 case LibFunc_acosl: 3139 case LibFunc_acos: 3140 case LibFunc_acosf: 3141 return !(Op < APFloat(Op.getSemantics(), "-1") || 3142 Op > APFloat(Op.getSemantics(), "1")); 3143 3144 case LibFunc_sinh: 3145 case LibFunc_cosh: 3146 case LibFunc_sinhf: 3147 case LibFunc_coshf: 3148 case LibFunc_sinhl: 3149 case LibFunc_coshl: 3150 // FIXME: These boundaries are slightly conservative. 3151 if (OpC->getType()->isDoubleTy()) 3152 return !(Op < APFloat(-710.0) || Op > APFloat(710.0)); 3153 if (OpC->getType()->isFloatTy()) 3154 return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f)); 3155 break; 3156 3157 case LibFunc_sqrtl: 3158 case LibFunc_sqrt: 3159 case LibFunc_sqrtf: 3160 return Op.isNaN() || Op.isZero() || !Op.isNegative(); 3161 3162 // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p, 3163 // maybe others? 3164 default: 3165 break; 3166 } 3167 } 3168 } 3169 3170 if (Call->getNumArgOperands() == 2) { 3171 ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0)); 3172 ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1)); 3173 if (Op0C && Op1C) { 3174 const APFloat &Op0 = Op0C->getValueAPF(); 3175 const APFloat &Op1 = Op1C->getValueAPF(); 3176 3177 switch (Func) { 3178 case LibFunc_powl: 3179 case LibFunc_pow: 3180 case LibFunc_powf: { 3181 // FIXME: Stop using the host math library. 3182 // FIXME: The computation isn't done in the right precision. 3183 Type *Ty = Op0C->getType(); 3184 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) { 3185 if (Ty == Op1C->getType()) 3186 return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr; 3187 } 3188 break; 3189 } 3190 3191 case LibFunc_fmodl: 3192 case LibFunc_fmod: 3193 case LibFunc_fmodf: 3194 case LibFunc_remainderl: 3195 case LibFunc_remainder: 3196 case LibFunc_remainderf: 3197 return Op0.isNaN() || Op1.isNaN() || 3198 (!Op0.isInfinity() && !Op1.isZero()); 3199 3200 default: 3201 break; 3202 } 3203 } 3204 } 3205 3206 return false; 3207 } 3208 3209 void TargetFolder::anchor() {} 3210