//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}
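// Worked example (illustrative, assuming a little-endian target): folding
// <2 x i16> <i16 1, i16 2> into an i32 visits element 1 first, so Result
// becomes (2 << 16) | 1 == 0x00020001, which matches the in-memory byte
// order 01 00 02 00.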
/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to a vector
      // of integers to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                            ConstantInt::get(Src->getType(),
                                                             ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}
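// Worked example for the expansion case above (illustrative): on a
// little-endian target,
//   bitcast (<1 x i64> <i64 8589934593> to <2 x i32>)
// uses ShiftAmt 0 then 32 and yields <2 x i32> <i32 1, i32 2>, while a
// big-endian target starts at ShiftAmt 32 and yields <2 x i32> <i32 2, i32 1>.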
} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL,
                                      DSOLocalEquivalent **DSOEquiv) {
  if (DSOEquiv)
    *DSOEquiv = nullptr;

  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
                                  DSOEquiv))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}
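// Illustrative example (hypothetical global @a): for
//   getelementptr ([5 x i32], [5 x i32]* @a, i32 0, i32 3)
// this returns true with GV = @a and Offset = 12, i.e. three i32 elements
// past the start of the global.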
Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
    uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (SrcSize < DestSize)
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (C->isNullValue() && !DestTy->isX86_MMXTy())
      return Constant::getNullValue(DestTy);
    if (C->isAllOnesValue() &&
        (DestTy->isIntegerTy() || DestTy->isFloatingPointTy() ||
         DestTy->isVectorTy()) &&
        !DestTy->isX86_MMXTy() && !DestTy->isPtrOrPtrVectorTy())
      // Get ones when the input is trivial, but
      // only for supported types inside getAllOnesValue.
      return Constant::getAllOnesValue(DestTy);

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}
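// Illustrative example (hypothetical global @g): simulating an i32 load of
// bitcast ({ i32, i32 }* @g to i32*) walks into the initializer's leading
// element, so the fold returns the constant stored in @g's first field.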
namespace {

/// Recursive helper to read bits out of a global. C is the constant being
/// copied out of. ByteOffset is an offset into C. CurPtr is the pointer to
/// copy results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()){
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()){
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
    }
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}
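// Illustrative example: reading 4 bytes at offset 0 from an i32 0x01020304
// initializer fills CurPtr with 04 03 02 01 on a little-endian target and
// with 01 02 03 04 on a big-endian one.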
Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  // Bail out early. We do not expect to load from a scalable global variable.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(
          C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy)
                                                  : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType()).getFixedSize();

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}
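// Note on the non-integer path above (expository): a float load is folded by
// reloading the same bytes as an i32 (MapTy) and bitcasting the result back,
// which mirrors how a type-punning union access would read the data.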
Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(),
                                                       CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace the load with an immediate integer if the result is an integer
    // or fp value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append the NUL terminator at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(CE))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}
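// Illustrative example of the string special case above (hypothetical global
// @s = constant [4 x i8] c"abc\00"): an i32 load folds to 0x00636261 on a
// little-endian target, with 'a' in the low byte and the NUL terminator in
// the high byte.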
namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the
        // right size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}
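// Illustrative example of the Sub case above (hypothetical global @a of type
// [5 x i32]): subtracting ptrtoint of &@a[1] from ptrtoint of &@a[3] folds
// to the byte distance 12 - 4 = 8.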
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly cast by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntIdxTy
                          : IntIdxScalarTy;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}

/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntIdxTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          return ConstantFoldConstant(Res, DL, TLI);
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else {
        Type *NextTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
        if (!NextTy)
          break;
        Ty = NextTy;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down), to compute the index at this level.
        bool Overflow;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}
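// Illustrative example (hypothetical global @a of type [5 x i32]):
//   getelementptr (i8, i8* bitcast ([5 x i32]* @a to i8*), i64 8)
// re-forms as getelementptr ([5 x i32], [5 x i32]* @a, i64 0, i64 2), since
// the 8-byte offset divides evenly into two i32 elements.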
/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantExpr::getExtractValue(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant from
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}
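// Illustrative example for the PHI handling above: a node such as
//   %p = phi i32 [ 7, %bb1 ], [ undef, %bb2 ], [ 7, %bb3 ]
// folds to i32 7, because undef incoming values are skipped and the
// remaining incoming constants all agree.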
Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null -> icmp x, 0
  // fold: icmp null, (inttoptr x) -> icmp 0, x
  // fold: icmp (ptrtoint x), 0 -> icmp x, null
  // fold: icmp 0, (ptrtoint x) -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here
  // now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is IntPtrTy in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is IntPtrTy in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}
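// Illustrative example for the compare folding above:
//   icmp eq (i8* inttoptr (i64 5 to i8*)), null
// is rewritten as a comparison of the integer 5 (cast to the pointer-sized
// integer type) against 0, which folds to false.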
Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr; // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}
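// Illustrative example for the PtrToInt case above (assuming 32-bit
// pointers):
//   ptrtoint (i8* inttoptr (i64 4294967297 to i8*) to i64)
// first masks the input down to the 32-bit pointer width, so the pair folds
// to i64 1 rather than round-tripping the full 64-bit value.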
//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin())
    return false;
  switch (F->getIntrinsicID()) {
  // Operations that do not operate on floating-point numbers and do not
  // depend on the FP environment can be folded even in strictfp functions.
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  // Target intrinsics
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  // WebAssembly float semantics are always known
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
  case Intrinsic::wasm_trunc_saturate_signed:
  case Intrinsic::wasm_trunc_saturate_unsigned:
    return true;

  // Floating point operations cannot be folded in strictfp functions in the
  // general case. They can be folded if the FP environment is known to the
  // compiler.
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_ldexp:
  case Intrinsic::amdgcn_sin:
  // The intrinsics below depend on rounding mode in MXCSR.
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return !Call->isStrictFP();

  // Sign operations are actually bitwise operations; they do not raise
  // exceptions even for SNaNs.
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  // Non-constrained variants of rounding operations imply the default FP
  // environment; they can be folded in any case.
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  // Constrained intrinsics can be folded if the FP environment is known
  // to the compiler.
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName() || Call->isStrictFP())
    return false;

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal
  // to "cos", but has length 8.
  StringRef Name = F->getName();
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "expf" ||
           Name == "exp2" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "logf" ||
           Name == "log2" || Name == "log2f" ||
           Name == "log10" || Name == "log10f";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "remainder" || Name == "remainderf" ||
           Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  case '_':
    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2], so we
    // may as well check a limit that will eliminate mismatches.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}
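// Together with llvm_fenv_testexcept() below, this helper lets ConstantFoldFP
// bracket a host libm call: clear errno and the FP exception flags, call the
// native function, then reject the fold if anything other than FE_INEXACT was
// raised. For example, evaluating log(-1.0) on the host sets EDOM (and
// typically FE_INVALID), so the call is left unfolded rather than being
// folded to NaN.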
/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
                               double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

Constant *ConstantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
  FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
  if (!VT)
    return nullptr;
  ConstantInt *CI = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
  if (!CI)
    return nullptr;
  APInt Acc = CI->getValue();

  for (unsigned I = 1; I < VT->getNumElements(); I++) {
    if (!(CI = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
      return nullptr;
    const APInt &X = CI->getValue();
    switch (IID) {
    case Intrinsic::vector_reduce_add:
      Acc = Acc + X;
      break;
    case Intrinsic::vector_reduce_mul:
      Acc = Acc * X;
      break;
    case Intrinsic::vector_reduce_and:
      Acc = Acc & X;
      break;
    case Intrinsic::vector_reduce_or:
      Acc = Acc | X;
      break;
    case Intrinsic::vector_reduce_xor:
      Acc = Acc ^ X;
      break;
    case Intrinsic::vector_reduce_smin:
      Acc = APIntOps::smin(Acc, X);
      break;
    case Intrinsic::vector_reduce_smax:
      Acc = APIntOps::smax(Acc, X);
      break;
    case Intrinsic::vector_reduce_umin:
      Acc = APIntOps::umin(Acc, X);
      break;
    case Intrinsic::vector_reduce_umax:
      Acc = APIntOps::umax(Acc, X);
      break;
    }
  }

  return ConstantInt::get(Op->getContext(), Acc);
}
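// For example, vector_reduce_add on <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// folds to i32 10, and vector_reduce_umax on the same vector folds to i32 4;
// any element that is not a ConstantInt (e.g. undef) aborts the fold.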
/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}
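// For example, folding cvtss2si on float 1.5 rounds to nearest-even and
// yields i32 2, while the truncating cvttss2si yields i32 1. NaN or
// out-of-range inputs produce opInvalidOp and are left unfolded (on real
// hardware such conversions return the "integer indefinite" value; this
// folder simply declines instead of modelling that).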
double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool isManifestConstant(const Constant *c) {
  if (isa<ConstantData>(c)) {
    return true;
  } else if (isa<ConstantAggregate>(c) || isa<ConstantExpr>(c)) {
    for (const Value *subc : c->operand_values()) {
      if (!isManifestConstant(cast<Constant>(subc)))
        return false;
    }
    return true;
  }
  return false;
}

static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}

static Constant *ConstantFoldScalarCall1(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 1 && "Wrong number of operands.");

  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only
    // return true for manifest constants, not those that depend on
    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
    if (isManifestConstant(Operands[0]))
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }
  if (isa<UndefValue>(Operands[0])) {
    // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
    // ctpop() is between 0 and bitwidth, pick 0 for undef.
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // If the instruction is not yet in a basic block (e.g. when cloning
      // a function during inlining), Call's caller may not be available.
      // So check Call's BB first before querying Call->getCaller.
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      if (Caller &&
          !NullPointerIsDefined(
              Caller, Operands[0]->getType()->getPointerAddressSpace())) {
        return Operands[0];
      }
      return nullptr;
    }
  }

  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());

      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }

    APFloat U = Op->getValueAPF();

    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_unsigned ||
        IntrinsicID == Intrinsic::wasm_trunc_saturate_signed ||
        IntrinsicID == Intrinsic::wasm_trunc_saturate_unsigned) {

      bool Saturating = IntrinsicID == Intrinsic::wasm_trunc_saturate_signed ||
                        IntrinsicID == Intrinsic::wasm_trunc_saturate_unsigned;
      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed ||
                    IntrinsicID == Intrinsic::wasm_trunc_saturate_signed;

      if (U.isNaN())
        return Saturating ? ConstantInt::get(Ty, 0) : nullptr;

      unsigned Width = Ty->getIntegerBitWidth();
      APSInt Int(Width, !Signed);
      bool IsExact = false;
      APFloat::opStatus Status =
          U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);

      if (Status == APFloat::opOK || Status == APFloat::opInexact)
        return ConstantInt::get(Ty, Int);

      if (!Saturating)
        return nullptr;

      if (U.isNegative())
        return Signed ? ConstantInt::get(Ty, APInt::getSignedMinValue(Width))
                      : ConstantInt::get(Ty, APInt::getMinValue(Width));
      else
        return Signed ? ConstantInt::get(Ty, APInt::getSignedMaxValue(Width))
                      : ConstantInt::get(Ty, APInt::getMaxValue(Width));
    }
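    // Worked examples with Ty == i32: both wasm_trunc_signed and
    // wasm_trunc_saturate_signed fold 300.7 to 300 (truncation toward zero;
    // an inexact result is fine). For NaN or out-of-range inputs such as
    // 3e10, the non-saturating forms refuse to fold (the runtime trap stays
    // observable), while the saturating forms fold NaN to 0 and 3e10 to
    // INT32_MAX (and -3e10 to INT32_MIN).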
    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;

    // Use internal versions of these intrinsics.

    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::roundeven) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::amdgcn_fract) {
      // The v_fract instruction behaves like the OpenCL spec, which defines
      // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
      // there to prevent fract(-small) from returning 1.0. It returns the
      // largest positive floating-point number less than 1.0."
      APFloat FloorU(U);
      FloorU.roundToIntegral(APFloat::rmTowardNegative);
      APFloat FractU(U - FloorU);
      APFloat AlmostOne(U.getSemantics(), 1);
      AlmostOne.next(/*nextDown*/ true);
      return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
    }

    // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
    // raise FP exceptions, unless the argument is a signaling NaN.

    Optional<APFloat::roundingMode> RM;
    switch (IntrinsicID) {
    default:
      break;
    case Intrinsic::experimental_constrained_nearbyint:
    case Intrinsic::experimental_constrained_rint: {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      RM = CI->getRoundingMode();
      if (!RM || RM.getValue() == RoundingMode::Dynamic)
        return nullptr;
      break;
    }
    case Intrinsic::experimental_constrained_round:
      RM = APFloat::rmNearestTiesToAway;
      break;
    case Intrinsic::experimental_constrained_ceil:
      RM = APFloat::rmTowardPositive;
      break;
    case Intrinsic::experimental_constrained_floor:
      RM = APFloat::rmTowardNegative;
      break;
    case Intrinsic::experimental_constrained_trunc:
      RM = APFloat::rmTowardZero;
      break;
    }
    if (RM) {
      auto CI = cast<ConstrainedFPIntrinsic>(Call);
      if (U.isFinite()) {
        APFloat::opStatus St = U.roundToIntegral(*RM);
        if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
            St == APFloat::opInexact) {
          Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
          if (EB && *EB == fp::ebStrict)
            return nullptr;
        }
      } else if (U.isSignaling()) {
        Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
        if (EB && *EB != fp::ebIgnore)
          return nullptr;
        U = APFloat::getQNaN(U.getSemantics());
      }
      return ConstantFP::get(Ty->getContext(), U);
    }
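    // For example, @llvm.experimental.constrained.trunc(double 2.7,
    // metadata !"fpexcept.strict") folds to 2.0, since trunc never raises an
    // inexact exception, whereas @llvm.experimental.constrained.rint of 2.7
    // with !"round.tonearest" and !"fpexcept.strict" is left unfolded because
    // the runtime operation would have raised FE_INEXACT.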
    // We only fold functions with finite arguments. Folding NaN and inf is
    // likely to be aborted with an exception anyway, and some host libms
    // have known errors raising exceptions.
    if (!U.isFinite())
      return nullptr;

    // Currently APFloat versions of these functions do not exist, so we use
    // the host native double versions. Float versions are not called
    // directly, but for all of these functions it holds that
    // (float)(f((double)arg)) == f(arg). Long double is not supported yet.
    double V = getValueAsDouble(Op);

    switch (IntrinsicID) {
    default: break;
    case Intrinsic::log:
      return ConstantFoldFP(log, V, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(Log2, V, Ty);
    case Intrinsic::log10:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log10, V, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, V, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, V, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, V, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, V, Ty);
    case Intrinsic::amdgcn_cos:
    case Intrinsic::amdgcn_sin:
      if (V < -256.0 || V > 256.0)
        // The gfx8 and gfx9 architectures handle arguments outside the range
        // [-256, 256] differently. This should be a rare case so bail out
        // rather than trying to handle the difference.
        return nullptr;
      bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
      double V4 = V * 4.0;
      if (V4 == floor(V4)) {
        // Force exact results for quarter-integer inputs.
        const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
        V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
      } else {
        if (IsCos)
          V = cos(V * 2.0 * numbers::pi);
        else
          V = sin(V * 2.0 * numbers::pi);
      }
      return GetConstantFoldFPValue(V, Ty);
    }
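    // Note that amdgcn_sin/amdgcn_cos take their argument in turns rather
    // than radians (the fold above evaluates sin(2*pi*V)), so quarter-integer
    // inputs hit the exact table: e.g. amdgcn_cos(0.25) folds to
    // cos(pi/2) = 0.0 exactly, and amdgcn_sin(0.25) folds to 1.0.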
    if (!TLI)
      return nullptr;

    LibFunc Func = NotLibFunc;
    TLI->getLibFunc(Name, Func);
    switch (Func) {
    default:
      break;
    case LibFunc_acos:
    case LibFunc_acosf:
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, V, Ty);
      break;
    case LibFunc_asin:
    case LibFunc_asinf:
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, V, Ty);
      break;
    case LibFunc_atan:
    case LibFunc_atanf:
      if (TLI->has(Func))
        return ConstantFoldFP(atan, V, Ty);
      break;
    case LibFunc_ceil:
    case LibFunc_ceilf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_cos:
    case LibFunc_cosf:
      if (TLI->has(Func))
        return ConstantFoldFP(cos, V, Ty);
      break;
    case LibFunc_cosh:
    case LibFunc_coshf:
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, V, Ty);
      break;
    case LibFunc_exp:
    case LibFunc_expf:
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, V, Ty);
      break;
    case LibFunc_exp2:
    case LibFunc_exp2f:
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
      break;
    case LibFunc_fabs:
    case LibFunc_fabsf:
      if (TLI->has(Func)) {
        U.clearSign();
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log:
    case LibFunc_logf:
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (V > 0.0 && TLI->has(Func))
        return ConstantFoldFP(log, V, Ty);
      break;
    case LibFunc_log2:
    case LibFunc_log2f:
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (V > 0.0 && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(Log2, V, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (V > 0.0 && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log10, V, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    case LibFunc_rint:
    case LibFunc_rintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_sin:
    case LibFunc_sinf:
      if (TLI->has(Func))
        return ConstantFoldFP(sin, V, Ty);
      break;
    case LibFunc_sinh:
    case LibFunc_sinhf:
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, V, Ty);
      break;
    case LibFunc_sqrt:
    case LibFunc_sqrtf:
      if (V >= 0.0 && TLI->has(Func))
        return ConstantFoldFP(sqrt, V, Ty);
      break;
    case LibFunc_tan:
    case LibFunc_tanf:
      if (TLI->has(Func))
        return ConstantFoldFP(tan, V, Ty);
      break;
    case LibFunc_tanh:
    case LibFunc_tanhf:
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, V, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    }
    return nullptr;
  }

  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().countPopulation());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());

      bool lost = false;
      APFloat::opStatus status = Val.convert(
          Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

      // Conversion is always precise.
      (void)status;
      assert(status == APFloat::opOK && !lost &&
             "Precision lost during fp16 constfolding");

      return ConstantFP::get(Ty->getContext(), Val);
    }
    default:
      return nullptr;
    }
  }
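  // For instance, @llvm.bswap.i32(i32 0x12345678) folds to i32 0x78563412,
  // @llvm.ctpop.i8(i8 7) folds to i8 3, and @llvm.bitreverse.i8(i8 0x01)
  // folds to i8 0x80.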
  if (isa<ConstantAggregateZero>(Operands[0])) {
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_umax:
      return ConstantInt::get(Ty, 0);
    }
  }

  // Support ConstantVector in case we have an Undef in the top.
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_umin:
    case Intrinsic::vector_reduce_umax:
      if (Constant *C = ConstantFoldVectorReduce(IntrinsicID, Op))
        return C;
      break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    }
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall2(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 2 && "Wrong number of operands.");

  if (Ty->isFloatingPointTy()) {
    // TODO: We should have undef handling for all of the FP intrinsics that
    // are attempted to be folded in this function.
    bool IsOp0Undef = isa<UndefValue>(Operands[0]);
    bool IsOp1Undef = isa<UndefValue>(Operands[1]);
    switch (IntrinsicID) {
    case Intrinsic::maxnum:
    case Intrinsic::minnum:
    case Intrinsic::maximum:
    case Intrinsic::minimum:
      // If one argument is undef, return the other argument.
      if (IsOp0Undef)
        return Operands[1];
      if (IsOp1Undef)
        return Operands[0];
      break;
    }
  }

  if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;
    double Op1V = getValueAsDouble(Op1);

    if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;

      double Op2V = getValueAsDouble(Op2);
      if (IntrinsicID == Intrinsic::pow) {
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      }
      if (IntrinsicID == Intrinsic::copysign) {
        APFloat V1 = Op1->getValueAPF();
        const APFloat &V2 = Op2->getValueAPF();
        V1.copySign(V2);
        return ConstantFP::get(Ty->getContext(), V1);
      }

      if (IntrinsicID == Intrinsic::minnum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::maxnum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::minimum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::maximum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
      }
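      // These four folds differ on NaN and signed zero: e.g. minnum(NaN, 1.0)
      // folds to 1.0 (IEEE-754 minNum ignores a quiet NaN operand), while
      // minimum(NaN, 1.0) folds to NaN, and minimum(-0.0, +0.0) folds
      // to -0.0.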
      if (IntrinsicID == Intrinsic::amdgcn_fmul_legacy) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        // The legacy behaviour is that multiplying +/- 0.0 by anything, even
        // NaN or infinity, gives +0.0.
        if (C1.isZero() || C2.isZero())
          return ConstantFP::getNullValue(Ty);
        return ConstantFP::get(Ty->getContext(), C1 * C2);
      }

      if (!TLI)
        return nullptr;

      LibFunc Func = NotLibFunc;
      TLI->getLibFunc(Name, Func);
      switch (Func) {
      default:
        break;
      case LibFunc_pow:
      case LibFunc_powf:
      case LibFunc_pow_finite:
      case LibFunc_powf_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        break;
      case LibFunc_fmod:
      case LibFunc_fmodf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_remainder:
      case LibFunc_remainderf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2_finite:
      case LibFunc_atan2f_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
        break;
      }
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V, (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((float)std::pow((float)Op1V, (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(
            Ty->getContext(),
            APFloat((double)std::pow((double)Op1V, (int)Op2C->getZExtValue())));

      if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
        // FIXME: Should flush denorms depending on FP mode, but that's ignored
        // everywhere else.

        // scalbn is equivalent to ldexp with float radix 2
        APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
                                APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), Result);
      }
    }
    return nullptr;
  }
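  // As examples of the mixed FP/integer folds above:
  // @llvm.powi.f64(double 2.0, i32 -2) folds to 0.25 via the host pow, and
  // @llvm.amdgcn.ldexp.f64(double 1.5, i32 3) folds to 12.0 (1.5 * 2^3).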
  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    unsigned BitWidth = Ty->getScalarSizeInBits();
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::smax:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth));
      return ConstantInt::get(Ty, C0->sgt(*C1) ? *C0 : *C1);

    case Intrinsic::smin:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return ConstantInt::get(Ty, APInt::getSignedMinValue(BitWidth));
      return ConstantInt::get(Ty, C0->slt(*C1) ? *C0 : *C1);

    case Intrinsic::umax:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return ConstantInt::get(Ty, APInt::getMaxValue(BitWidth));
      return ConstantInt::get(Ty, C0->ugt(*C1) ? *C0 : *C1);

    case Intrinsic::umin:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return ConstantInt::get(Ty, APInt::getMinValue(BitWidth));
      return ConstantInt::get(Ty, C0->ult(*C1) ? *C0 : *C1);

    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
      // X - undef -> { undef, false }
      // undef - X -> { undef, false }
      // X + undef -> { undef, false }
      // undef + x -> { undef, false }
      if (!C0 || !C1) {
        return ConstantStruct::get(
            cast<StructType>(Ty),
            {UndefValue::get(Ty->getStructElementType(0)),
             Constant::getNullValue(Ty->getStructElementType(1))});
      }
      LLVM_FALLTHROUGH;
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      // undef * X -> { 0, false }
      // X * undef -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);

      APInt Res;
      bool Overflow;
      switch (IntrinsicID) {
      default: llvm_unreachable("Invalid case");
      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);
        break;
      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);
        break;
      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);
        break;
      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);
        break;
      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);
        break;
      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);
        break;
      }
      Constant *Ops[] = {
        ConstantInt::get(Ty->getContext(), Res),
        ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
      };
      return ConstantStruct::get(cast<StructType>(Ty), Ops);
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getAllOnesValue(Ty);
      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->ssub_sat(*C1));
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");

      // cttz(0, 1) and ctlz(0, 1) are undef.
      if (C1->isOneValue() && (!C0 || C0->isNullValue()))
        return UndefValue::get(Ty);
      if (!C0)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::cttz)
        return ConstantInt::get(Ty, C0->countTrailingZeros());
      else
        return ConstantInt::get(Ty, C0->countLeadingZeros());

    case Intrinsic::abs:
      // Undef or minimum val operand with poison min --> undef
      assert(C1 && "Must be constant int");
      if (C1->isOneValue() && (!C0 || C0->isMinSignedValue()))
        return UndefValue::get(Ty);

      // Undef operand with no poison min --> 0 (sign bit must be clear)
      if (C1->isNullValue() && !C0)
        return Constant::getNullValue(Ty);

      return ConstantInt::get(Ty, C0->abs());
    }

    return nullptr;
  }
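  // Worked examples on i8: @llvm.uadd.sat.i8(i8 200, i8 100) folds to i8 255
  // (clamped), @llvm.sadd.with.overflow.i8(i8 100, i8 100) folds to
  // { i8 -56, i1 true }, and @llvm.cttz.i8(i8 8, i1 false) folds to i8 3.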
  // Support ConstantVector in case we have an Undef in the top.
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for default rounding mode.
      // FIXME: Support other rounding modes?
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/false);
      break;
    }
  }
  return nullptr;
}

static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
                                               const APFloat &S0,
                                               const APFloat &S1,
                                               const APFloat &S2) {
  unsigned ID;
  const fltSemantics &Sem = S0.getSemantics();
  APFloat MA(Sem), SC(Sem), TC(Sem);
  if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
    if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
      // S2 < 0
      ID = 5;
      SC = -S0;
    } else {
      ID = 4;
      SC = S0;
    }
    MA = S2;
    TC = -S1;
  } else if (abs(S1) >= abs(S0)) {
    if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
      // S1 < 0
      ID = 3;
      TC = -S2;
    } else {
      ID = 2;
      TC = S2;
    }
    MA = S1;
    SC = S0;
  } else {
    if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
      // S0 < 0
      ID = 1;
      SC = S2;
    } else {
      ID = 0;
      SC = -S2;
    }
    MA = S0;
    TC = -S1;
  }
  switch (IntrinsicID) {
  default:
    llvm_unreachable("unhandled amdgcn cube intrinsic");
  case Intrinsic::amdgcn_cubeid:
    return APFloat(Sem, ID);
  case Intrinsic::amdgcn_cubema:
    return MA + MA;
  case Intrinsic::amdgcn_cubesc:
    return SC;
  case Intrinsic::amdgcn_cubetc:
    return TC;
  }
}
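// Following the selection logic above: for (S0, S1, S2) = (1.0, 2.0, 3.0) the
// major axis is +Z (ID 4), so amdgcn_cubeid folds to 4.0, amdgcn_cubema to
// 6.0 (2 * S2), amdgcn_cubesc to 1.0 (S0), and amdgcn_cubetc to -2.0 (-S1).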
static Constant *ConstantFoldScalarCall3(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 3 && "Wrong number of operands.");

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::amdgcn_fma_legacy: {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          // The legacy behaviour is that multiplying +/- 0.0 by anything, even
          // NaN or infinity, gives +0.0.
          if (C1.isZero() || C2.isZero()) {
            const APFloat &C3 = Op3->getValueAPF();
            // It's tempting to just return C3 here, but that would give the
            // wrong result if C3 was -0.0.
            return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
          }
          LLVM_FALLTHROUGH;
        }
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          V.fusedMultiplyAdd(Op2->getValueAPF(), Op3->getValueAPF(),
                             APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(
              IntrinsicID, Op1->getValueAPF(), Op2->getValueAPF(),
              Op3->getValueAPF());
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }

  if (const auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantInt>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::smul_fix:
        case Intrinsic::smul_fix_sat: {
          // This code performs rounding towards negative infinity in case the
          // result cannot be represented exactly for the given scale. Targets
          // that do care about rounding should use a target hook for
          // specifying how rounding should be done, and provide their own
          // folding to be consistent with rounding. This is the same approach
          // as used by DAGTypeLegalizer::ExpandIntRes_MULFIX.
          const APInt &Lhs = Op1->getValue();
          const APInt &Rhs = Op2->getValue();
          unsigned Scale = Op3->getValue().getZExtValue();
          unsigned Width = Lhs.getBitWidth();
          assert(Scale < Width && "Illegal scale.");
          unsigned ExtendedWidth = Width * 2;
          APInt Product = (Lhs.sextOrSelf(ExtendedWidth) *
                           Rhs.sextOrSelf(ExtendedWidth)).ashr(Scale);
          if (IntrinsicID == Intrinsic::smul_fix_sat) {
            APInt MaxValue =
                APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
            APInt MinValue =
                APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
            Product = APIntOps::smin(Product, MaxValue);
            Product = APIntOps::smax(Product, MinValue);
          }
          return ConstantInt::get(Ty->getContext(),
                                  Product.sextOrTrunc(Width));
        }
        }
      }
    }
  }
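  // For the fixed-point fold above: @llvm.smul.fix.i8(i8 12, i8 10, i32 4)
  // multiplies the Q4 values 0.75 * 0.625 and folds to i8 7, i.e.
  // (12 * 10) >> 4 with the inexact low bits rounded toward negative
  // infinity (0.4375 in Q4).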
  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to oversized inverse shift below.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }
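  // For example, @llvm.fshl.i8(i8 0x12, i8 0x34, i8 4) treats the operands as
  // the concatenation 0x1234, shifts the window left by 4 (mod 8), and yields
  // the top byte: (0x12 << 4) | (0x34 >> 4) = 0x23.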
  return nullptr;
}

static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2)
    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
}

static Constant *ConstantFoldVectorCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        VectorType *VTy,
                                        ArrayRef<Constant *> Operands,
                                        const DataLayout &DL,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  // Do not iterate on scalable vectors. The number of elements is unknown at
  // compile time.
  if (isa<ScalableVectorType>(VTy))
    return nullptr;

  auto *FVTy = cast<FixedVectorType>(VTy);

  SmallVector<Constant *, 4> Result(FVTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = FVTy->getElementType();

  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != FVTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Limit = Op->getZExtValue();
      // vctp64 is currently modelled as returning a v4i1, not a v2i1. Make
      // sure we get the limit right in that case and set all relevant lanes.
      if (IntrinsicID == Intrinsic::arm_mve_vctp64)
        Limit *= 2;

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    break;
  }
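  // For instance, arm_mve_vctp16(3) folds to the <8 x i1> predicate
  // <1, 1, 1, 0, 0, 0, 0, 0>: the low 3 lanes are active.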
  default:
    break;
  }

  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // Some intrinsics use a scalar type for certain arguments.
      if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI) {
  if (Call->isNoBuiltin())
    return nullptr;
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  F->getParent()->getDataLayout(), TLI, Call);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                Call);
}

bool llvm::isMathLibCallNoop(const CallBase *Call,
                             const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (Call->getNumArgOperands() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          double OpV = getValueAsDouble(OpC);
          return ConstantFoldFP(tan, OpV, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return !(Op < APFloat(Op.getSemantics(), "-1") ||
                 Op > APFloat(Op.getSemantics(), "1"));

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (Call->getNumArgOperands() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType()) {
            double Op0V = getValueAsDouble(Op0C);
            double Op1V = getValueAsDouble(Op1C);
            return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
          }
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      default:
        break;
      }
    }
  }

  return false;
}

void TargetFolder::anchor() {}