//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}
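// Passes can observe DAG mutations by subclassing DAGUpdateListener and
// overriding the callbacks below; the listener registers itself with the DAG
// in the DAGUpdateListener constructor and unregisters in its destructor.
// A minimal sketch (hypothetical client code, not part of this file):
//
//   struct CountDeletions : SelectionDAG::DAGUpdateListener {
//     unsigned NumDeleted = 0;
//     CountDeletions(SelectionDAG &DAG) : DAGUpdateListener(DAG) {}
//     void NodeDeleted(SDNode *N, SDNode *E) override { ++NumDeleted; }
//   };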
// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  if (N->getOpcode() == ISD::SPLAT_VECTOR) {
    unsigned EltSize =
        N->getValueType(0).getVectorElementType().getSizeInBits();
    if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
      return true;
    }
  }

  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}
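// For example, a BUILD_VECTOR of four i32 constants that are all 42 succeeds
// here with SplatVal == 42 (SplatBitSize == 32 matches the element size),
// while a vector mixing 42 and 43 fails the isConstantSplat() check. A
// hypothetical caller looks like:
//
//   APInt SplatVal;
//   if (ISD::isConstantSplatVector(V.getNode(), SplatVal))
//     ...; // every defined element of V equals SplatVal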
// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
    APInt SplatVal;
    return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnesValue();
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}
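// Worked example of the element-size handling above: after type legalization
// a v8i8 all-ones vector may be a BUILD_VECTOR of eight i16 constants. An
// i16 operand of 0x00FF still qualifies, because its low 8 bits (the i8
// element size) are all set, which is exactly what the
// countTrailingOnes() >= EltSize check accepts.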
bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
    APInt SplatVal;
    return isConstantSplatVector(N, SplatVal) && SplatVal.isNullValue();
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode() &&
      ISD::SPLAT_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}
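// Illustrative use of matchUnaryPredicate (hypothetical caller; V is a scalar
// constant, BUILD_VECTOR, or SPLAT_VECTOR SDValue):
//
//   bool AllPow2 = ISD::matchUnaryPredicate(V, [](ConstantSDNode *C) {
//     return C && C->getAPIntValue().isPowerOf2();
//   });
//
// When AllowUndefs is true the predicate is invoked with a null
// ConstantSDNode for undef elements, hence the null check in the lambda.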
bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
  switch (VecReduceOpcode) {
  default:
    llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_SEQ_FADD:
    return ISD::FADD;
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_SEQ_FMUL:
    return ISD::FMUL;
  case ISD::VECREDUCE_ADD:
    return ISD::ADD;
  case ISD::VECREDUCE_MUL:
    return ISD::MUL;
  case ISD::VECREDUCE_AND:
    return ISD::AND;
  case ISD::VECREDUCE_OR:
    return ISD::OR;
  case ISD::VECREDUCE_XOR:
    return ISD::XOR;
  case ISD::VECREDUCE_SMAX:
    return ISD::SMAX;
  case ISD::VECREDUCE_SMIN:
    return ISD::SMIN;
  case ISD::VECREDUCE_UMAX:
    return ISD::UMAX;
  case ISD::VECREDUCE_UMIN:
    return ISD::UMIN;
  case ISD::VECREDUCE_FMAX:
    return ISD::FMAXNUM;
  case ISD::VECREDUCE_FMIN:
    return ISD::FMINNUM;
  }
}

bool ISD::isVPOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...)                                   \
  case ISD::SDOPC:                                                             \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// The operand position of the vector mask.
Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return None;
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, ...)        \
  case ISD::SDOPC:                                                             \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// The operand position of the explicit vector length parameter.
Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return None;
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS)     \
  case ISD::SDOPC:                                                             \
    return EVLPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
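// Worked example: ISD::CondCode encodes E = 1, G = 2, L = 4, U = 8 (plus
// N = 16 for the integer codes). SETULE is U|L|E; swapping the L and G bits
// yields U|G|E, i.e. SETUGE -- exactly the comparison with the operands
// exchanged, since "x <= y" is "y >= x".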
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7;  // Flip L, G, E bits, but not U.
  else
    Operation ^= 15; // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8; // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  return getSetCCInverseImpl(Op, Type.isInteger());
}

ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
                                               bool isIntegerLike) {
  return getSetCCInverseImpl(Op, isIntegerLike);
}

/// For an integer comparison, return 1 if the comparison is signed and 2 if
/// it is unsigned. Return zero if the operation does not depend on the sign
/// of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
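// Worked example for the AND case above: SETGE is N|G|E and SETLE is N|L|E,
// so ANDing the condition bits leaves N|E, i.e. SETEQ -- "x >= y and x <= y"
// holds exactly when "x == y". Here the raw bit AND is already a legal
// integer code, so the canonicalization switch is not needed.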
//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::PSEUDO_PROBE:
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlign().value());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}
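// The extra fields folded into the ID above are what keep CSE sound for
// memory nodes: for example, two loads with identical operands and memory VT
// but different address spaces produce different IDs (their AddrSpace
// integers differ), so FindNodeOrInsertPos will not unify them.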
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}
void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (const SDUse &Op : N->ops()) {
      assert((Op.getValueType() == EltVT ||
              (EltVT.isInteger() && Op.getValueType().isInteger() &&
               EltVT.bitsLE(Op.getValueType()))) &&
             "Wrong operand type!");
      assert(Op.getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG
/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}
/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

Align SelectionDAG::getEVTAlign(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlign(Ty);
}
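// For example, with a typical 64-bit DataLayout, getEVTAlign(MVT::i64)
// returns Align(8), and MVT::iPTR (which has no IR type of its own) is
// aligned like an i8 pointer. The exact values depend on the target's
// DataLayout string.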
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis *Divergence,
                        ProfileSummaryInfo *PSIin,
                        BlockFrequencyInfo *BFIin) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
  PSI = PSIin;
  BFI = BFIin;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

bool SelectionDAG::shouldOptForSize() const {
  return MF->getFunction().hasOptSize() ||
         llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}
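// The FindNodeOrInsertPos overloads below implement the CSE lookup pattern
// used throughout this file (sketch, assuming ID was already populated by
// the AddNodeID* helpers):
//
//   void *IP = nullptr;
//   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
//     return SDValue(E, 0);           // Reuse the existing node.
//   SDNode *N = newSDNode<...>(...);  // Otherwise create a new one ...
//   CSEMap.InsertNode(N, IP);         // ... and insert it at the slot IP.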
SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

std::pair<SDValue, SDValue>
SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
                                       const SDLoc &DL, EVT VT) {
  assert(!VT.bitsEq(Op.getValueType()) &&
         "Strict no-op FP extend/round not allowed.");
  SDValue Res =
      VT.bitsGT(Op.getValueType())
          ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
          : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
                    {Chain, Op, getIntPtrConstant(0, DL)});

  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  EVT OpVT = Op.getValueType();
  assert(VT.isInteger() && OpVT.isInteger() &&
         "Cannot getZeroExtendInReg FP types");
  assert(VT.isVector() == OpVT.isVector() &&
         "getZeroExtendInReg type should be vector iff the operand "
         "type is vector!");
  assert((!VT.isVector() ||
          VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
         "Vector element counts must match in getZeroExtendInReg");
  assert(VT.bitsLE(OpVT) && "Not extending!");
  if (OpVT == VT)
    return Op;
  APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
                                   VT.getScalarSizeInBits());
  return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
}
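// For example, zero-extending the low 8 bits of an i32 value "in register"
// materializes as (and x, 0xFF): getZeroExtendInReg(X, DL, MVT::i8) on an
// i32-typed X builds the AND with the low-bits mask computed above.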
SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
      getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}
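// Note the difference between the two NOTs above: getNOT always XORs with
// an all-ones constant, while getLogicalNOT XORs with the target's "true"
// boolean, which is 1 under ZeroOrOneBooleanContent but all-ones under
// ZeroOrNegativeOneBooleanContent (see getBoolConstant below) -- the two
// coincide only when booleans are all-ones, or for i1 where 1 is all ones.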
SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(
          NewVal.lshr(i * ViaEltSizeInBits).zextOrTrunc(ViaEltSizeInBits), DL,
          ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      llvm::append_range(Ops, EltParts);

    SDValue V =
        getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isScalableVector())
    Result = getSplatVector(VT, DL, Result);
  else if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}
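// getConstant is also how vector splat constants are built: requesting a
// vector VT returns a splat of the scalar, e.g. (illustrative)
//
//   SDValue Ones = DAG.getConstant(1, DL, MVT::v4i32);
//
// yields a BUILD_VECTOR of four i32 1s (or a SPLAT_VECTOR for scalable
// types), via the getSplatBuildVector/getSplatVector calls above.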
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
                                             const SDLoc &DL, bool LegalTypes) {
  assert(VT.isInteger() && "Shift amount is not an integer type!");
  EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
  return getConstant(Val, DL, ShiftVT);
}

SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
                                           bool isTarget) {
  return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isScalableVector())
    Result = getSplatVector(VT, DL, Result);
  else if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}
SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16 || EltVT == MVT::bf16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent constant pools");
  if (!Alignment)
    Alignment = shouldOptForSize()
                    ? getDataLayout().getABITypeAlign(C->getType())
                    : getDataLayout().getPrefTypeAlign(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment->value());
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
  return V;
}
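// Note that the alignment fallback above is size-aware: when the caller
// supplies no alignment, functions optimized for size use the (typically
// smaller) ABI alignment of the constant's type, while everything else uses
// the preferred alignment, trading constant-pool padding for faster loads.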
SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (!Alignment)
    Alignment = shouldOptForSize()
                    ? getDataLayout().getABITypeAlign(C->getType())
                    : getDataLayout().getPrefTypeAlign(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment->value());
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
  return V;
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (!Alignment)
    Alignment = getDataLayout().getPrefTypeAlign(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment->value());
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}
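// Example (illustrative): commuting shuffle(N1, N2, <0, 5, 2, 7>) with four
// elements per input yields shuffle(N2, N1, <4, 1, 6, 3>); commuteMask remaps
// each in-range index across the NElts boundary.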
SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  if (TLI->hasVectorBlend()) {
    // If shuffling a splat, try to blend the splat instead. We do this here so
    // that even when this arises during lowering we don't have to re-handle it.
    auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      if (!Splat)
        return;

      for (int i = 0; i < NElts; ++i) {
        if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
          continue;

        // If this input comes from undef, mark it as such.
        if (UndefElements[MaskVec[i] - Offset]) {
          MaskVec[i] = -1;
          continue;
        }

        // If we can blend a non-undef lane, use that instead.
        if (!UndefElements[i])
          MaskVec[i] = i + Offset;
      }
    };
    if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
      BlendSplat(N1BV, 0);
    if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
      BlendSplat(N2BV, NElts);
  }

  // Canonicalize all indices into lhs -> shuffle lhs, undef
  // Canonicalize all indices into rhs -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If this is an identity shuffle, return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts. We check that these don't change the number
    // (and size) of elements and just change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
          V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements match or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  llvm::copy(MaskVec, MaskAlloc);

  auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
                                           dl.getDebugLoc(), MaskAlloc);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
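// Example (illustrative): getVectorShuffle(v4i32, dl, A, A, <0, 4, 1, 5>) is
// canonicalized above to shuffle(A, undef, <0, 0, 1, 1>); identical operands
// make every index point into the first vector, so the second becomes undef.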
SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
  EVT VT = SV.getValueType(0);
  SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
  ShuffleVectorSDNode::commuteMask(MaskVec);

  SDValue Op0 = SV.getOperand(0);
  SDValue Op1 = SV.getOperand(1);
  return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
  ID.AddInteger(RegNo);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
  N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
  ID.AddPointer(RegMask);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
                                 MCSymbol *Label) {
  return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
}

SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
                                   SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
  ID.AddPointer(Label);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N =
      newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset, bool isTarget,
                                      unsigned TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
  ID.AddPointer(V);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<SrcValueSDNode>(V);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
  ID.AddPointer(MD);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<MDNodeSDNode>(MD);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
  if (VT == V.getValueType())
    return V;

  return getNode(ISD::BITCAST, SDLoc(V), VT, V);
}

SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VT, SrcAS, DestAS);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFreeze(SDValue V) {
  return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
}
/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
}
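// For instance, on a target whose shift amount type for this LHS type is
// wider than the operand's type, the amount is zero-extended here; a
// narrower shift amount type would cause a truncation instead.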
SDValue SelectionDAG::expandVAArg(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  const MaybeAlign MA(Node->getConstantOperandVal(3));

  SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
                               Tmp2, MachinePointerInfo(V));
  SDValue VAList = VAListLoad;

  if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
    VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
                     getConstant(MA->value() - 1, dl, VAList.getValueType()));

    VAList =
        getNode(ISD::AND, dl, VAList.getValueType(), VAList,
                getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
  }

  // Increment the pointer, VAList, to the next vaarg
  Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
                 getConstant(getDataLayout().getTypeAllocSize(
                                 VT.getTypeForEVT(*getContext())),
                             dl, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer
  Tmp1 =
      getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
  // Load the actual argument out of the pointer VAList
  return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
}
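// The conditional alignment code above rounds the va_list pointer up with
// the usual (Ptr + Align - 1) & -Align idiom; e.g. for an 8-byte alignment,
// a pointer value of 0x1004 becomes 0x1008.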
SDValue SelectionDAG::expandVACopy(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
  // This defaults to loading a pointer from the input and storing it to the
  // output, returning the chain.
  const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
  const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
  SDValue Tmp1 =
      getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
              Node->getOperand(2), MachinePointerInfo(VS));
  return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
                  MachinePointerInfo(VD));
}

Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
  const DataLayout &DL = getDataLayout();
  Type *Ty = VT.getTypeForEVT(*getContext());
  Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);

  if (TLI->isTypeLegal(VT) || !VT.isVector())
    return RedAlign;

  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  const Align StackAlign = TFI->getStackAlign();

  // See if we can choose a smaller ABI alignment in cases where it's an
  // illegal vector type that will get broken down.
  if (RedAlign > StackAlign) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
                                NumIntermediates, RegisterVT);
    Ty = IntermediateVT.getTypeForEVT(*getContext());
    Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
    if (RedAlign2 < RedAlign)
      RedAlign = RedAlign2;
  }

  return RedAlign;
}
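// For example, if an illegal 64-byte vector type is broken down by the
// target into four 16-byte intermediate vectors, the intermediate type's
// 16-byte preferred alignment can replace the original type's larger
// preferred alignment once it exceeds the stack alignment.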
SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
  MachineFrameInfo &MFI = MF->getFrameInfo();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  int StackID = 0;
  if (Bytes.isScalable())
    StackID = TFI->getStackIDForScalableVectors();
  // The stack id gives an indication of whether the object is scalable or
  // not, so it's safe to pass in the minimum size here.
  int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
                                       false, nullptr, StackID);
  return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  Type *Ty = VT.getTypeForEVT(*getContext());
  Align StackAlign =
      std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
  return CreateStackTemporary(VT.getStoreSize(), StackAlign);
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  TypeSize VT1Size = VT1.getStoreSize();
  TypeSize VT2Size = VT2.getStoreSize();
  assert(VT1Size.isScalable() == VT2Size.isScalable() &&
         "Don't know how to choose the maximum size when creating a stack "
         "temporary");
  TypeSize Bytes =
      VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;

  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const DataLayout &DL = getDataLayout();
  Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
  return CreateStackTemporary(Bytes, Align);
}
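// Example (illustrative): CreateStackTemporary(MVT::f64, MVT::v4i32)
// allocates a 16-byte slot (the larger of the two store sizes), aligned to
// the larger of the two preferred alignments.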
SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
                                ISD::CondCode Cond, const SDLoc &dl) {
  EVT OpVT = N1.getValueType();

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!OpVT.isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (OpVT.isInteger()) {
    // For EQ and NE, we can always pick a value for the undef to make the
    // predicate pass or fail, so we can return undef.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    // icmp eq/ne X, undef -> undef.
    if ((N1.isUndef() || N2.isUndef()) &&
        (Cond == ISD::SETEQ || Cond == ISD::SETNE))
      return getUNDEF(VT);

    // If both operands are undef, we can return undef for int comparison.
    // icmp undef, undef -> undef.
    if (N1.isUndef() && N2.isUndef())
      return getUNDEF(VT);

    // icmp X, X -> true/false
    // icmp X, undef -> true/false because undef could be X.
    if (N1 == N2)
      return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getBoolConstant(C1 == C2, dl, VT, OpVT);
      case ISD::SETNE:  return getBoolConstant(C1 != C2, dl, VT, OpVT);
      case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
      case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
      case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
      case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
      case ISD::SETLT:  return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
      case ISD::SETGT:  return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
      case ISD::SETLE:  return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
      case ISD::SETGE:  return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
      }
    }
  }

  auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);

  if (N1CFP && N2CFP) {
    APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
    switch (Cond) {
    default: break;
    case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
                                             VT, OpVT);
    case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpEqual, dl, VT, OpVT);
    case ISD::SETO:   return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETUO:  return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
                                             R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
                                             VT, OpVT);
    case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    }
  } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
    // Ensure that the constant occurs on the RHS.
    ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
    if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
      return SDValue();
    return getSetCC(dl, VT, N2, N1, SwappedCond);
  } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
             (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
    // If an operand is known to be a nan (or undef that could be a nan), we
    // can fold it.
    // Choosing NaN for the undef will always make unordered comparison succeed
    // and ordered comparison fail.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    switch (ISD::getUnorderedFlavor(Cond)) {
    default:
      llvm_unreachable("Unknown flavor!");
    case 0: // Known false.
      return getBoolConstant(false, dl, VT, OpVT);
    case 1: // Known true.
      return getBoolConstant(true, dl, VT, OpVT);
    case 2: // Undefined.
      return getUNDEF(VT);
    }
  }

  // Could not fold it.
  return SDValue();
}
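// Example (illustrative): FoldSetCC(MVT::i1, getConstant(3, dl, MVT::i32),
// getConstant(5, dl, MVT::i32), ISD::SETULT, dl) folds straight to the true
// boolean constant, since 3 u< 5.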
/// See if the specified operand can be simplified with the knowledge that
/// only the bits specified by DemandedBits are used.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
  EVT VT = V.getValueType();

  if (VT.isScalableVector())
    return SDValue();

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return GetDemandedBits(V, DemandedBits, DemandedElts);
}

/// See if the specified operand can be simplified with the knowledge that
/// only the bits specified by DemandedBits are used in the elements specified
/// by DemandedElts.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
                                      const APInt &DemandedElts) {
  switch (V.getOpcode()) {
  default:
    return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
                                                *this, 0);
  case ISD::Constant: {
    const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
    APInt NewVal = CVal & DemandedBits;
    if (NewVal != CVal)
      return getConstant(NewVal, SDLoc(V), V.getValueType());
    break;
  }
  case ISD::SRL:
    // Only look at single-use SRLs.
    if (!V.getNode()->hasOneUse())
      break;
    if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
      // See if we can recursively simplify the LHS.
      unsigned Amt = RHSC->getZExtValue();

      // Watch out for shift count overflow though.
      if (Amt >= DemandedBits.getBitWidth())
        break;
      APInt SrcDemandedBits = DemandedBits << Amt;
      if (SDValue SimplifyLHS =
              GetDemandedBits(V.getOperand(0), SrcDemandedBits))
        return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
                       V.getOperand(1));
    }
    break;
  }
  return SDValue();
}
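// Example (illustrative): for (srl X, 4) with DemandedBits = 0x0F, the
// recursion above asks for bits 0xF0 of X; if X simplifies under that mask,
// the SRL is rebuilt on the simplified operand.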
/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
                                     unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
/// DemandedElts. We use this predicate to simplify operations downstream.
/// Mask is known to be zero for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
                                     const APInt &DemandedElts,
                                     unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
}

/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
                                        unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
}
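// Example (illustrative): MaskedValueIsZero(V, 0xFFFFFF00) holds when V is
// (zext i8 X to i32), because computeKnownBits proves the top 24 bits zero
// and the mask is then a subset of Known.Zero.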
/// isSplatValue - Return true if the vector V has the same value
/// across all DemandedElts. For scalable vectors it does not make
/// sense to specify which elements are demanded or undefined, therefore
/// they are simply ignored.
bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
                                APInt &UndefElts, unsigned Depth) {
  EVT VT = V.getValueType();
  assert(VT.isVector() && "Vector type expected");

  if (!VT.isScalableVector() && !DemandedElts)
    return false; // No demanded elts, better to assume we don't know anything.

  if (Depth >= MaxRecursionDepth)
    return false; // Limit search depth.

  // Deal with some common cases here that work for both fixed and scalable
  // vector types.
  switch (V.getOpcode()) {
  case ISD::SPLAT_VECTOR:
    UndefElts = V.getOperand(0).isUndef()
                    ? APInt::getAllOnesValue(DemandedElts.getBitWidth())
                    : APInt(DemandedElts.getBitWidth(), 0);
    return true;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR: {
    APInt UndefLHS, UndefRHS;
    SDValue LHS = V.getOperand(0);
    SDValue RHS = V.getOperand(1);
    if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
        isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
      UndefElts = UndefLHS | UndefRHS;
      return true;
    }
    break;
  }
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
  }

  // We don't support other cases than those above for scalable vectors at
  // the moment.
  if (VT.isScalableVector())
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
  UndefElts = APInt::getNullValue(NumElts);

  switch (V.getOpcode()) {
  case ISD::BUILD_VECTOR: {
    SDValue Scl;
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue Op = V.getOperand(i);
      if (Op.isUndef()) {
        UndefElts.setBit(i);
        continue;
      }
      if (!DemandedElts[i])
        continue;
      if (Scl && Scl != Op)
        return false;
      Scl = Op;
    }
    return true;
  }
  case ISD::VECTOR_SHUFFLE: {
    // Check if this is a shuffle node doing a splat.
    // TODO: Do we need to handle shuffle(splat, undef, mask)?
    int SplatIndex = -1;
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
    for (int i = 0; i != (int)NumElts; ++i) {
      int M = Mask[i];
      if (M < 0) {
        UndefElts.setBit(i);
        continue;
      }
      if (!DemandedElts[i])
        continue;
      if (0 <= SplatIndex && SplatIndex != M)
        return false;
      SplatIndex = M;
    }
    return true;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = V.getOperand(0);
    uint64_t Idx = V.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt UndefSrcElts;
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
    if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
      UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
      return true;
    }
    break;
  }
  }

  return false;
}
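// Example (illustrative): (build_vector X, undef, X, X) is a splat with
// UndefElts = 0b0010; the undef lane is recorded but does not stop the
// remaining demanded lanes from agreeing on X.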
/// Helper wrapper to main isSplatValue function.
bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
  EVT VT = V.getValueType();
  assert(VT.isVector() && "Vector type expected");

  APInt UndefElts;
  APInt DemandedElts;

  // For now we don't support this with scalable vectors.
  if (!VT.isScalableVector())
    DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  return isSplatValue(V, DemandedElts, UndefElts) &&
         (AllowUndefs || !UndefElts);
}

SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
  V = peekThroughExtractSubvectors(V);

  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();
  switch (Opcode) {
  default: {
    APInt UndefElts;
    APInt DemandedElts;

    if (!VT.isScalableVector())
      DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());

    if (isSplatValue(V, DemandedElts, UndefElts)) {
      if (VT.isScalableVector()) {
        // DemandedElts and UndefElts are ignored for scalable vectors, since
        // the only supported cases are SPLAT_VECTOR nodes.
        SplatIdx = 0;
      } else {
        // Handle case where all demanded elements are UNDEF.
        if (DemandedElts.isSubsetOf(UndefElts)) {
          SplatIdx = 0;
          return getUNDEF(VT);
        }
        SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
      }
      return V;
    }
    break;
  }
  case ISD::SPLAT_VECTOR:
    SplatIdx = 0;
    return V;
  case ISD::VECTOR_SHUFFLE: {
    if (VT.isScalableVector())
      return SDValue();

    // Check if this is a shuffle node doing a splat.
    // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
    // getTargetVShiftNode currently struggles without the splat source.
    auto *SVN = cast<ShuffleVectorSDNode>(V);
    if (!SVN->isSplat())
      break;
    int Idx = SVN->getSplatIndex();
    int NumElts = V.getValueType().getVectorNumElements();
    SplatIdx = Idx % NumElts;
    return V.getOperand(Idx / NumElts);
  }
  }

  return SDValue();
}

SDValue SelectionDAG::getSplatValue(SDValue V) {
  int SplatIdx;
  if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx))
    return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V),
                   SrcVector.getValueType().getScalarType(), SrcVector,
                   getVectorIdxConstant(SplatIdx, SDLoc(V)));
  return SDValue();
}

const APInt *
SelectionDAG::getValidShiftAmountConstant(SDValue V,
                                          const APInt &DemandedElts) const {
  assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
          V.getOpcode() == ISD::SRA) &&
         "Unknown shift node");
  unsigned BitWidth = V.getScalarValueSizeInBits();
  if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
    // Shifting more than the bitwidth is not valid.
    const APInt &ShAmt = SA->getAPIntValue();
    if (ShAmt.ult(BitWidth))
      return &ShAmt;
  }
  return nullptr;
}

const APInt *SelectionDAG::getValidMinimumShiftAmountConstant(
    SDValue V, const APInt &DemandedElts) const {
  assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
          V.getOpcode() == ISD::SRA) &&
         "Unknown shift node");
  if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
    return ValidAmt;
  unsigned BitWidth = V.getScalarValueSizeInBits();
  auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
  if (!BV)
    return nullptr;
  const APInt *MinShAmt = nullptr;
  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!SA)
      return nullptr;
    // Shifting more than the bitwidth is not valid.
    const APInt &ShAmt = SA->getAPIntValue();
    if (ShAmt.uge(BitWidth))
      return nullptr;
    if (MinShAmt && MinShAmt->ule(ShAmt))
      continue;
    MinShAmt = &ShAmt;
  }
  return MinShAmt;
}

const APInt *SelectionDAG::getValidMaximumShiftAmountConstant(
    SDValue V, const APInt &DemandedElts) const {
  assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
          V.getOpcode() == ISD::SRA) &&
         "Unknown shift node");
  if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
    return ValidAmt;
  unsigned BitWidth = V.getScalarValueSizeInBits();
  auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
  if (!BV)
    return nullptr;
  const APInt *MaxShAmt = nullptr;
  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!SA)
      return nullptr;
    // Shifting more than the bitwidth is not valid.
    const APInt &ShAmt = SA->getAPIntValue();
    if (ShAmt.uge(BitWidth))
      return nullptr;
    if (MaxShAmt && MaxShAmt->uge(ShAmt))
      continue;
    MaxShAmt = &ShAmt;
  }
  return MaxShAmt;
}
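// Example (illustrative): for a v2i32 SHL with shift amounts <4, 6> there is
// no single valid splat constant, but getValidMinimumShiftAmountConstant
// returns 4 and getValidMaximumShiftAmountConstant returns 6.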
/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. For vectors, the known bits are those that are shared by
/// every vector element.
KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
  EVT VT = Op.getValueType();

  // TODO: Until we have a plan for how to represent demanded elements for
  // scalable vectors, we can just bail out for now.
  if (Op.getValueType().isScalableVector()) {
    unsigned BitWidth = Op.getScalarValueSizeInBits();
    return KnownBits(BitWidth);
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return computeKnownBits(Op, DemandedElts, Depth);
}
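// Example (illustrative): computeKnownBits(getConstant(0xF0, dl, MVT::i8))
// returns Known.One = 0xF0 and Known.Zero = 0x0F; every bit of a constant
// is known.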
/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. The DemandedElts argument allows us to only collect the
/// known bits that are shared by the requested vector elements.
KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
                                         unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();

  KnownBits Known(BitWidth);   // Don't know anything.

  // TODO: Until we have a plan for how to represent demanded elements for
  // scalable vectors, we can just bail out for now.
  if (Op.getValueType().isScalableVector())
    return Known;

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
    // We know all of the bits for a constant!
    return KnownBits::makeConstant(C->getAPIntValue());
  }
  if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
    // We know all of the bits for a constant fp!
    return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
  }

  if (Depth >= MaxRecursionDepth)
    return Known; // Limit search depth.

  KnownBits Known2;
  unsigned NumElts = DemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  if (!DemandedElts)
    return Known; // No demanded elts, better to assume we don't know anything.

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded vector element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;

      SDValue SrcOp = Op.getOperand(i);
      Known2 = computeKnownBits(SrcOp, Depth + 1);

      // BUILD_VECTOR can implicitly truncate sources, we must handle this.
      if (SrcOp.getValueSizeInBits() != BitWidth) {
        assert(SrcOp.getValueSizeInBits() > BitWidth &&
               "Expected BUILD_VECTOR implicit truncation");
        Known2 = Known2.trunc(BitWidth);
      }

      // Known bits are the values that are shared by every demanded element.
      Known = KnownBits::commonBits(Known, Known2);

      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  case ISD::VECTOR_SHUFFLE: {
    // Collect the known bits that are shared by every vector element
    // referenced by the shuffle.
    APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
    Known.Zero.setAllBits(); Known.One.setAllBits();
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
    assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;

      int M = SVN->getMaskElt(i);
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        Known.resetAll();
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }

      if ((unsigned)M < NumElts)
        DemandedLHS.setBit((unsigned)M % NumElts);
      else
        DemandedRHS.setBit((unsigned)M % NumElts);
    }
    // Known bits are the values that are shared by every demanded element.
    if (!!DemandedLHS) {
      SDValue LHS = Op.getOperand(0);
      Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
      Known = KnownBits::commonBits(Known, Known2);
    }
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    if (!!DemandedRHS) {
      SDValue RHS = Op.getOperand(1);
      Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
      Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    // Split DemandedElts and test each of the demanded subvectors.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; i != NumSubVectors; ++i) {
      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
      if (!!DemandedSub) {
        SDValue Sub = Op.getOperand(i);
        Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
        Known = KnownBits::commonBits(Known, Known2);
      }
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    Known.One.setAllBits();
    Known.Zero.setAllBits();
    if (!!DemandedSubElts) {
      Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
      if (Known.isUnknown())
        break; // early-out.
    }
    if (!!DemandedSrcElts) {
      Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
      Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    // Bail until we can represent demanded elements for scalable vectors.
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
    Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
    break;
  }
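  // Example (illustrative): extracting a 4-element subvector at index 4 from
  // an 8-element source with DemandedElts = 0b0011 queries source elements
  // 4 and 5 (0b00110000) after the zero-extend and shift above.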
  case ISD::SCALAR_TO_VECTOR: {
    // We know as much about scalar_to_vector as we know about its source,
    // which becomes the first element of an otherwise unknown vector.
    if (DemandedElts != 1)
      break;

    SDValue N0 = Op.getOperand(0);
    Known = computeKnownBits(N0, Depth + 1);
    if (N0.getValueSizeInBits() != BitWidth)
      Known = Known.trunc(BitWidth);

    break;
  }
  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    EVT SubVT = N0.getValueType();
    unsigned SubBitWidth = SubVT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
      break;

    // Fast handling of 'identity' bitcasts.
    if (BitWidth == SubBitWidth) {
      Known = computeKnownBits(N0, DemandedElts, Depth + 1);
      break;
    }

    bool IsLE = getDataLayout().isLittleEndian();

    // Bitcast 'small element' vector to 'large element' scalar/vector.
    if ((BitWidth % SubBitWidth) == 0) {
      assert(N0.getValueType().isVector() && "Expected bitcast from vector");

      // Collect known bits for the (larger) output by collecting the known
      // bits from each set of sub elements and shift these into place.
      // We need to separately call computeKnownBits for each set of
      // sub elements as the knownbits for each is likely to be different.
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts(NumElts * SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);

      for (unsigned i = 0; i != SubScale; ++i) {
        Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
                                  Depth + 1);
        unsigned Shifts = IsLE ? i : SubScale - 1 - i;
        Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
        Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
      }
    }

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SubBitWidth % BitWidth) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");

      // Collect known bits for the (smaller) output by collecting the known
      // bits from the overlapping larger input elements and extracting the
      // sub sections we actually care about.
      unsigned SubScale = SubBitWidth / BitWidth;
      APInt SubDemandedElts(NumElts / SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i / SubScale);

      Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);

      Known.Zero.setAllBits(); Known.One.setAllBits();
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Shifts = IsLE ? i : NumElts - 1 - i;
          unsigned Offset = (Shifts % SubScale) * BitWidth;
          Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
          Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
          // If we don't know any bits, early out.
          if (Known.isUnknown())
            break;
        }
    }
    break;
  }
  case ISD::AND:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known &= Known2;
    break;
  case ISD::OR:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known |= Known2;
    break;
  case ISD::XOR:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known ^= Known2;
    break;
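  // Worked example for the AND case above: if the LHS is known to be ??10
  // and the RHS is known to be ?01?, the result is known to be ?010: a bit
  // is known zero if either side has a known zero there, and known one only
  // when both sides do.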
  case ISD::MUL: {
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = KnownBits::computeForMul(Known, Known2);
    break;
  }
  case ISD::UDIV: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::udiv(Known, Known2);
    break;
  }
  case ISD::SELECT:
  case ISD::VSELECT:
    Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SELECT_CC:
    Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.
    // If we know the result of a setcc has the top bits zero, use this info.
    // We know that we have an integer-based boolean since these operations
    // are only available for integers.
    if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  case ISD::SETCC:
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: {
    unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::shl(Known, Known2);

    // Minimum shift low bits are known zero.
    if (const APInt *ShMinAmt =
            getValidMinimumShiftAmountConstant(Op, DemandedElts))
      Known.Zero.setLowBits(ShMinAmt->getZExtValue());
    break;
  case ISD::SRL:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::lshr(Known, Known2);

    // Minimum shift high bits are known zero.
    if (const APInt *ShMinAmt =
            getValidMinimumShiftAmountConstant(Op, DemandedElts))
      Known.Zero.setHighBits(ShMinAmt->getZExtValue());
    break;
  case ISD::SRA:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::ashr(Known, Known2);
    // TODO: Add minimum shift high known sign bits.
    break;
  case ISD::FSHL:
  case ISD::FSHR:
    if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
      unsigned Amt = C->getAPIntValue().urem(BitWidth);

      // For fshl, 0-shift returns the 1st arg.
      // For fshr, 0-shift returns the 2nd arg.
      if (Amt == 0) {
        Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
                                 DemandedElts, Depth + 1);
        break;
      }

      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
      if (Opcode == ISD::FSHL) {
        Known.One <<= Amt;
        Known.Zero <<= Amt;
        Known2.One.lshrInPlace(BitWidth - Amt);
        Known2.Zero.lshrInPlace(BitWidth - Amt);
      } else {
        Known.One <<= BitWidth - Amt;
        Known.Zero <<= BitWidth - Amt;
        Known2.One.lshrInPlace(Amt);
        Known2.Zero.lshrInPlace(Amt);
      }
      Known.One |= Known2.One;
      Known.Zero |= Known2.Zero;
    }
    break;
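  // Worked example for the FSHL case above: fshl(X, Y, 3) on i8 computes
  // (X << 3) | (Y >> 5), so the low 3 result bits take whatever is known
  // about Y's top 3 bits and the remaining bits take X's known bits shifted
  // left by 3.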
  case ISD::SIGN_EXTEND_INREG: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    Known = Known.sextInReg(EVT.getScalarSizeInBits());
    break;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleTZ = Known2.countMaxTrailingZeros();
    unsigned LowBits = Log2_32(PossibleTZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleLZ = Known2.countMaxLeadingZeros();
    unsigned LowBits = Log2_32(PossibleLZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTPOP: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we know some of the bits are zero, they can't be one.
    unsigned PossibleOnes = Known2.countMaxPopulation();
    Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
    break;
  }
  case ISD::PARITY: {
    // Parity returns 0 everywhere but the LSB.
    Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
    if (ISD::isNON_EXTLoad(LD) && Cst) {
      // Determine any common known bits from the loaded constant pool value.
      Type *CstTy = Cst->getType();
      if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
        // If it's a vector splat, then we can (quickly) reuse the scalar path.
        // NOTE: We assume all elements match and none are UNDEF.
        if (CstTy->isVectorTy()) {
          if (const Constant *Splat = Cst->getSplatValue()) {
            Cst = Splat;
            CstTy = Cst->getType();
          }
        }
        // TODO - do we need to handle different bitwidths?
        if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
          // Iterate across all vector elements finding common known bits.
          Known.One.setAllBits();
          Known.Zero.setAllBits();
          for (unsigned i = 0; i != NumElts; ++i) {
            if (!DemandedElts[i])
              continue;
            if (Constant *Elt = Cst->getAggregateElement(i)) {
              if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                const APInt &Value = CInt->getValue();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
              if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                APInt Value = CFP->getValueAPF().bitcastToAPInt();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
            }
            Known.One.clearAllBits();
            Known.Zero.clearAllBits();
            break;
          }
        } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
          if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
            Known = KnownBits::makeConstant(CInt->getValue());
          } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
            Known =
                KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
          }
        }
      }
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      if (LD->getExtensionType() == ISD::NON_EXTLOAD)
        computeKnownBitsFromRangeMetadata(*Ranges, Known);
    }
    break;
  }
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::ZERO_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    Known = Known.anyext(BitWidth);
    break;
  }
  case ISD::ANY_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.anyext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    Known = computeKnownBits(Op.getOperand(0), Depth+1);
    Known.Zero |= (~InMask);
    Known.One &= (~Known.Zero);
    break;
  }
  case ISD::AssertAlign: {
    unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
    assert(LogOfAlign != 0);
    // If a node is guaranteed to be aligned, set low zero bits accordingly as
    // well as clearing one bits.
    Known.Zero.setLowBits(LogOfAlign);
    Known.One.clearLowBits(LogOfAlign);
    break;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    Known.Zero.setBitsFrom(1);
    break;
  case ISD::USUBO:
  case ISD::SSUBO:
    if (Op.getResNo() == 1) {
      // If we know the result of a setcc has the top bits zero, use this info.
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::SUB:
  case ISD::SUBC: {
    assert(Op.getResNo() == 0 &&
           "We only compute knownbits for the difference here.");

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
                                        Known, Known2);
    break;
  }
  case ISD::UADDO:
  case ISD::SADDO:
  case ISD::ADDCARRY:
    if (Op.getResNo() == 1) {
      // If we know the result of a setcc has the top bits zero, use this info.
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::ADD:
  case ISD::ADDC:
  case ISD::ADDE: {
    assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");

    // With ADDE and ADDCARRY, a carry bit may be added in.
    KnownBits Carry(1);
    if (Opcode == ISD::ADDE)
      // Can't track carry from glue, set carry to unknown.
      Carry.resetAll();
    else if (Opcode == ISD::ADDCARRY)
      // TODO: Compute known bits for the carry operand. Not sure if it is
      // worth the trouble (how often will we find a known carry bit), and it
      // hasn't been tested very much yet, but something like this might work:
      //   Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
      //   Carry = Carry.zextOrTrunc(1, false);
      Carry.resetAll();
    else
      Carry.setAllZero();

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
    break;
  }
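  // Worked example for the ADD case above: if one operand has its low four
  // bits known zero and the other is the constant 3, computeForAddCarry
  // proves the low four bits of the sum are 0011.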
3183 Known = Known.sext(BitWidth); 3184 break; 3185 } 3186 case ISD::ANY_EXTEND_VECTOR_INREG: { 3187 EVT InVT = Op.getOperand(0).getValueType(); 3188 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3189 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3190 Known = Known.anyext(BitWidth); 3191 break; 3192 } 3193 case ISD::ANY_EXTEND: { 3194 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3195 Known = Known.anyext(BitWidth); 3196 break; 3197 } 3198 case ISD::TRUNCATE: { 3199 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3200 Known = Known.trunc(BitWidth); 3201 break; 3202 } 3203 case ISD::AssertZext: { 3204 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 3205 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 3206 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3207 Known.Zero |= (~InMask); 3208 Known.One &= (~Known.Zero); 3209 break; 3210 } 3211 case ISD::AssertAlign: { 3212 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign()); 3213 assert(LogOfAlign != 0); 3214 // If a node is guaranteed to be aligned, set low zero bits accordingly as 3215 // well as clearing one bits. 3216 Known.Zero.setLowBits(LogOfAlign); 3217 Known.One.clearLowBits(LogOfAlign); 3218 break; 3219 } 3220 case ISD::FGETSIGN: 3221 // All bits are zero except the low bit. 3222 Known.Zero.setBitsFrom(1); 3223 break; 3224 case ISD::USUBO: 3225 case ISD::SSUBO: 3226 if (Op.getResNo() == 1) { 3227 // If we know the result of a setcc has the top bits zero, use this info. 3228 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3229 TargetLowering::ZeroOrOneBooleanContent && 3230 BitWidth > 1) 3231 Known.Zero.setBitsFrom(1); 3232 break; 3233 } 3234 LLVM_FALLTHROUGH; 3235 case ISD::SUB: 3236 case ISD::SUBC: { 3237 assert(Op.getResNo() == 0 && 3238 "We only compute knownbits for the difference here."); 3239 3240 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3241 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3242 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false, 3243 Known, Known2); 3244 break; 3245 } 3246 case ISD::UADDO: 3247 case ISD::SADDO: 3248 case ISD::ADDCARRY: 3249 if (Op.getResNo() == 1) { 3250 // If we know the result of a setcc has the top bits zero, use this info. 3251 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3252 TargetLowering::ZeroOrOneBooleanContent && 3253 BitWidth > 1) 3254 Known.Zero.setBitsFrom(1); 3255 break; 3256 } 3257 LLVM_FALLTHROUGH; 3258 case ISD::ADD: 3259 case ISD::ADDC: 3260 case ISD::ADDE: { 3261 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here."); 3262 3263 // With ADDE and ADDCARRY, a carry bit may be added in. 3264 KnownBits Carry(1); 3265 if (Opcode == ISD::ADDE) 3266 // Can't track carry from glue, set carry to unknown. 3267 Carry.resetAll(); 3268 else if (Opcode == ISD::ADDCARRY) 3269 // TODO: Compute known bits for the carry operand. Not sure if it is worth 3270 // the trouble (how often will we find a known carry bit). 
This is
3271 // untested, but something like the following might work:
3272 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3273 // Carry = Carry.zextOrTrunc(1, false);
3274 Carry.resetAll();
3275 else
3276 Carry.setAllZero();
3277
3278 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3279 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3280 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3281 break;
3282 }
3283 case ISD::SREM: {
3284 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3285 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3286 Known = KnownBits::srem(Known, Known2);
3287 break;
3288 }
3289 case ISD::UREM: {
3290 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3291 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3292 Known = KnownBits::urem(Known, Known2);
3293 break;
3294 }
3295 case ISD::EXTRACT_ELEMENT: {
3296 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3297 const unsigned Index = Op.getConstantOperandVal(1);
3298 const unsigned EltBitWidth = Op.getValueSizeInBits();
3299
3300 // Remove the low part of the known bits mask.
3301 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3302 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3303
3304 // Remove the high part of the known bits mask.
3305 Known = Known.trunc(EltBitWidth);
3306 break;
3307 }
3308 case ISD::EXTRACT_VECTOR_ELT: {
3309 SDValue InVec = Op.getOperand(0);
3310 SDValue EltNo = Op.getOperand(1);
3311 EVT VecVT = InVec.getValueType();
3312 // computeKnownBits not yet implemented for scalable vectors.
3313 if (VecVT.isScalableVector())
3314 break;
3315 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3316 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3317
3318 // If BitWidth > EltBitWidth the value is any-extended, so we do not know
3319 // anything about the extended bits.
3320 if (BitWidth > EltBitWidth)
3321 Known = Known.trunc(EltBitWidth);
3322
3323 // If we know the element index, just demand that vector element, else for
3324 // an unknown element index, ignore DemandedElts and demand them all.
3325 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3326 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3327 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3328 DemandedSrcElts =
3329 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3330
3331 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
3332 if (BitWidth > EltBitWidth)
3333 Known = Known.anyext(BitWidth);
3334 break;
3335 }
3336 case ISD::INSERT_VECTOR_ELT: {
3337 // If we know the element index, split the demand between the
3338 // source vector and the inserted element, otherwise assume we need
3339 // the original demanded vector elements and the value.
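// (For illustration: inserting into element 2 of a v4i32 with
// DemandedElts = 0b1100 demands the inserted value, and the source vector
// is queried with DemandedElts = 0b1000.)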
3340 SDValue InVec = Op.getOperand(0); 3341 SDValue InVal = Op.getOperand(1); 3342 SDValue EltNo = Op.getOperand(2); 3343 bool DemandedVal = true; 3344 APInt DemandedVecElts = DemandedElts; 3345 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3346 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3347 unsigned EltIdx = CEltNo->getZExtValue(); 3348 DemandedVal = !!DemandedElts[EltIdx]; 3349 DemandedVecElts.clearBit(EltIdx); 3350 } 3351 Known.One.setAllBits(); 3352 Known.Zero.setAllBits(); 3353 if (DemandedVal) { 3354 Known2 = computeKnownBits(InVal, Depth + 1); 3355 Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth)); 3356 } 3357 if (!!DemandedVecElts) { 3358 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1); 3359 Known = KnownBits::commonBits(Known, Known2); 3360 } 3361 break; 3362 } 3363 case ISD::BITREVERSE: { 3364 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3365 Known = Known2.reverseBits(); 3366 break; 3367 } 3368 case ISD::BSWAP: { 3369 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3370 Known = Known2.byteSwap(); 3371 break; 3372 } 3373 case ISD::ABS: { 3374 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3375 Known = Known2.abs(); 3376 break; 3377 } 3378 case ISD::USUBSAT: { 3379 // The result of usubsat will never be larger than the LHS. 3380 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3381 Known.Zero.setHighBits(Known2.countMinLeadingZeros()); 3382 break; 3383 } 3384 case ISD::UMIN: { 3385 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3386 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3387 Known = KnownBits::umin(Known, Known2); 3388 break; 3389 } 3390 case ISD::UMAX: { 3391 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3392 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3393 Known = KnownBits::umax(Known, Known2); 3394 break; 3395 } 3396 case ISD::SMIN: 3397 case ISD::SMAX: { 3398 // If we have a clamp pattern, we know that the number of sign bits will be 3399 // the minimum of the clamp min/max range. 3400 bool IsMax = (Opcode == ISD::SMAX); 3401 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3402 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3403 if (Op.getOperand(0).getOpcode() == (IsMax ? 
ISD::SMIN : ISD::SMAX))
3404 CstHigh =
3405 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3406 if (CstLow && CstHigh) {
3407 if (!IsMax)
3408 std::swap(CstLow, CstHigh);
3409
3410 const APInt &ValueLow = CstLow->getAPIntValue();
3411 const APInt &ValueHigh = CstHigh->getAPIntValue();
3412 if (ValueLow.sle(ValueHigh)) {
3413 unsigned LowSignBits = ValueLow.getNumSignBits();
3414 unsigned HighSignBits = ValueHigh.getNumSignBits();
3415 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3416 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3417 Known.One.setHighBits(MinSignBits);
3418 break;
3419 }
3420 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3421 Known.Zero.setHighBits(MinSignBits);
3422 break;
3423 }
3424 }
3425 }
3426
3427 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3428 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3429 if (IsMax)
3430 Known = KnownBits::smax(Known, Known2);
3431 else
3432 Known = KnownBits::smin(Known, Known2);
3433 break;
3434 }
3435 case ISD::FrameIndex:
3436 case ISD::TargetFrameIndex:
3437 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
3438 Known, getMachineFunction());
3439 break;
3440
3441 default:
3442 if (Opcode < ISD::BUILTIN_OP_END)
3443 break;
3444 LLVM_FALLTHROUGH;
3445 case ISD::INTRINSIC_WO_CHAIN:
3446 case ISD::INTRINSIC_W_CHAIN:
3447 case ISD::INTRINSIC_VOID:
3448 // Allow the target to implement this method for its nodes.
3449 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3450 break;
3451 }
3452
3453 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3454 return Known;
3455 }
3456
3457 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3458 SDValue N1) const {
3459 // X + 0 never overflows.
3460 if (isNullConstant(N1))
3461 return OFK_Never;
3462
3463 KnownBits N1Known = computeKnownBits(N1);
3464 if (N1Known.Zero.getBoolValue()) {
3465 KnownBits N0Known = computeKnownBits(N0);
3466
3467 bool overflow;
3468 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3469 if (!overflow)
3470 return OFK_Never;
3471 }
3472
3473 // mulhi + 1 never overflows.
3474 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3475 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3476 return OFK_Never;
3477
3478 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3479 KnownBits N0Known = computeKnownBits(N0);
3480
3481 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3482 return OFK_Never;
3483 }
3484
3485 return OFK_Sometime;
3486 }
3487
3488 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3489 EVT OpVT = Val.getValueType();
3490 unsigned BitWidth = OpVT.getScalarSizeInBits();
3491
3492 // Is the constant a known power of 2?
3493 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3494 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3495
3496 // A left-shift of a constant one will have exactly one bit set because
3497 // shifting the bit off the end is undefined.
3498 if (Val.getOpcode() == ISD::SHL) {
3499 auto *C = isConstOrConstSplat(Val.getOperand(0));
3500 if (C && C->getAPIntValue() == 1)
3501 return true;
3502 }
3503
3504 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3505 // one bit set.
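// (For illustration: for i8, (srl 0x80, C) is 0x80 >> C, which has exactly
// one bit set for any in-range shift amount.)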
3506 if (Val.getOpcode() == ISD::SRL) { 3507 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3508 if (C && C->getAPIntValue().isSignMask()) 3509 return true; 3510 } 3511 3512 // Are all operands of a build vector constant powers of two? 3513 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3514 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3515 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3516 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3517 return false; 3518 })) 3519 return true; 3520 3521 // More could be done here, though the above checks are enough 3522 // to handle some common cases. 3523 3524 // Fall back to computeKnownBits to catch other known cases. 3525 KnownBits Known = computeKnownBits(Val); 3526 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3527 } 3528 3529 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3530 EVT VT = Op.getValueType(); 3531 3532 // TODO: Assume we don't know anything for now. 3533 if (VT.isScalableVector()) 3534 return 1; 3535 3536 APInt DemandedElts = VT.isVector() 3537 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 3538 : APInt(1, 1); 3539 return ComputeNumSignBits(Op, DemandedElts, Depth); 3540 } 3541 3542 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3543 unsigned Depth) const { 3544 EVT VT = Op.getValueType(); 3545 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3546 unsigned VTBits = VT.getScalarSizeInBits(); 3547 unsigned NumElts = DemandedElts.getBitWidth(); 3548 unsigned Tmp, Tmp2; 3549 unsigned FirstAnswer = 1; 3550 3551 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3552 const APInt &Val = C->getAPIntValue(); 3553 return Val.getNumSignBits(); 3554 } 3555 3556 if (Depth >= MaxRecursionDepth) 3557 return 1; // Limit search depth. 3558 3559 if (!DemandedElts || VT.isScalableVector()) 3560 return 1; // No demanded elts, better to assume we don't know anything. 3561 3562 unsigned Opcode = Op.getOpcode(); 3563 switch (Opcode) { 3564 default: break; 3565 case ISD::AssertSext: 3566 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3567 return VTBits-Tmp+1; 3568 case ISD::AssertZext: 3569 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3570 return VTBits-Tmp; 3571 3572 case ISD::BUILD_VECTOR: 3573 Tmp = VTBits; 3574 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3575 if (!DemandedElts[i]) 3576 continue; 3577 3578 SDValue SrcOp = Op.getOperand(i); 3579 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1); 3580 3581 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3582 if (SrcOp.getValueSizeInBits() != VTBits) { 3583 assert(SrcOp.getValueSizeInBits() > VTBits && 3584 "Expected BUILD_VECTOR implicit truncation"); 3585 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3586 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3587 } 3588 Tmp = std::min(Tmp, Tmp2); 3589 } 3590 return Tmp; 3591 3592 case ISD::VECTOR_SHUFFLE: { 3593 // Collect the minimum number of sign bits that are shared by every vector 3594 // element referenced by the shuffle. 
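// Mask entries below NumElts pick from operand 0 and the rest from operand
// 1; an UNDEF mask element (-1) forces the conservative answer of 1 below.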
3595 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3596 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3597 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3598 for (unsigned i = 0; i != NumElts; ++i) {
3599 int M = SVN->getMaskElt(i);
3600 if (!DemandedElts[i])
3601 continue;
3602 // For UNDEF elements, we don't know anything about the common state of
3603 // the shuffle result.
3604 if (M < 0)
3605 return 1;
3606 if ((unsigned)M < NumElts)
3607 DemandedLHS.setBit((unsigned)M % NumElts);
3608 else
3609 DemandedRHS.setBit((unsigned)M % NumElts);
3610 }
3611 Tmp = std::numeric_limits<unsigned>::max();
3612 if (!!DemandedLHS)
3613 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3614 if (!!DemandedRHS) {
3615 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3616 Tmp = std::min(Tmp, Tmp2);
3617 }
3618 // If we don't know anything, early out and try computeKnownBits fall-back.
3619 if (Tmp == 1)
3620 break;
3621 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3622 return Tmp;
3623 }
3624
3625 case ISD::BITCAST: {
3626 SDValue N0 = Op.getOperand(0);
3627 EVT SrcVT = N0.getValueType();
3628 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3629
3630 // Ignore bitcasts from unsupported types.
3631 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3632 break;
3633
3634 // Fast handling of 'identity' bitcasts.
3635 if (VTBits == SrcBits)
3636 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3637
3638 bool IsLE = getDataLayout().isLittleEndian();
3639
3640 // Bitcast 'large element' scalar/vector to 'small element' vector.
3641 if ((SrcBits % VTBits) == 0) {
3642 assert(VT.isVector() && "Expected bitcast to vector");
3643
3644 unsigned Scale = SrcBits / VTBits;
3645 APInt SrcDemandedElts(NumElts / Scale, 0);
3646 for (unsigned i = 0; i != NumElts; ++i)
3647 if (DemandedElts[i])
3648 SrcDemandedElts.setBit(i / Scale);
3649
3650 // Fast case - sign splat can be simply split across the small elements.
3651 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3652 if (Tmp == SrcBits)
3653 return VTBits;
3654
3655 // Slow case - determine how far the sign extends into each sub-element.
3656 Tmp2 = VTBits;
3657 for (unsigned i = 0; i != NumElts; ++i)
3658 if (DemandedElts[i]) {
3659 unsigned SubOffset = i % Scale;
3660 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3661 SubOffset = SubOffset * VTBits;
3662 if (Tmp <= SubOffset)
3663 return 1;
3664 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3665 }
3666 return Tmp2;
3667 }
3668 break;
3669 }
3670
3671 case ISD::SIGN_EXTEND:
3672 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3673 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3674 case ISD::SIGN_EXTEND_INREG:
3675 // Max of the input and what this extends.
3676 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3677 Tmp = VTBits-Tmp+1;
3678 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3679 return std::max(Tmp, Tmp2);
3680 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3681 SDValue Src = Op.getOperand(0);
3682 EVT SrcVT = Src.getValueType();
3683 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3684 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3685 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3686 }
3687 case ISD::SRA:
3688 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3689 // SRA X, C -> adds C sign bits.
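// (For illustration: if X has 3 known sign bits, (sra X, 4) has
// min(3 + 4, VTBits) of them.)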
3690 if (const APInt *ShAmt =
3691 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3692 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3693 return Tmp;
3694 case ISD::SHL:
3695 if (const APInt *ShAmt =
3696 getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
3697 // shl destroys sign bits, ensure it doesn't shift out all sign bits.
3698 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3699 if (ShAmt->ult(Tmp))
3700 return Tmp - ShAmt->getZExtValue();
3701 }
3702 break;
3703 case ISD::AND:
3704 case ISD::OR:
3705 case ISD::XOR: // NOT is handled here.
3706 // Logical binary ops preserve the number of sign bits at worst.
3707 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3708 if (Tmp != 1) {
3709 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3710 FirstAnswer = std::min(Tmp, Tmp2);
3711 // We computed what we know about the sign bits as our first
3712 // answer. Now proceed to the generic code that uses
3713 // computeKnownBits, and pick whichever answer is better.
3714 }
3715 break;
3716
3717 case ISD::SELECT:
3718 case ISD::VSELECT:
3719 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3720 if (Tmp == 1) return 1; // Early out.
3721 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3722 return std::min(Tmp, Tmp2);
3723 case ISD::SELECT_CC:
3724 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3725 if (Tmp == 1) return 1; // Early out.
3726 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3727 return std::min(Tmp, Tmp2);
3728
3729 case ISD::SMIN:
3730 case ISD::SMAX: {
3731 // If we have a clamp pattern, we know that the number of sign bits will be
3732 // the minimum of the clamp min/max range.
3733 bool IsMax = (Opcode == ISD::SMAX);
3734 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3735 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3736 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3737 CstHigh =
3738 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3739 if (CstLow && CstHigh) {
3740 if (!IsMax)
3741 std::swap(CstLow, CstHigh);
3742 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3743 Tmp = CstLow->getAPIntValue().getNumSignBits();
3744 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3745 return std::min(Tmp, Tmp2);
3746 }
3747 }
3748
3749 // Fallback - just get the minimum number of sign bits of the operands.
3750 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3751 if (Tmp == 1)
3752 return 1; // Early out.
3753 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3754 return std::min(Tmp, Tmp2);
3755 }
3756 case ISD::UMIN:
3757 case ISD::UMAX:
3758 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3759 if (Tmp == 1)
3760 return 1; // Early out.
3761 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3762 return std::min(Tmp, Tmp2);
3763 case ISD::SADDO:
3764 case ISD::UADDO:
3765 case ISD::SSUBO:
3766 case ISD::USUBO:
3767 case ISD::SMULO:
3768 case ISD::UMULO:
3769 if (Op.getResNo() != 1)
3770 break;
3771 // The boolean result conforms to getBooleanContents. Fall through.
3772 // If setcc returns 0/-1, all bits are sign bits.
3773 // We know that we have an integer-based boolean since these operations
3774 // are only available for integers.
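// (For illustration: with ZeroOrNegativeOneBooleanContent an i32 boolean
// is either 0 or -1, so all 32 bits match the sign bit.)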
3775 if (TLI->getBooleanContents(VT.isVector(), false) ==
3776 TargetLowering::ZeroOrNegativeOneBooleanContent)
3777 return VTBits;
3778 break;
3779 case ISD::SETCC:
3780 case ISD::STRICT_FSETCC:
3781 case ISD::STRICT_FSETCCS: {
3782 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3783 // If setcc returns 0/-1, all bits are sign bits.
3784 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3785 TargetLowering::ZeroOrNegativeOneBooleanContent)
3786 return VTBits;
3787 break;
3788 }
3789 case ISD::ROTL:
3790 case ISD::ROTR:
3791 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3792
3793 // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
3794 if (Tmp == VTBits)
3795 return VTBits;
3796
3797 if (ConstantSDNode *C =
3798 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3799 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3800
3801 // Handle rotate right by N like a rotate left by VTBits-N.
3802 if (Opcode == ISD::ROTR)
3803 RotAmt = (VTBits - RotAmt) % VTBits;
3804
3805 // If we aren't rotating out all of the known-in sign bits, return the
3806 // number that are left. This handles rotl(sext(x), 1) for example.
3807 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3808 }
3809 break;
3810 case ISD::ADD:
3811 case ISD::ADDC:
3812 // Add can have at most one carry bit. Thus we know that the output
3813 // is, at worst, one more bit than the inputs.
3814 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3815 if (Tmp == 1) return 1; // Early out.
3816
3817 // Special case decrementing a value (ADD X, -1):
3818 if (ConstantSDNode *CRHS =
3819 isConstOrConstSplat(Op.getOperand(1), DemandedElts))
3820 if (CRHS->isAllOnesValue()) {
3821 KnownBits Known =
3822 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3823
3824 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3825 // sign bits set.
3826 if ((Known.Zero | 1).isAllOnesValue())
3827 return VTBits;
3828
3829 // If we are subtracting one from a positive number, there is no carry
3830 // out of the result.
3831 if (Known.isNonNegative())
3832 return Tmp;
3833 }
3834
3835 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3836 if (Tmp2 == 1) return 1; // Early out.
3837 return std::min(Tmp, Tmp2) - 1;
3838 case ISD::SUB:
3839 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3840 if (Tmp2 == 1) return 1; // Early out.
3841
3842 // Handle NEG.
3843 if (ConstantSDNode *CLHS =
3844 isConstOrConstSplat(Op.getOperand(0), DemandedElts))
3845 if (CLHS->isNullValue()) {
3846 KnownBits Known =
3847 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3848 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3849 // sign bits set.
3850 if ((Known.Zero | 1).isAllOnesValue())
3851 return VTBits;
3852
3853 // If the input is known to be positive (the sign bit is known clear),
3854 // the output of the NEG has the same number of sign bits as the input.
3855 if (Known.isNonNegative())
3856 return Tmp2;
3857
3858 // Otherwise, we treat this like a SUB.
3859 }
3860
3861 // Sub can have at most one carry bit. Thus we know that the output
3862 // is, at worst, one more bit than the inputs.
3863 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3864 if (Tmp == 1) return 1; // Early out.
3865 return std::min(Tmp, Tmp2) - 1;
3866 case ISD::MUL: {
3867 // The output of the Mul can be at most twice the valid bits in the inputs.
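// (For illustration: i32 operands with 20 and 24 sign bits need
// (32 - 20 + 1) + (32 - 24 + 1) = 22 value bits, leaving
// 32 - 22 + 1 = 11 known sign bits in the product.)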
3868 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3869 if (SignBitsOp0 == 1)
3870 break;
3871 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3872 if (SignBitsOp1 == 1)
3873 break;
3874 unsigned OutValidBits =
3875 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
3876 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
3877 }
3878 case ISD::SREM:
3879 // The sign bit is the LHS's sign bit, except when the result of the
3880 // remainder is zero. The magnitude of the result should be less than or
3881 // equal to the magnitude of the LHS. Therefore, the result should have
3882 // at least as many sign bits as the left hand side.
3883 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3884 case ISD::TRUNCATE: {
3885 // Check if the sign bits of the source go down as far as the truncated value.
3886 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3887 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3888 if (NumSrcSignBits > (NumSrcBits - VTBits))
3889 return NumSrcSignBits - (NumSrcBits - VTBits);
3890 break;
3891 }
3892 case ISD::EXTRACT_ELEMENT: {
3893 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3894 const int BitWidth = Op.getValueSizeInBits();
3895 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3896
3897 // Get the reverse index (starting from 1); operand 1 indexes elements from
3898 // the little end, while the sign starts at the big end.
3899 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3900
3901 // If the sign portion ends in our element, the subtraction gives the correct
3902 // result; otherwise it yields a negative or larger-than-bitwidth value.
3903 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3904 }
3905 case ISD::INSERT_VECTOR_ELT: {
3906 // If we know the element index, split the demand between the
3907 // source vector and the inserted element, otherwise assume we need
3908 // the original demanded vector elements and the value.
3909 SDValue InVec = Op.getOperand(0);
3910 SDValue InVal = Op.getOperand(1);
3911 SDValue EltNo = Op.getOperand(2);
3912 bool DemandedVal = true;
3913 APInt DemandedVecElts = DemandedElts;
3914 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3915 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3916 unsigned EltIdx = CEltNo->getZExtValue();
3917 DemandedVal = !!DemandedElts[EltIdx];
3918 DemandedVecElts.clearBit(EltIdx);
3919 }
3920 Tmp = std::numeric_limits<unsigned>::max();
3921 if (DemandedVal) {
3922 // TODO - handle implicit truncation of inserted elements.
3923 if (InVal.getScalarValueSizeInBits() != VTBits)
3924 break;
3925 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3926 Tmp = std::min(Tmp, Tmp2);
3927 }
3928 if (!!DemandedVecElts) {
3929 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
3930 Tmp = std::min(Tmp, Tmp2);
3931 }
3932 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3933 return Tmp;
3934 }
3935 case ISD::EXTRACT_VECTOR_ELT: {
3936 SDValue InVec = Op.getOperand(0);
3937 SDValue EltNo = Op.getOperand(1);
3938 EVT VecVT = InVec.getValueType();
3939 // ComputeNumSignBits not yet implemented for scalable vectors.
3940 if (VecVT.isScalableVector())
3941 break;
3942 const unsigned BitWidth = Op.getValueSizeInBits();
3943 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3944 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3945
3946 // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3947 // anything about sign bits. But if the sizes match we can derive knowledge
3948 // about sign bits from the vector operand.
3949 if (BitWidth != EltBitWidth)
3950 break;
3951
3952 // If we know the element index, just demand that vector element, else for
3953 // an unknown element index, ignore DemandedElts and demand them all.
3954 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3955 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3956 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3957 DemandedSrcElts =
3958 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3959
3960 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3961 }
3962 case ISD::EXTRACT_SUBVECTOR: {
3963 // Offset the demanded elts by the subvector index.
3964 SDValue Src = Op.getOperand(0);
3965 // Bail until we can represent demanded elements for scalable vectors.
3966 if (Src.getValueType().isScalableVector())
3967 break;
3968 uint64_t Idx = Op.getConstantOperandVal(1);
3969 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3970 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3971 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3972 }
3973 case ISD::CONCAT_VECTORS: {
3974 // Determine the minimum number of sign bits across all demanded
3975 // elts of the input vectors. Early out if the result is already 1.
3976 Tmp = std::numeric_limits<unsigned>::max();
3977 EVT SubVectorVT = Op.getOperand(0).getValueType();
3978 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3979 unsigned NumSubVectors = Op.getNumOperands();
3980 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3981 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3982 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3983 if (!DemandedSub)
3984 continue;
3985 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3986 Tmp = std::min(Tmp, Tmp2);
3987 }
3988 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3989 return Tmp;
3990 }
3991 case ISD::INSERT_SUBVECTOR: {
3992 // Demand any elements from the subvector and the remainder from the src
3993 // it's inserted into.
3994 SDValue Src = Op.getOperand(0);
3995 SDValue Sub = Op.getOperand(1);
3996 uint64_t Idx = Op.getConstantOperandVal(2);
3997 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3998 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3999 APInt DemandedSrcElts = DemandedElts;
4000 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
4001
4002 Tmp = std::numeric_limits<unsigned>::max();
4003 if (!!DemandedSubElts) {
4004 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
4005 if (Tmp == 1)
4006 return 1; // early-out
4007 }
4008 if (!!DemandedSrcElts) {
4009 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4010 Tmp = std::min(Tmp, Tmp2);
4011 }
4012 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4013 return Tmp;
4014 }
4015 }
4016
4017 // If we are looking at the loaded value of the SDNode.
4018 if (Op.getResNo() == 0) {
4019 // Handle LOADX separately here. The EXTLOAD case will fall through.
4020 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 4021 unsigned ExtType = LD->getExtensionType(); 4022 switch (ExtType) { 4023 default: break; 4024 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known. 4025 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 4026 return VTBits - Tmp + 1; 4027 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known. 4028 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 4029 return VTBits - Tmp; 4030 case ISD::NON_EXTLOAD: 4031 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) { 4032 // We only need to handle vectors - computeKnownBits should handle 4033 // scalar cases. 4034 Type *CstTy = Cst->getType(); 4035 if (CstTy->isVectorTy() && 4036 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) { 4037 Tmp = VTBits; 4038 for (unsigned i = 0; i != NumElts; ++i) { 4039 if (!DemandedElts[i]) 4040 continue; 4041 if (Constant *Elt = Cst->getAggregateElement(i)) { 4042 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 4043 const APInt &Value = CInt->getValue(); 4044 Tmp = std::min(Tmp, Value.getNumSignBits()); 4045 continue; 4046 } 4047 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 4048 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 4049 Tmp = std::min(Tmp, Value.getNumSignBits()); 4050 continue; 4051 } 4052 } 4053 // Unknown type. Conservatively assume no bits match sign bit. 4054 return 1; 4055 } 4056 return Tmp; 4057 } 4058 } 4059 break; 4060 } 4061 } 4062 } 4063 4064 // Allow the target to implement this method for its nodes. 4065 if (Opcode >= ISD::BUILTIN_OP_END || 4066 Opcode == ISD::INTRINSIC_WO_CHAIN || 4067 Opcode == ISD::INTRINSIC_W_CHAIN || 4068 Opcode == ISD::INTRINSIC_VOID) { 4069 unsigned NumBits = 4070 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 4071 if (NumBits > 1) 4072 FirstAnswer = std::max(FirstAnswer, NumBits); 4073 } 4074 4075 // Finally, if we can prove that the top bits of the result are 0's or 1's, 4076 // use this information. 4077 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); 4078 4079 APInt Mask; 4080 if (Known.isNonNegative()) { // sign bit is 0 4081 Mask = Known.Zero; 4082 } else if (Known.isNegative()) { // sign bit is 1; 4083 Mask = Known.One; 4084 } else { 4085 // Nothing known. 4086 return FirstAnswer; 4087 } 4088 4089 // Okay, we know that the sign bit in Mask is set. Use CLO to determine 4090 // the number of identical bits in the top of the input value. 4091 Mask <<= Mask.getBitWidth()-VTBits; 4092 return std::max(FirstAnswer, Mask.countLeadingOnes()); 4093 } 4094 4095 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 4096 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 4097 !isa<ConstantSDNode>(Op.getOperand(1))) 4098 return false; 4099 4100 if (Op.getOpcode() == ISD::OR && 4101 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) 4102 return false; 4103 4104 return true; 4105 } 4106 4107 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { 4108 // If we're told that NaNs won't happen, assume they won't. 4109 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) 4110 return true; 4111 4112 if (Depth >= MaxRecursionDepth) 4113 return false; // Limit search depth. 4114 4115 // TODO: Handle vectors. 4116 // If the value is a constant, we can obviously see if it is a NaN or not. 
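// Note that when SNaN is true the query only rules out signaling NaNs, so
// a quiet NaN constant still satisfies it.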
4117 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4118 return !C->getValueAPF().isNaN() ||
4119 (SNaN && !C->getValueAPF().isSignaling());
4120 }
4121
4122 unsigned Opcode = Op.getOpcode();
4123 switch (Opcode) {
4124 case ISD::FADD:
4125 case ISD::FSUB:
4126 case ISD::FMUL:
4127 case ISD::FDIV:
4128 case ISD::FREM:
4129 case ISD::FSIN:
4130 case ISD::FCOS: {
4131 if (SNaN)
4132 return true;
4133 // TODO: Need isKnownNeverInfinity
4134 return false;
4135 }
4136 case ISD::FCANONICALIZE:
4137 case ISD::FEXP:
4138 case ISD::FEXP2:
4139 case ISD::FTRUNC:
4140 case ISD::FFLOOR:
4141 case ISD::FCEIL:
4142 case ISD::FROUND:
4143 case ISD::FROUNDEVEN:
4144 case ISD::FRINT:
4145 case ISD::FNEARBYINT: {
4146 if (SNaN)
4147 return true;
4148 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4149 }
4150 case ISD::FABS:
4151 case ISD::FNEG:
4152 case ISD::FCOPYSIGN: {
4153 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4154 }
4155 case ISD::SELECT:
4156 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4157 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4158 case ISD::FP_EXTEND:
4159 case ISD::FP_ROUND: {
4160 if (SNaN)
4161 return true;
4162 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4163 }
4164 case ISD::SINT_TO_FP:
4165 case ISD::UINT_TO_FP:
4166 return true;
4167 case ISD::FMA:
4168 case ISD::FMAD: {
4169 if (SNaN)
4170 return true;
4171 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4172 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4173 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4174 }
4175 case ISD::FSQRT: // Need to know the operand is non-negative.
4176 case ISD::FLOG:
4177 case ISD::FLOG2:
4178 case ISD::FLOG10:
4179 case ISD::FPOWI:
4180 case ISD::FPOW: {
4181 if (SNaN)
4182 return true;
4183 // TODO: Refine on operand
4184 return false;
4185 }
4186 case ISD::FMINNUM:
4187 case ISD::FMAXNUM: {
4188 // Only one needs to be known not-NaN, since it will be returned if the
4189 // other ends up being one.
4190 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4191 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4192 }
4193 case ISD::FMINNUM_IEEE:
4194 case ISD::FMAXNUM_IEEE: {
4195 if (SNaN)
4196 return true;
4197 // This can return a NaN if either operand is an sNaN, or if both operands
4198 // are NaN.
4199 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4200 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4201 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4202 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4203 }
4204 case ISD::FMINIMUM:
4205 case ISD::FMAXIMUM: {
4206 // TODO: Does this quiet or return the original NaN as-is?
4207 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4208 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4209 }
4210 case ISD::EXTRACT_VECTOR_ELT: {
4211 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4212 }
4213 default:
4214 if (Opcode >= ISD::BUILTIN_OP_END ||
4215 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4216 Opcode == ISD::INTRINSIC_W_CHAIN ||
4217 Opcode == ISD::INTRINSIC_VOID) {
4218 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4219 }
4220
4221 return false;
4222 }
4223 }
4224
4225 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4226 assert(Op.getValueType().isFloatingPoint() &&
4227 "Floating point type expected");
4228
4229 // If the value is a constant, we can obviously see if it is a zero or not.
4230 // TODO: Add BuildVector support.
4231 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4232 return !C->isZero();
4233 return false;
4234 }
4235
4236 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4237 assert(!Op.getValueType().isFloatingPoint() &&
4238 "Floating point types unsupported - use isKnownNeverZeroFloat");
4239
4240 // If the value is a constant, we can obviously see if it is a zero or not.
4241 if (ISD::matchUnaryPredicate(
4242 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4243 return true;
4244
4245 // TODO: Recognize more cases here.
4246 switch (Op.getOpcode()) {
4247 default: break;
4248 case ISD::OR:
4249 if (isKnownNeverZero(Op.getOperand(1)) ||
4250 isKnownNeverZero(Op.getOperand(0)))
4251 return true;
4252 break;
4253 }
4254
4255 return false;
4256 }
4257
4258 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4259 // Check the obvious case.
4260 if (A == B) return true;
4261
4262 // Check for negative and positive zero.
4263 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4264 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4265 if (CA->isZero() && CB->isZero()) return true;
4266
4267 // Otherwise they may not be equal.
4268 return false;
4269 }
4270
4271 // FIXME: unify with llvm::haveNoCommonBitsSet.
4272 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4273 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4274 assert(A.getValueType() == B.getValueType() &&
4275 "Values must have the same type");
4276 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4277 }
4278
4279 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4280 ArrayRef<SDValue> Ops,
4281 SelectionDAG &DAG) {
4282 int NumOps = Ops.size();
4283 assert(NumOps != 0 && "Can't build an empty vector!");
4284 assert(!VT.isScalableVector() &&
4285 "BUILD_VECTOR cannot be used with scalable types");
4286 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4287 "Incorrect element count in BUILD_VECTOR!");
4288
4289 // BUILD_VECTOR of UNDEFs is UNDEF.
4290 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4291 return DAG.getUNDEF(VT);
4292
4293 // A BUILD_VECTOR of sequential extracts from the same vector and type is an identity.
4294 SDValue IdentitySrc;
4295 bool IsIdentity = true;
4296 for (int i = 0; i != NumOps; ++i) {
4297 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4298 Ops[i].getOperand(0).getValueType() != VT ||
4299 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4300 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4301 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4302 IsIdentity = false;
4303 break;
4304 }
4305 IdentitySrc = Ops[i].getOperand(0);
4306 }
4307 if (IsIdentity)
4308 return IdentitySrc;
4309
4310 return SDValue();
4311 }
4312
4313 /// Try to simplify vector concatenation to an input value, undef, or build
4314 /// vector.
4315 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, 4316 ArrayRef<SDValue> Ops, 4317 SelectionDAG &DAG) { 4318 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!"); 4319 assert(llvm::all_of(Ops, 4320 [Ops](SDValue Op) { 4321 return Ops[0].getValueType() == Op.getValueType(); 4322 }) && 4323 "Concatenation of vectors with inconsistent value types!"); 4324 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) == 4325 VT.getVectorElementCount() && 4326 "Incorrect element count in vector concatenation!"); 4327 4328 if (Ops.size() == 1) 4329 return Ops[0]; 4330 4331 // Concat of UNDEFs is UNDEF. 4332 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 4333 return DAG.getUNDEF(VT); 4334 4335 // Scan the operands and look for extract operations from a single source 4336 // that correspond to insertion at the same location via this concatenation: 4337 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ... 4338 SDValue IdentitySrc; 4339 bool IsIdentity = true; 4340 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 4341 SDValue Op = Ops[i]; 4342 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements(); 4343 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR || 4344 Op.getOperand(0).getValueType() != VT || 4345 (IdentitySrc && Op.getOperand(0) != IdentitySrc) || 4346 Op.getConstantOperandVal(1) != IdentityIndex) { 4347 IsIdentity = false; 4348 break; 4349 } 4350 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) && 4351 "Unexpected identity source vector for concat of extracts"); 4352 IdentitySrc = Op.getOperand(0); 4353 } 4354 if (IsIdentity) { 4355 assert(IdentitySrc && "Failed to set source vector of extracts"); 4356 return IdentitySrc; 4357 } 4358 4359 // The code below this point is only designed to work for fixed width 4360 // vectors, so we bail out for now. 4361 if (VT.isScalableVector()) 4362 return SDValue(); 4363 4364 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be 4365 // simplified to one big BUILD_VECTOR. 4366 // FIXME: Add support for SCALAR_TO_VECTOR as well. 4367 EVT SVT = VT.getScalarType(); 4368 SmallVector<SDValue, 16> Elts; 4369 for (SDValue Op : Ops) { 4370 EVT OpVT = Op.getValueType(); 4371 if (Op.isUndef()) 4372 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); 4373 else if (Op.getOpcode() == ISD::BUILD_VECTOR) 4374 Elts.append(Op->op_begin(), Op->op_end()); 4375 else 4376 return SDValue(); 4377 } 4378 4379 // BUILD_VECTOR requires all inputs to be of the same type, find the 4380 // maximum type and extend them all. 4381 for (SDValue Op : Elts) 4382 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 4383 4384 if (SVT.bitsGT(VT.getScalarType())) { 4385 for (SDValue &Op : Elts) { 4386 if (Op.isUndef()) 4387 Op = DAG.getUNDEF(SVT); 4388 else 4389 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) 4390 ? DAG.getZExtOrTrunc(Op, DL, SVT) 4391 : DAG.getSExtOrTrunc(Op, DL, SVT); 4392 } 4393 } 4394 4395 SDValue V = DAG.getBuildVector(VT, DL, Elts); 4396 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG); 4397 return V; 4398 } 4399 4400 /// Gets or creates the specified node. 
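/// Nullary nodes are uniqued through the CSE map: requesting the same
/// (Opcode, VT) pair again returns the existing SDNode instead of a copy.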
4401 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4402 FoldingSetNodeID ID;
4403 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4404 void *IP = nullptr;
4405 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4406 return SDValue(E, 0);
4407
4408 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4409 getVTList(VT));
4410 CSEMap.InsertNode(N, IP);
4411
4412 InsertNode(N);
4413 SDValue V = SDValue(N, 0);
4414 NewSDValueDbgMsg(V, "Creating new node: ", this);
4415 return V;
4416 }
4417
4418 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4419 SDValue Operand) {
4420 SDNodeFlags Flags;
4421 if (Inserter)
4422 Flags = Inserter->getFlags();
4423 return getNode(Opcode, DL, VT, Operand, Flags);
4424 }
4425
4426 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4427 SDValue Operand, const SDNodeFlags Flags) {
4428 assert(Operand.getOpcode() != ISD::DELETED_NODE &&
4429 "Operand is DELETED_NODE!");
4430 // Constant fold unary operations with an integer constant operand. Even
4431 // opaque constants will be folded, because the folding of unary operations
4432 // doesn't create new constants with different values. Nevertheless, the
4433 // opaque flag is preserved during folding to prevent future folding with
4434 // other constants.
4435 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4436 const APInt &Val = C->getAPIntValue();
4437 switch (Opcode) {
4438 default: break;
4439 case ISD::SIGN_EXTEND:
4440 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4441 C->isTargetOpcode(), C->isOpaque());
4442 case ISD::TRUNCATE:
4443 if (C->isOpaque())
4444 break;
4445 LLVM_FALLTHROUGH;
4446 case ISD::ANY_EXTEND:
4447 case ISD::ZERO_EXTEND:
4448 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4449 C->isTargetOpcode(), C->isOpaque());
4450 case ISD::UINT_TO_FP:
4451 case ISD::SINT_TO_FP: {
4452 APFloat apf(EVTToAPFloatSemantics(VT),
4453 APInt::getNullValue(VT.getSizeInBits()));
4454 (void)apf.convertFromAPInt(Val,
4455 Opcode==ISD::SINT_TO_FP,
4456 APFloat::rmNearestTiesToEven);
4457 return getConstantFP(apf, DL, VT);
4458 }
4459 case ISD::BITCAST:
4460 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4461 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4462 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4463 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4464 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4465 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4466 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4467 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4468 break;
4469 case ISD::ABS:
4470 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4471 C->isOpaque());
4472 case ISD::BITREVERSE:
4473 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4474 C->isOpaque());
4475 case ISD::BSWAP:
4476 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4477 C->isOpaque());
4478 case ISD::CTPOP:
4479 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4480 C->isOpaque());
4481 case ISD::CTLZ:
4482 case ISD::CTLZ_ZERO_UNDEF:
4483 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4484 C->isOpaque());
4485 case ISD::CTTZ:
4486 case ISD::CTTZ_ZERO_UNDEF:
4487 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4488 C->isOpaque());
4489 case ISD::FP16_TO_FP: {
4490 bool Ignored;
4491
APFloat FPV(APFloat::IEEEhalf(), 4492 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 4493 4494 // This can return overflow, underflow, or inexact; we don't care. 4495 // FIXME need to be more flexible about rounding mode. 4496 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4497 APFloat::rmNearestTiesToEven, &Ignored); 4498 return getConstantFP(FPV, DL, VT); 4499 } 4500 } 4501 } 4502 4503 // Constant fold unary operations with a floating point constant operand. 4504 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4505 APFloat V = C->getValueAPF(); // make copy 4506 switch (Opcode) { 4507 case ISD::FNEG: 4508 V.changeSign(); 4509 return getConstantFP(V, DL, VT); 4510 case ISD::FABS: 4511 V.clearSign(); 4512 return getConstantFP(V, DL, VT); 4513 case ISD::FCEIL: { 4514 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4515 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4516 return getConstantFP(V, DL, VT); 4517 break; 4518 } 4519 case ISD::FTRUNC: { 4520 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4521 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4522 return getConstantFP(V, DL, VT); 4523 break; 4524 } 4525 case ISD::FFLOOR: { 4526 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4527 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4528 return getConstantFP(V, DL, VT); 4529 break; 4530 } 4531 case ISD::FP_EXTEND: { 4532 bool ignored; 4533 // This can return overflow, underflow, or inexact; we don't care. 4534 // FIXME need to be more flexible about rounding mode. 4535 (void)V.convert(EVTToAPFloatSemantics(VT), 4536 APFloat::rmNearestTiesToEven, &ignored); 4537 return getConstantFP(V, DL, VT); 4538 } 4539 case ISD::FP_TO_SINT: 4540 case ISD::FP_TO_UINT: { 4541 bool ignored; 4542 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4543 // FIXME need to be more flexible about rounding mode. 4544 APFloat::opStatus s = 4545 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4546 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4547 break; 4548 return getConstant(IntVal, DL, VT); 4549 } 4550 case ISD::BITCAST: 4551 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4552 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4553 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4554 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4555 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4556 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4557 break; 4558 case ISD::FP_TO_FP16: { 4559 bool Ignored; 4560 // This can return overflow, underflow, or inexact; we don't care. 4561 // FIXME need to be more flexible about rounding mode. 4562 (void)V.convert(APFloat::IEEEhalf(), 4563 APFloat::rmNearestTiesToEven, &Ignored); 4564 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4565 } 4566 } 4567 } 4568 4569 // Constant fold unary operations with a vector integer or float operand. 4570 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4571 if (BV->isConstant()) { 4572 switch (Opcode) { 4573 default: 4574 // FIXME: Entirely reasonable to perform folding of other unary 4575 // operations here as the need arises. 
4576 break;
4577 case ISD::FNEG:
4578 case ISD::FABS:
4579 case ISD::FCEIL:
4580 case ISD::FTRUNC:
4581 case ISD::FFLOOR:
4582 case ISD::FP_EXTEND:
4583 case ISD::FP_TO_SINT:
4584 case ISD::FP_TO_UINT:
4585 case ISD::TRUNCATE:
4586 case ISD::ANY_EXTEND:
4587 case ISD::ZERO_EXTEND:
4588 case ISD::SIGN_EXTEND:
4589 case ISD::UINT_TO_FP:
4590 case ISD::SINT_TO_FP:
4591 case ISD::ABS:
4592 case ISD::BITREVERSE:
4593 case ISD::BSWAP:
4594 case ISD::CTLZ:
4595 case ISD::CTLZ_ZERO_UNDEF:
4596 case ISD::CTTZ:
4597 case ISD::CTTZ_ZERO_UNDEF:
4598 case ISD::CTPOP: {
4599 SDValue Ops = { Operand };
4600 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4601 return Fold;
4602 }
4603 }
4604 }
4605 }
4606
4607 unsigned OpOpcode = Operand.getNode()->getOpcode();
4608 switch (Opcode) {
4609 case ISD::FREEZE:
4610 assert(VT == Operand.getValueType() && "Unexpected VT!");
4611 break;
4612 case ISD::TokenFactor:
4613 case ISD::MERGE_VALUES:
4614 case ISD::CONCAT_VECTORS:
4615 return Operand; // Factor, merge or concat of one node? No need.
4616 case ISD::BUILD_VECTOR: {
4617 // Attempt to simplify BUILD_VECTOR.
4618 SDValue Ops[] = {Operand};
4619 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4620 return V;
4621 break;
4622 }
4623 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4624 case ISD::FP_EXTEND:
4625 assert(VT.isFloatingPoint() &&
4626 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4627 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4628 assert((!VT.isVector() ||
4629 VT.getVectorElementCount() ==
4630 Operand.getValueType().getVectorElementCount()) &&
4631 "Vector element count mismatch!");
4632 assert(Operand.getValueType().bitsLT(VT) &&
4633 "Invalid fpext node, dst < src!");
4634 if (Operand.isUndef())
4635 return getUNDEF(VT);
4636 break;
4637 case ISD::FP_TO_SINT:
4638 case ISD::FP_TO_UINT:
4639 if (Operand.isUndef())
4640 return getUNDEF(VT);
4641 break;
4642 case ISD::SINT_TO_FP:
4643 case ISD::UINT_TO_FP:
4644 // [us]itofp(undef) = 0, because the result value is bounded.
4645 if (Operand.isUndef())
4646 return getConstantFP(0.0, DL, VT);
4647 break;
4648 case ISD::SIGN_EXTEND:
4649 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4650 "Invalid SIGN_EXTEND!");
4651 assert(VT.isVector() == Operand.getValueType().isVector() &&
4652 "SIGN_EXTEND result type should be vector iff the operand "
4653 "type is vector!");
4654 if (Operand.getValueType() == VT) return Operand; // noop extension
4655 assert((!VT.isVector() ||
4656 VT.getVectorElementCount() ==
4657 Operand.getValueType().getVectorElementCount()) &&
4658 "Vector element count mismatch!");
4659 assert(Operand.getValueType().bitsLT(VT) &&
4660 "Invalid sext node, dst < src!");
4661 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4662 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4663 else if (OpOpcode == ISD::UNDEF)
4664 // sext(undef) = 0, because the top bits will all be the same.
4665 return getConstant(0, DL, VT);
4666 break;
4667 case ISD::ZERO_EXTEND:
4668 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4669 "Invalid ZERO_EXTEND!");
4670 assert(VT.isVector() == Operand.getValueType().isVector() &&
4671 "ZERO_EXTEND result type should be vector iff the operand "
4672 "type is vector!");
4673 if (Operand.getValueType() == VT) return Operand; // noop extension
4674 assert((!VT.isVector() ||
4675 VT.getVectorElementCount() ==
4676 Operand.getValueType().getVectorElementCount()) &&
4677 "Vector element count mismatch!");
4678 assert(Operand.getValueType().bitsLT(VT) &&
4679 "Invalid zext node, dst < src!");
4680 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4681 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4682 else if (OpOpcode == ISD::UNDEF)
4683 // zext(undef) = 0, because the top bits will be zero.
4684 return getConstant(0, DL, VT);
4685 break;
4686 case ISD::ANY_EXTEND:
4687 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4688 "Invalid ANY_EXTEND!");
4689 assert(VT.isVector() == Operand.getValueType().isVector() &&
4690 "ANY_EXTEND result type should be vector iff the operand "
4691 "type is vector!");
4692 if (Operand.getValueType() == VT) return Operand; // noop extension
4693 assert((!VT.isVector() ||
4694 VT.getVectorElementCount() ==
4695 Operand.getValueType().getVectorElementCount()) &&
4696 "Vector element count mismatch!");
4697 assert(Operand.getValueType().bitsLT(VT) &&
4698 "Invalid anyext node, dst < src!");
4699
4700 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4701 OpOpcode == ISD::ANY_EXTEND)
4702 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4703 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4704 else if (OpOpcode == ISD::UNDEF)
4705 return getUNDEF(VT);
4706
4707 // (ext (trunc x)) -> x
4708 if (OpOpcode == ISD::TRUNCATE) {
4709 SDValue OpOp = Operand.getOperand(0);
4710 if (OpOp.getValueType() == VT) {
4711 transferDbgValues(Operand, OpOp);
4712 return OpOp;
4713 }
4714 }
4715 break;
4716 case ISD::TRUNCATE:
4717 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4718 "Invalid TRUNCATE!");
4719 assert(VT.isVector() == Operand.getValueType().isVector() &&
4720 "TRUNCATE result type should be vector iff the operand "
4721 "type is vector!");
4722 if (Operand.getValueType() == VT) return Operand; // noop truncate
4723 assert((!VT.isVector() ||
4724 VT.getVectorElementCount() ==
4725 Operand.getValueType().getVectorElementCount()) &&
4726 "Vector element count mismatch!");
4727 assert(Operand.getValueType().bitsGT(VT) &&
4728 "Invalid truncate node, src < dst!");
4729 if (OpOpcode == ISD::TRUNCATE)
4730 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4731 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4732 OpOpcode == ISD::ANY_EXTEND) {
4733 // If the source is smaller than the dest, we still need an extend.
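// (For illustration: truncating (sext i8 X to i64) to i32 becomes
// (sext i8 X to i32), while truncating (zext i32 Y to i64) to i16
// truncates Y directly.)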
4734 if (Operand.getOperand(0).getValueType().getScalarType() 4735 .bitsLT(VT.getScalarType())) 4736 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4737 if (Operand.getOperand(0).getValueType().bitsGT(VT)) 4738 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4739 return Operand.getOperand(0); 4740 } 4741 if (OpOpcode == ISD::UNDEF) 4742 return getUNDEF(VT); 4743 break; 4744 case ISD::ANY_EXTEND_VECTOR_INREG: 4745 case ISD::ZERO_EXTEND_VECTOR_INREG: 4746 case ISD::SIGN_EXTEND_VECTOR_INREG: 4747 assert(VT.isVector() && "This DAG node is restricted to vector types."); 4748 assert(Operand.getValueType().bitsLE(VT) && 4749 "The input must be the same size or smaller than the result."); 4750 assert(VT.getVectorNumElements() < 4751 Operand.getValueType().getVectorNumElements() && 4752 "The destination vector type must have fewer lanes than the input."); 4753 break; 4754 case ISD::ABS: 4755 assert(VT.isInteger() && VT == Operand.getValueType() && 4756 "Invalid ABS!"); 4757 if (OpOpcode == ISD::UNDEF) 4758 return getUNDEF(VT); 4759 break; 4760 case ISD::BSWAP: 4761 assert(VT.isInteger() && VT == Operand.getValueType() && 4762 "Invalid BSWAP!"); 4763 assert((VT.getScalarSizeInBits() % 16 == 0) && 4764 "BSWAP types must be a multiple of 16 bits!"); 4765 if (OpOpcode == ISD::UNDEF) 4766 return getUNDEF(VT); 4767 break; 4768 case ISD::BITREVERSE: 4769 assert(VT.isInteger() && VT == Operand.getValueType() && 4770 "Invalid BITREVERSE!"); 4771 if (OpOpcode == ISD::UNDEF) 4772 return getUNDEF(VT); 4773 break; 4774 case ISD::BITCAST: 4775 // Basic sanity checking. 4776 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 4777 "Cannot BITCAST between types of different sizes!"); 4778 if (VT == Operand.getValueType()) return Operand; // noop conversion. 4779 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4780 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4781 if (OpOpcode == ISD::UNDEF) 4782 return getUNDEF(VT); 4783 break; 4784 case ISD::SCALAR_TO_VECTOR: 4785 assert(VT.isVector() && !Operand.getValueType().isVector() && 4786 (VT.getVectorElementType() == Operand.getValueType() || 4787 (VT.getVectorElementType().isInteger() && 4788 Operand.getValueType().isInteger() && 4789 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4790 "Illegal SCALAR_TO_VECTOR node!"); 4791 if (OpOpcode == ISD::UNDEF) 4792 return getUNDEF(VT); 4793 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4794 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4795 isa<ConstantSDNode>(Operand.getOperand(1)) && 4796 Operand.getConstantOperandVal(1) == 0 && 4797 Operand.getOperand(0).getValueType() == VT) 4798 return Operand.getOperand(0); 4799 break; 4800 case ISD::FNEG: 4801 // Negation of an unknown bag of bits is still completely undefined. 
4802 if (OpOpcode == ISD::UNDEF) 4803 return getUNDEF(VT); 4804 4805 if (OpOpcode == ISD::FNEG) // --X -> X 4806 return Operand.getOperand(0); 4807 break; 4808 case ISD::FABS: 4809 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4810 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4811 break; 4812 case ISD::VSCALE: 4813 assert(VT == Operand.getValueType() && "Unexpected VT!"); 4814 break; 4815 case ISD::CTPOP: 4816 if (Operand.getValueType().getScalarType() == MVT::i1) 4817 return Operand; 4818 break; 4819 case ISD::CTLZ: 4820 case ISD::CTTZ: 4821 if (Operand.getValueType().getScalarType() == MVT::i1) 4822 return getNOT(DL, Operand, Operand.getValueType()); 4823 break; 4824 case ISD::VECREDUCE_SMIN: 4825 case ISD::VECREDUCE_UMAX: 4826 if (Operand.getValueType().getScalarType() == MVT::i1) 4827 return getNode(ISD::VECREDUCE_OR, DL, VT, Operand); 4828 break; 4829 case ISD::VECREDUCE_SMAX: 4830 case ISD::VECREDUCE_UMIN: 4831 if (Operand.getValueType().getScalarType() == MVT::i1) 4832 return getNode(ISD::VECREDUCE_AND, DL, VT, Operand); 4833 break; 4834 } 4835 4836 SDNode *N; 4837 SDVTList VTs = getVTList(VT); 4838 SDValue Ops[] = {Operand}; 4839 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4840 FoldingSetNodeID ID; 4841 AddNodeIDNode(ID, Opcode, VTs, Ops); 4842 void *IP = nullptr; 4843 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4844 E->intersectFlagsWith(Flags); 4845 return SDValue(E, 0); 4846 } 4847 4848 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4849 N->setFlags(Flags); 4850 createOperands(N, Ops); 4851 CSEMap.InsertNode(N, IP); 4852 } else { 4853 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4854 createOperands(N, Ops); 4855 } 4856 4857 InsertNode(N); 4858 SDValue V = SDValue(N, 0); 4859 NewSDValueDbgMsg(V, "Creating new node: ", this); 4860 return V; 4861 } 4862 4863 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, 4864 const APInt &C2) { 4865 switch (Opcode) { 4866 case ISD::ADD: return C1 + C2; 4867 case ISD::SUB: return C1 - C2; 4868 case ISD::MUL: return C1 * C2; 4869 case ISD::AND: return C1 & C2; 4870 case ISD::OR: return C1 | C2; 4871 case ISD::XOR: return C1 ^ C2; 4872 case ISD::SHL: return C1 << C2; 4873 case ISD::SRL: return C1.lshr(C2); 4874 case ISD::SRA: return C1.ashr(C2); 4875 case ISD::ROTL: return C1.rotl(C2); 4876 case ISD::ROTR: return C1.rotr(C2); 4877 case ISD::SMIN: return C1.sle(C2) ? C1 : C2; 4878 case ISD::SMAX: return C1.sge(C2) ? C1 : C2; 4879 case ISD::UMIN: return C1.ule(C2) ? C1 : C2; 4880 case ISD::UMAX: return C1.uge(C2) ? 
C1 : C2; 4881 case ISD::SADDSAT: return C1.sadd_sat(C2); 4882 case ISD::UADDSAT: return C1.uadd_sat(C2); 4883 case ISD::SSUBSAT: return C1.ssub_sat(C2); 4884 case ISD::USUBSAT: return C1.usub_sat(C2); 4885 case ISD::UDIV: 4886 if (!C2.getBoolValue()) 4887 break; 4888 return C1.udiv(C2); 4889 case ISD::UREM: 4890 if (!C2.getBoolValue()) 4891 break; 4892 return C1.urem(C2); 4893 case ISD::SDIV: 4894 if (!C2.getBoolValue()) 4895 break; 4896 return C1.sdiv(C2); 4897 case ISD::SREM: 4898 if (!C2.getBoolValue()) 4899 break; 4900 return C1.srem(C2); 4901 } 4902 return llvm::None; 4903 } 4904 4905 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4906 const GlobalAddressSDNode *GA, 4907 const SDNode *N2) { 4908 if (GA->getOpcode() != ISD::GlobalAddress) 4909 return SDValue(); 4910 if (!TLI->isOffsetFoldingLegal(GA)) 4911 return SDValue(); 4912 auto *C2 = dyn_cast<ConstantSDNode>(N2); 4913 if (!C2) 4914 return SDValue(); 4915 int64_t Offset = C2->getSExtValue(); 4916 switch (Opcode) { 4917 case ISD::ADD: break; 4918 case ISD::SUB: Offset = -uint64_t(Offset); break; 4919 default: return SDValue(); 4920 } 4921 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, 4922 GA->getOffset() + uint64_t(Offset)); 4923 } 4924 4925 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4926 switch (Opcode) { 4927 case ISD::SDIV: 4928 case ISD::UDIV: 4929 case ISD::SREM: 4930 case ISD::UREM: { 4931 // If a divisor is zero/undef or any element of a divisor vector is 4932 // zero/undef, the whole op is undef. 4933 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4934 SDValue Divisor = Ops[1]; 4935 if (Divisor.isUndef() || isNullConstant(Divisor)) 4936 return true; 4937 4938 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4939 llvm::any_of(Divisor->op_values(), 4940 [](SDValue V) { return V.isUndef() || 4941 isNullConstant(V); }); 4942 // TODO: Handle signed overflow. 4943 } 4944 // TODO: Handle oversized shifts. 4945 default: 4946 return false; 4947 } 4948 } 4949 4950 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4951 EVT VT, ArrayRef<SDValue> Ops) { 4952 // If the opcode is a target-specific ISD node, there's nothing we can 4953 // do here and the operand rules may not line up with the below, so 4954 // bail early. 4955 if (Opcode >= ISD::BUILTIN_OP_END) 4956 return SDValue(); 4957 4958 // For now, the array Ops should only contain two values. 4959 // This enforcement will be removed once this function is merged with 4960 // FoldConstantVectorArithmetic 4961 if (Ops.size() != 2) 4962 return SDValue(); 4963 4964 if (isUndef(Opcode, Ops)) 4965 return getUNDEF(VT); 4966 4967 SDNode *N1 = Ops[0].getNode(); 4968 SDNode *N2 = Ops[1].getNode(); 4969 4970 // Handle the case of two scalars. 
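// e.g. (add (i32 2), (i32 3)) is folded to the i32 constant 5 via
// FoldValue above.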
4971 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
4972 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
4973 if (C1->isOpaque() || C2->isOpaque())
4974 return SDValue();
4975
4976 Optional<APInt> FoldAttempt =
4977 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
4978 if (!FoldAttempt)
4979 return SDValue();
4980
4981 SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT);
4982 assert((!Folded || !VT.isVector()) &&
4983 "Can't fold vector ops with scalar operands");
4984 return Folded;
4985 }
4986 }
4987
4988 // fold (add Sym, c) -> Sym+c
4989 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
4990 return FoldSymbolOffset(Opcode, VT, GA, N2);
4991 if (TLI->isCommutativeBinOp(Opcode))
4992 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
4993 return FoldSymbolOffset(Opcode, VT, GA, N1);
4994
4995 // TODO: All the folds below are performed lane-by-lane and assume a fixed
4996 // vector width; however, we should be able to do constant folds involving
4997 // splat vector nodes too.
4998 if (VT.isScalableVector())
4999 return SDValue();
5000
5001 // For fixed width vectors, extract each constant element and fold them
5002 // individually. Either input may be an undef value.
5003 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
5004 if (!BV1 && !N1->isUndef())
5005 return SDValue();
5006 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
5007 if (!BV2 && !N2->isUndef())
5008 return SDValue();
5009 // If both operands are undef, that's handled the same way as scalars.
5010 if (!BV1 && !BV2)
5011 return SDValue();
5012
5013 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
5014 "Vector binop with different number of elements in operands?");
5015
5016 EVT SVT = VT.getScalarType();
5017 EVT LegalSVT = SVT;
5018 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5019 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5020 if (LegalSVT.bitsLT(SVT))
5021 return SDValue();
5022 }
5023 SmallVector<SDValue, 4> Outputs;
5024 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
5025 for (unsigned I = 0; I != NumOps; ++I) {
5026 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
5027 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
5028 if (SVT.isInteger()) {
5029 if (V1->getValueType(0).bitsGT(SVT))
5030 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
5031 if (V2->getValueType(0).bitsGT(SVT))
5032 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
5033 }
5034
5035 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
5036 return SDValue();
5037
5038 // Fold one vector element.
5039 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
5040 if (LegalSVT != SVT)
5041 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5042
5043 // Scalar folding only succeeded if the result is a constant or UNDEF.
5044 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5045 ScalarResult.getOpcode() != ISD::ConstantFP)
5046 return SDValue();
5047 Outputs.push_back(ScalarResult);
5048 }
5049
5050 assert(VT.getVectorNumElements() == Outputs.size() &&
5051 "Vector size mismatch!");
5052
5053 // We may have a vector type but a scalar result. Create a splat.
5054 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
5055
5056 // Build a big vector out of the scalar elements we generated.
5057 return getBuildVector(VT, SDLoc(), Outputs);
5058 }
5059
5060 // TODO: Merge with FoldConstantArithmetic
5061 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
5062 const SDLoc &DL, EVT VT,
5063 ArrayRef<SDValue> Ops,
5064 const SDNodeFlags Flags) {
5065 // If the opcode is a target-specific ISD node, there's nothing we can
5066 // do here and the operand rules may not line up with the below, so
5067 // bail early.
5068 if (Opcode >= ISD::BUILTIN_OP_END)
5069 return SDValue();
5070
5071 if (isUndef(Opcode, Ops))
5072 return getUNDEF(VT);
5073
5074 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
5075 if (!VT.isVector())
5076 return SDValue();
5077
5078 // TODO: All the folds below are performed lane-by-lane and assume a fixed
5079 // vector width; however, we should be able to do constant folds involving
5080 // splat vector nodes too.
5081 if (VT.isScalableVector())
5082 return SDValue();
5083
5084 // From this point onwards all vectors are assumed to be fixed width.
5085 unsigned NumElts = VT.getVectorNumElements();
5086
5087 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
5088 return !Op.getValueType().isVector() ||
5089 Op.getValueType().getVectorNumElements() == NumElts;
5090 };
5091
5092 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
5093 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
5094 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
5095 (BV && BV->isConstant());
5096 };
5097
5098 // All operands must be vector types with the same number of elements as
5099 // the result type and must be either UNDEF or a build vector of constant
5100 // or UNDEF scalars.
5101 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
5102 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
5103 return SDValue();
5104
5105 // If we are comparing vectors, then the result needs to be an i1 boolean
5106 // that is then sign-extended back to the legal result type.
5107 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
5108
5109 // Find legal integer scalar type for constant promotion and
5110 // ensure that its scalar size is at least as large as source.
5111 EVT LegalSVT = VT.getScalarType();
5112 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5113 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5114 if (LegalSVT.bitsLT(VT.getScalarType()))
5115 return SDValue();
5116 }
5117
5118 // Constant fold each scalar lane separately.
5119 SmallVector<SDValue, 4> ScalarResults;
5120 for (unsigned i = 0; i != NumElts; i++) {
5121 SmallVector<SDValue, 4> ScalarOps;
5122 for (SDValue Op : Ops) {
5123 EVT InSVT = Op.getValueType().getScalarType();
5124 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
5125 if (!InBV) {
5126 // We've checked that this is UNDEF or a constant of some kind.
5127 if (Op.isUndef())
5128 ScalarOps.push_back(getUNDEF(InSVT));
5129 else
5130 ScalarOps.push_back(Op);
5131 continue;
5132 }
5133
5134 SDValue ScalarOp = InBV->getOperand(i);
5135 EVT ScalarVT = ScalarOp.getValueType();
5136
5137 // Build vector (integer) scalar operands may need implicit
5138 // truncation - do this before constant folding.
5139 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
5140 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
5141
5142 ScalarOps.push_back(ScalarOp);
5143 }
5144
5145 // Constant fold the scalar operands.
5146 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
5147
5148 // Legalize the (integer) scalar constant if necessary.
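// (SIGN_EXTEND is used for the promotion so that a boolean true from
// SETCC, which is all-ones in i1, stays all-ones in the wider type.)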
5149 if (LegalSVT != SVT) 5150 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 5151 5152 // Scalar folding only succeeded if the result is a constant or UNDEF. 5153 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 5154 ScalarResult.getOpcode() != ISD::ConstantFP) 5155 return SDValue(); 5156 ScalarResults.push_back(ScalarResult); 5157 } 5158 5159 SDValue V = getBuildVector(VT, DL, ScalarResults); 5160 NewSDValueDbgMsg(V, "New node fold constant vector: ", this); 5161 return V; 5162 } 5163 5164 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL, 5165 EVT VT, SDValue N1, SDValue N2) { 5166 // TODO: We don't do any constant folding for strict FP opcodes here, but we 5167 // should. That will require dealing with a potentially non-default 5168 // rounding mode, checking the "opStatus" return value from the APFloat 5169 // math calculations, and possibly other variations. 5170 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 5171 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 5172 if (N1CFP && N2CFP) { 5173 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF(); 5174 switch (Opcode) { 5175 case ISD::FADD: 5176 C1.add(C2, APFloat::rmNearestTiesToEven); 5177 return getConstantFP(C1, DL, VT); 5178 case ISD::FSUB: 5179 C1.subtract(C2, APFloat::rmNearestTiesToEven); 5180 return getConstantFP(C1, DL, VT); 5181 case ISD::FMUL: 5182 C1.multiply(C2, APFloat::rmNearestTiesToEven); 5183 return getConstantFP(C1, DL, VT); 5184 case ISD::FDIV: 5185 C1.divide(C2, APFloat::rmNearestTiesToEven); 5186 return getConstantFP(C1, DL, VT); 5187 case ISD::FREM: 5188 C1.mod(C2); 5189 return getConstantFP(C1, DL, VT); 5190 case ISD::FCOPYSIGN: 5191 C1.copySign(C2); 5192 return getConstantFP(C1, DL, VT); 5193 default: break; 5194 } 5195 } 5196 if (N1CFP && Opcode == ISD::FP_ROUND) { 5197 APFloat C1 = N1CFP->getValueAPF(); // make copy 5198 bool Unused; 5199 // This can return overflow, underflow, or inexact; we don't care. 5200 // FIXME need to be more flexible about rounding mode. 5201 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, 5202 &Unused); 5203 return getConstantFP(C1, DL, VT); 5204 } 5205 5206 switch (Opcode) { 5207 case ISD::FSUB: 5208 // -0.0 - undef --> undef (consistent with "fneg undef") 5209 if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef()) 5210 return getUNDEF(VT); 5211 LLVM_FALLTHROUGH; 5212 5213 case ISD::FADD: 5214 case ISD::FMUL: 5215 case ISD::FDIV: 5216 case ISD::FREM: 5217 // If both operands are undef, the result is undef. If 1 operand is undef, 5218 // the result is NaN. This should match the behavior of the IR optimizer. 5219 if (N1.isUndef() && N2.isUndef()) 5220 return getUNDEF(VT); 5221 if (N1.isUndef() || N2.isUndef()) 5222 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); 5223 } 5224 return SDValue(); 5225 } 5226 5227 SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) { 5228 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!"); 5229 5230 // There's no need to assert on a byte-aligned pointer. All pointers are at 5231 // least byte aligned. 
5232 if (A == Align(1)) 5233 return Val; 5234 5235 FoldingSetNodeID ID; 5236 AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val}); 5237 ID.AddInteger(A.value()); 5238 5239 void *IP = nullptr; 5240 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 5241 return SDValue(E, 0); 5242 5243 auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(), 5244 Val.getValueType(), A); 5245 createOperands(N, {Val}); 5246 5247 CSEMap.InsertNode(N, IP); 5248 InsertNode(N); 5249 5250 SDValue V(N, 0); 5251 NewSDValueDbgMsg(V, "Creating new node: ", this); 5252 return V; 5253 } 5254 5255 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5256 SDValue N1, SDValue N2) { 5257 SDNodeFlags Flags; 5258 if (Inserter) 5259 Flags = Inserter->getFlags(); 5260 return getNode(Opcode, DL, VT, N1, N2, Flags); 5261 } 5262 5263 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5264 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 5265 assert(N1.getOpcode() != ISD::DELETED_NODE && 5266 N2.getOpcode() != ISD::DELETED_NODE && 5267 "Operand is DELETED_NODE!"); 5268 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5269 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 5270 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5271 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5272 5273 // Canonicalize constant to RHS if commutative. 5274 if (TLI->isCommutativeBinOp(Opcode)) { 5275 if (N1C && !N2C) { 5276 std::swap(N1C, N2C); 5277 std::swap(N1, N2); 5278 } else if (N1CFP && !N2CFP) { 5279 std::swap(N1CFP, N2CFP); 5280 std::swap(N1, N2); 5281 } 5282 } 5283 5284 switch (Opcode) { 5285 default: break; 5286 case ISD::TokenFactor: 5287 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 5288 N2.getValueType() == MVT::Other && "Invalid token factor!"); 5289 // Fold trivial token factors. 5290 if (N1.getOpcode() == ISD::EntryToken) return N2; 5291 if (N2.getOpcode() == ISD::EntryToken) return N1; 5292 if (N1 == N2) return N1; 5293 break; 5294 case ISD::BUILD_VECTOR: { 5295 // Attempt to simplify BUILD_VECTOR. 5296 SDValue Ops[] = {N1, N2}; 5297 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5298 return V; 5299 break; 5300 } 5301 case ISD::CONCAT_VECTORS: { 5302 SDValue Ops[] = {N1, N2}; 5303 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5304 return V; 5305 break; 5306 } 5307 case ISD::AND: 5308 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5309 assert(N1.getValueType() == N2.getValueType() && 5310 N1.getValueType() == VT && "Binary operator types must match!"); 5311 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 5312 // worth handling here. 5313 if (N2C && N2C->isNullValue()) 5314 return N2; 5315 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 5316 return N1; 5317 break; 5318 case ISD::OR: 5319 case ISD::XOR: 5320 case ISD::ADD: 5321 case ISD::SUB: 5322 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5323 assert(N1.getValueType() == N2.getValueType() && 5324 N1.getValueType() == VT && "Binary operator types must match!"); 5325 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 5326 // it's worth handling here. 
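// (The "^|+-" shorthand covers XOR, OR, ADD and SUB: each returns X
// unchanged when its right-hand operand is zero.)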
5327 if (N2C && N2C->isNullValue()) 5328 return N1; 5329 if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && VT.isVector() && 5330 VT.getVectorElementType() == MVT::i1) 5331 return getNode(ISD::XOR, DL, VT, N1, N2); 5332 break; 5333 case ISD::MUL: 5334 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5335 assert(N1.getValueType() == N2.getValueType() && 5336 N1.getValueType() == VT && "Binary operator types must match!"); 5337 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) 5338 return getNode(ISD::AND, DL, VT, N1, N2); 5339 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { 5340 const APInt &MulImm = N1->getConstantOperandAPInt(0); 5341 const APInt &N2CImm = N2C->getAPIntValue(); 5342 return getVScale(DL, VT, MulImm * N2CImm); 5343 } 5344 break; 5345 case ISD::UDIV: 5346 case ISD::UREM: 5347 case ISD::MULHU: 5348 case ISD::MULHS: 5349 case ISD::SDIV: 5350 case ISD::SREM: 5351 case ISD::SADDSAT: 5352 case ISD::SSUBSAT: 5353 case ISD::UADDSAT: 5354 case ISD::USUBSAT: 5355 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5356 assert(N1.getValueType() == N2.getValueType() && 5357 N1.getValueType() == VT && "Binary operator types must match!"); 5358 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) { 5359 // fold (add_sat x, y) -> (or x, y) for bool types. 5360 if (Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT) 5361 return getNode(ISD::OR, DL, VT, N1, N2); 5362 // fold (sub_sat x, y) -> (and x, ~y) for bool types. 5363 if (Opcode == ISD::SSUBSAT || Opcode == ISD::USUBSAT) 5364 return getNode(ISD::AND, DL, VT, N1, getNOT(DL, N2, VT)); 5365 } 5366 break; 5367 case ISD::SMIN: 5368 case ISD::UMAX: 5369 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5370 assert(N1.getValueType() == N2.getValueType() && 5371 N1.getValueType() == VT && "Binary operator types must match!"); 5372 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) 5373 return getNode(ISD::OR, DL, VT, N1, N2); 5374 break; 5375 case ISD::SMAX: 5376 case ISD::UMIN: 5377 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5378 assert(N1.getValueType() == N2.getValueType() && 5379 N1.getValueType() == VT && "Binary operator types must match!"); 5380 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) 5381 return getNode(ISD::AND, DL, VT, N1, N2); 5382 break; 5383 case ISD::FADD: 5384 case ISD::FSUB: 5385 case ISD::FMUL: 5386 case ISD::FDIV: 5387 case ISD::FREM: 5388 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5389 assert(N1.getValueType() == N2.getValueType() && 5390 N1.getValueType() == VT && "Binary operator types must match!"); 5391 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags)) 5392 return V; 5393 break; 5394 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 
5395 assert(N1.getValueType() == VT &&
5396 N1.getValueType().isFloatingPoint() &&
5397 N2.getValueType().isFloatingPoint() &&
5398 "Invalid FCOPYSIGN!");
5399 break;
5400 case ISD::SHL:
5401 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5402 const APInt &MulImm = N1->getConstantOperandAPInt(0);
5403 const APInt &ShiftImm = N2C->getAPIntValue();
5404 return getVScale(DL, VT, MulImm << ShiftImm);
5405 }
5406 LLVM_FALLTHROUGH;
5407 case ISD::SRA:
5408 case ISD::SRL:
5409 if (SDValue V = simplifyShift(N1, N2))
5410 return V;
5411 LLVM_FALLTHROUGH;
5412 case ISD::ROTL:
5413 case ISD::ROTR:
5414 assert(VT == N1.getValueType() &&
5415 "Shift operators' return type must be the same as their first arg");
5416 assert(VT.isInteger() && N2.getValueType().isInteger() &&
5417 "Shifts only work on integers");
5418 assert((!VT.isVector() || VT == N2.getValueType()) &&
5419 "Vector shift amounts must have the same type as their first arg");
5420 // Verify that the shift amount VT is big enough to hold valid shift
5421 // amounts. This catches things like trying to shift an i1024 value by an
5422 // i8, which is easy to fall into in generic code that uses
5423 // TLI.getShiftAmountTy().
5424 assert(N2.getValueType().getScalarSizeInBits() >=
5425 Log2_32_Ceil(VT.getScalarSizeInBits()) &&
5426 "Invalid use of small shift amount with oversized value!");
5427
5428 // Always fold shifts of i1 values so the code generator doesn't need to
5429 // handle them. Since we know the size of the shift has to be less than the
5430 // size of the value, the shift/rotate count is guaranteed to be zero.
5431 if (VT == MVT::i1)
5432 return N1;
5433 if (N2C && N2C->isNullValue())
5434 return N1;
5435 break;
5436 case ISD::FP_ROUND:
5437 assert(VT.isFloatingPoint() &&
5438 N1.getValueType().isFloatingPoint() &&
5439 VT.bitsLE(N1.getValueType()) &&
5440 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5441 "Invalid FP_ROUND!");
5442 if (N1.getValueType() == VT) return N1; // noop conversion.
5443 break;
5444 case ISD::AssertSext:
5445 case ISD::AssertZext: {
5446 EVT EVT = cast<VTSDNode>(N2)->getVT();
5447 assert(VT == N1.getValueType() && "Not an inreg extend!");
5448 assert(VT.isInteger() && EVT.isInteger() &&
5449 "Cannot *_EXTEND_INREG FP types");
5450 assert(!EVT.isVector() &&
5451 "AssertSExt/AssertZExt type should be the vector element type "
5452 "rather than the vector type!");
5453 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
5454 if (VT.getScalarType() == EVT) return N1; // noop assertion.
5455 break;
5456 }
5457 case ISD::SIGN_EXTEND_INREG: {
5458 EVT EVT = cast<VTSDNode>(N2)->getVT();
5459 assert(VT == N1.getValueType() && "Not an inreg extend!");
5460 assert(VT.isInteger() && EVT.isInteger() &&
5461 "Cannot *_EXTEND_INREG FP types");
5462 assert(EVT.isVector() == VT.isVector() &&
5463 "SIGN_EXTEND_INREG type should be vector iff the operand "
5464 "type is vector!");
5465 assert((!EVT.isVector() ||
5466 EVT.getVectorElementCount() == VT.getVectorElementCount()) &&
5467 "Vector element counts must match in SIGN_EXTEND_INREG");
5468 assert(EVT.bitsLE(VT) && "Not extending!");
5469 if (EVT == VT) return N1; // Not actually extending
5470
5471 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
5472 unsigned FromBits = EVT.getScalarSizeInBits();
5473 Val <<= Val.getBitWidth() - FromBits;
5474 Val.ashrInPlace(Val.getBitWidth() - FromBits);
5475 return getConstant(Val, DL, ConstantVT);
5476 };
5477
5478 if (N1C) {
5479 const APInt &Val = N1C->getAPIntValue();
5480 return SignExtendInReg(Val, VT);
5481 }
5482 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
5483 SmallVector<SDValue, 8> Ops;
5484 llvm::EVT OpVT = N1.getOperand(0).getValueType();
5485 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
5486 SDValue Op = N1.getOperand(i);
5487 if (Op.isUndef()) {
5488 Ops.push_back(getUNDEF(OpVT));
5489 continue;
5490 }
5491 ConstantSDNode *C = cast<ConstantSDNode>(Op);
5492 APInt Val = C->getAPIntValue();
5493 Ops.push_back(SignExtendInReg(Val, OpVT));
5494 }
5495 return getBuildVector(VT, DL, Ops);
5496 }
5497 break;
5498 }
5499 case ISD::EXTRACT_VECTOR_ELT:
5500 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
5501 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the "
5502 "element type of the vector.");
5503
5504 // Extract from an undefined value or using an undefined index is undefined.
5505 if (N1.isUndef() || N2.isUndef())
5506 return getUNDEF(VT);
5507
5508 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
5509 // vectors. For scalable vectors we will provide appropriate support for
5510 // dealing with arbitrary indices.
5511 if (N2C && N1.getValueType().isFixedLengthVector() &&
5512 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
5513 return getUNDEF(VT);
5514
5515 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
5516 // expanding copies of large vectors from registers. This only works for
5517 // fixed length vectors, since we need to know the exact number of
5518 // elements.
5519 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
5520 N1.getOperand(0).getValueType().isFixedLengthVector()) {
5521 unsigned Factor =
5522 N1.getOperand(0).getValueType().getVectorNumElements();
5523 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
5524 N1.getOperand(N2C->getZExtValue() / Factor),
5525 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL));
5526 }
5527
5528 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
5529 // lowering is expanding large vector constants.
5530 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
5531 N1.getOpcode() == ISD::SPLAT_VECTOR)) {
5532 assert((N1.getOpcode() != ISD::BUILD_VECTOR ||
5533 N1.getValueType().isFixedLengthVector()) &&
5534 "BUILD_VECTOR used for scalable vectors");
5535 unsigned Index =
5536 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
5537 SDValue Elt = N1.getOperand(Index);
5538
5539 if (VT != Elt.getValueType())
5540 // If the vector element type is not legal, the BUILD_VECTOR operands
5541 // are promoted and implicitly truncated, and the result implicitly
5542 // extended. Make that explicit here.
5543 Elt = getAnyExtOrTrunc(Elt, DL, VT);
5544
5545 return Elt;
5546 }
5547
5548 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5549 // operations are lowered to scalars.
5550 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5551 // If the indices are the same, return the inserted element; otherwise,
5552 // if the indices are known to differ, extract the element from
5553 // the original vector.
5554 SDValue N1Op2 = N1.getOperand(2);
5555 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5556
5557 if (N1Op2C && N2C) {
5558 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5559 if (VT == N1.getOperand(1).getValueType())
5560 return N1.getOperand(1);
5561 else
5562 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5563 }
5564
5565 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5566 }
5567 }
5568
5569 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5570 // when vector types are scalarized and v1iX is legal.
5571 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx).
5572 // Here we are completely ignoring the extract element index (N2),
5573 // which is fine for fixed width vectors, since any index other than 0
5574 // is undefined anyway. However, this cannot be ignored for scalable
5575 // vectors - in theory we could support this, but we don't want to do this
5576 // without a profitability check.
5577 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5578 N1.getValueType().isFixedLengthVector() &&
5579 N1.getValueType().getVectorNumElements() == 1) {
5580 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5581 N1.getOperand(1));
5582 }
5583 break;
5584 case ISD::EXTRACT_ELEMENT:
5585 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5586 assert(!N1.getValueType().isVector() && !VT.isVector() &&
5587 (N1.getValueType().isInteger() == VT.isInteger()) &&
5588 N1.getValueType() != VT &&
5589 "Wrong types for EXTRACT_ELEMENT!");
5590
5591 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5592 // 64-bit integers into 32-bit parts. Instead of building the extract of
5593 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5594 if (N1.getOpcode() == ISD::BUILD_PAIR)
5595 return N1.getOperand(N2C->getZExtValue());
5596
5597 // EXTRACT_ELEMENT of a constant int is also very common.
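// e.g. extracting i32 element 1 of the i64 constant 0x0000000100000002
// shifts right by 32 bits and yields the constant 1.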
5598 if (N1C) { 5599 unsigned ElementSize = VT.getSizeInBits(); 5600 unsigned Shift = ElementSize * N2C->getZExtValue(); 5601 const APInt &Val = N1C->getAPIntValue(); 5602 return getConstant(Val.extractBits(ElementSize, Shift), DL, VT); 5603 } 5604 break; 5605 case ISD::EXTRACT_SUBVECTOR: 5606 EVT N1VT = N1.getValueType(); 5607 assert(VT.isVector() && N1VT.isVector() && 5608 "Extract subvector VTs must be vectors!"); 5609 assert(VT.getVectorElementType() == N1VT.getVectorElementType() && 5610 "Extract subvector VTs must have the same element type!"); 5611 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) && 5612 "Cannot extract a scalable vector from a fixed length vector!"); 5613 assert((VT.isScalableVector() != N1VT.isScalableVector() || 5614 VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) && 5615 "Extract subvector must be from larger vector to smaller vector!"); 5616 assert(N2C && "Extract subvector index must be a constant"); 5617 assert((VT.isScalableVector() != N1VT.isScalableVector() || 5618 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <= 5619 N1VT.getVectorMinNumElements()) && 5620 "Extract subvector overflow!"); 5621 assert(N2C->getAPIntValue().getBitWidth() == 5622 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() && 5623 "Constant index for EXTRACT_SUBVECTOR has an invalid size"); 5624 5625 // Trivial extraction. 5626 if (VT == N1VT) 5627 return N1; 5628 5629 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. 5630 if (N1.isUndef()) 5631 return getUNDEF(VT); 5632 5633 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of 5634 // the concat have the same type as the extract. 5635 if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 && 5636 VT == N1.getOperand(0).getValueType()) { 5637 unsigned Factor = VT.getVectorMinNumElements(); 5638 return N1.getOperand(N2C->getZExtValue() / Factor); 5639 } 5640 5641 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created 5642 // during shuffle legalization. 5643 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 5644 VT == N1.getOperand(1).getValueType()) 5645 return N1.getOperand(1); 5646 break; 5647 } 5648 5649 // Perform trivial constant folding. 5650 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2})) 5651 return SV; 5652 5653 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2)) 5654 return V; 5655 5656 // Canonicalize an UNDEF to the RHS, even over a constant. 5657 if (N1.isUndef()) { 5658 if (TLI->isCommutativeBinOp(Opcode)) { 5659 std::swap(N1, N2); 5660 } else { 5661 switch (Opcode) { 5662 case ISD::SIGN_EXTEND_INREG: 5663 case ISD::SUB: 5664 return getUNDEF(VT); // fold op(undef, arg2) -> undef 5665 case ISD::UDIV: 5666 case ISD::SDIV: 5667 case ISD::UREM: 5668 case ISD::SREM: 5669 case ISD::SSUBSAT: 5670 case ISD::USUBSAT: 5671 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 5672 } 5673 } 5674 } 5675 5676 // Fold a bunch of operators when the RHS is undef. 5677 if (N2.isUndef()) { 5678 switch (Opcode) { 5679 case ISD::XOR: 5680 if (N1.isUndef()) 5681 // Handle undef ^ undef -> 0 special case. This is a common 5682 // idiom (misuse). 
5683 return getConstant(0, DL, VT); 5684 LLVM_FALLTHROUGH; 5685 case ISD::ADD: 5686 case ISD::SUB: 5687 case ISD::UDIV: 5688 case ISD::SDIV: 5689 case ISD::UREM: 5690 case ISD::SREM: 5691 return getUNDEF(VT); // fold op(arg1, undef) -> undef 5692 case ISD::MUL: 5693 case ISD::AND: 5694 case ISD::SSUBSAT: 5695 case ISD::USUBSAT: 5696 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 5697 case ISD::OR: 5698 case ISD::SADDSAT: 5699 case ISD::UADDSAT: 5700 return getAllOnesConstant(DL, VT); 5701 } 5702 } 5703 5704 // Memoize this node if possible. 5705 SDNode *N; 5706 SDVTList VTs = getVTList(VT); 5707 SDValue Ops[] = {N1, N2}; 5708 if (VT != MVT::Glue) { 5709 FoldingSetNodeID ID; 5710 AddNodeIDNode(ID, Opcode, VTs, Ops); 5711 void *IP = nullptr; 5712 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5713 E->intersectFlagsWith(Flags); 5714 return SDValue(E, 0); 5715 } 5716 5717 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5718 N->setFlags(Flags); 5719 createOperands(N, Ops); 5720 CSEMap.InsertNode(N, IP); 5721 } else { 5722 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5723 createOperands(N, Ops); 5724 } 5725 5726 InsertNode(N); 5727 SDValue V = SDValue(N, 0); 5728 NewSDValueDbgMsg(V, "Creating new node: ", this); 5729 return V; 5730 } 5731 5732 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5733 SDValue N1, SDValue N2, SDValue N3) { 5734 SDNodeFlags Flags; 5735 if (Inserter) 5736 Flags = Inserter->getFlags(); 5737 return getNode(Opcode, DL, VT, N1, N2, N3, Flags); 5738 } 5739 5740 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5741 SDValue N1, SDValue N2, SDValue N3, 5742 const SDNodeFlags Flags) { 5743 assert(N1.getOpcode() != ISD::DELETED_NODE && 5744 N2.getOpcode() != ISD::DELETED_NODE && 5745 N3.getOpcode() != ISD::DELETED_NODE && 5746 "Operand is DELETED_NODE!"); 5747 // Perform various simplifications. 5748 switch (Opcode) { 5749 case ISD::FMA: { 5750 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5751 assert(N1.getValueType() == VT && N2.getValueType() == VT && 5752 N3.getValueType() == VT && "FMA types must match!"); 5753 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5754 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5755 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 5756 if (N1CFP && N2CFP && N3CFP) { 5757 APFloat V1 = N1CFP->getValueAPF(); 5758 const APFloat &V2 = N2CFP->getValueAPF(); 5759 const APFloat &V3 = N3CFP->getValueAPF(); 5760 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 5761 return getConstantFP(V1, DL, VT); 5762 } 5763 break; 5764 } 5765 case ISD::BUILD_VECTOR: { 5766 // Attempt to simplify BUILD_VECTOR. 
5767 SDValue Ops[] = {N1, N2, N3}; 5768 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5769 return V; 5770 break; 5771 } 5772 case ISD::CONCAT_VECTORS: { 5773 SDValue Ops[] = {N1, N2, N3}; 5774 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5775 return V; 5776 break; 5777 } 5778 case ISD::SETCC: { 5779 assert(VT.isInteger() && "SETCC result type must be an integer!"); 5780 assert(N1.getValueType() == N2.getValueType() && 5781 "SETCC operands must have the same type!"); 5782 assert(VT.isVector() == N1.getValueType().isVector() && 5783 "SETCC type should be vector iff the operand type is vector!"); 5784 assert((!VT.isVector() || VT.getVectorElementCount() == 5785 N1.getValueType().getVectorElementCount()) && 5786 "SETCC vector element counts must match!"); 5787 // Use FoldSetCC to simplify SETCC's. 5788 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 5789 return V; 5790 // Vector constant folding. 5791 SDValue Ops[] = {N1, N2, N3}; 5792 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 5793 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 5794 return V; 5795 } 5796 break; 5797 } 5798 case ISD::SELECT: 5799 case ISD::VSELECT: 5800 if (SDValue V = simplifySelect(N1, N2, N3)) 5801 return V; 5802 break; 5803 case ISD::VECTOR_SHUFFLE: 5804 llvm_unreachable("should use getVectorShuffle constructor!"); 5805 case ISD::INSERT_VECTOR_ELT: { 5806 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 5807 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except 5808 // for scalable vectors where we will generate appropriate code to 5809 // deal with out-of-bounds cases correctly. 5810 if (N3C && N1.getValueType().isFixedLengthVector() && 5811 N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 5812 return getUNDEF(VT); 5813 5814 // Undefined index can be assumed out-of-bounds, so that's UNDEF too. 5815 if (N3.isUndef()) 5816 return getUNDEF(VT); 5817 5818 // If the inserted element is an UNDEF, just use the input vector. 5819 if (N2.isUndef()) 5820 return N1; 5821 5822 break; 5823 } 5824 case ISD::INSERT_SUBVECTOR: { 5825 // Inserting undef into undef is still undef. 5826 if (N1.isUndef() && N2.isUndef()) 5827 return getUNDEF(VT); 5828 5829 EVT N2VT = N2.getValueType(); 5830 assert(VT == N1.getValueType() && 5831 "Dest and insert subvector source types must match!"); 5832 assert(VT.isVector() && N2VT.isVector() && 5833 "Insert subvector VTs must be vectors!"); 5834 assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) && 5835 "Cannot insert a scalable vector into a fixed length vector!"); 5836 assert((VT.isScalableVector() != N2VT.isScalableVector() || 5837 VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) && 5838 "Insert subvector must be from smaller vector to larger vector!"); 5839 assert(isa<ConstantSDNode>(N3) && 5840 "Insert subvector index must be constant"); 5841 assert((VT.isScalableVector() != N2VT.isScalableVector() || 5842 (N2VT.getVectorMinNumElements() + 5843 cast<ConstantSDNode>(N3)->getZExtValue()) <= 5844 VT.getVectorMinNumElements()) && 5845 "Insert subvector overflow!"); 5846 assert(cast<ConstantSDNode>(N3)->getAPIntValue().getBitWidth() == 5847 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() && 5848 "Constant index for INSERT_SUBVECTOR has an invalid size"); 5849 5850 // Trivial insertion. 5851 if (VT == N2VT) 5852 return N2; 5853 5854 // If this is an insert of an extracted vector into an undef vector, we 5855 // can just use the input to the extract. 
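// i.e. insert_subvector(undef, extract_subvector(v, idx), idx) -> v,
// provided v has the full result type.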
5856 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5857 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT) 5858 return N2.getOperand(0); 5859 break; 5860 } 5861 case ISD::BITCAST: 5862 // Fold bit_convert nodes from a type to themselves. 5863 if (N1.getValueType() == VT) 5864 return N1; 5865 break; 5866 } 5867 5868 // Memoize node if it doesn't produce a flag. 5869 SDNode *N; 5870 SDVTList VTs = getVTList(VT); 5871 SDValue Ops[] = {N1, N2, N3}; 5872 if (VT != MVT::Glue) { 5873 FoldingSetNodeID ID; 5874 AddNodeIDNode(ID, Opcode, VTs, Ops); 5875 void *IP = nullptr; 5876 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5877 E->intersectFlagsWith(Flags); 5878 return SDValue(E, 0); 5879 } 5880 5881 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5882 N->setFlags(Flags); 5883 createOperands(N, Ops); 5884 CSEMap.InsertNode(N, IP); 5885 } else { 5886 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5887 createOperands(N, Ops); 5888 } 5889 5890 InsertNode(N); 5891 SDValue V = SDValue(N, 0); 5892 NewSDValueDbgMsg(V, "Creating new node: ", this); 5893 return V; 5894 } 5895 5896 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5897 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 5898 SDValue Ops[] = { N1, N2, N3, N4 }; 5899 return getNode(Opcode, DL, VT, Ops); 5900 } 5901 5902 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5903 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 5904 SDValue N5) { 5905 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5906 return getNode(Opcode, DL, VT, Ops); 5907 } 5908 5909 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 5910 /// the incoming stack arguments to be loaded from the stack. 5911 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 5912 SmallVector<SDValue, 8> ArgChains; 5913 5914 // Include the original chain at the beginning of the list. When this is 5915 // used by target LowerCall hooks, this helps legalize find the 5916 // CALLSEQ_BEGIN node. 5917 ArgChains.push_back(Chain); 5918 5919 // Add a chain value for each stack argument. 5920 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 5921 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 5922 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 5923 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 5924 if (FI->getIndex() < 0) 5925 ArgChains.push_back(SDValue(L, 1)); 5926 5927 // Build a tokenfactor for all the chains. 5928 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 5929 } 5930 5931 /// getMemsetValue - Vectorized representation of the memset value 5932 /// operand. 
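/// For example, an i8 fill value of 0xAB widened to an i32 becomes the
/// splat constant 0xABABABAB; a non-constant byte is splatted by
/// multiplying its zero-extension with the magic constant 0x01010101...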
5933 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 5934 const SDLoc &dl) { 5935 assert(!Value.isUndef()); 5936 5937 unsigned NumBits = VT.getScalarSizeInBits(); 5938 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 5939 assert(C->getAPIntValue().getBitWidth() == 8); 5940 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 5941 if (VT.isInteger()) { 5942 bool IsOpaque = VT.getSizeInBits() > 64 || 5943 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); 5944 return DAG.getConstant(Val, dl, VT, false, IsOpaque); 5945 } 5946 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 5947 VT); 5948 } 5949 5950 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 5951 EVT IntVT = VT.getScalarType(); 5952 if (!IntVT.isInteger()) 5953 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 5954 5955 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 5956 if (NumBits > 8) { 5957 // Use a multiplication with 0x010101... to extend the input to the 5958 // required length. 5959 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 5960 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 5961 DAG.getConstant(Magic, dl, IntVT)); 5962 } 5963 5964 if (VT != Value.getValueType() && !VT.isInteger()) 5965 Value = DAG.getBitcast(VT.getScalarType(), Value); 5966 if (VT != Value.getValueType()) 5967 Value = DAG.getSplatBuildVector(VT, dl, Value); 5968 5969 return Value; 5970 } 5971 5972 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 5973 /// used when a memcpy is turned into a memset when the source is a constant 5974 /// string ptr. 5975 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 5976 const TargetLowering &TLI, 5977 const ConstantDataArraySlice &Slice) { 5978 // Handle vector with all elements zero. 5979 if (Slice.Array == nullptr) { 5980 if (VT.isInteger()) 5981 return DAG.getConstant(0, dl, VT); 5982 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 5983 return DAG.getConstantFP(0.0, dl, VT); 5984 else if (VT.isVector()) { 5985 unsigned NumElts = VT.getVectorNumElements(); 5986 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; 5987 return DAG.getNode(ISD::BITCAST, dl, VT, 5988 DAG.getConstant(0, dl, 5989 EVT::getVectorVT(*DAG.getContext(), 5990 EltVT, NumElts))); 5991 } else 5992 llvm_unreachable("Expected type!"); 5993 } 5994 5995 assert(!VT.isVector() && "Can't handle vector type here!"); 5996 unsigned NumVTBits = VT.getSizeInBits(); 5997 unsigned NumVTBytes = NumVTBits / 8; 5998 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 5999 6000 APInt Val(NumVTBits, 0); 6001 if (DAG.getDataLayout().isLittleEndian()) { 6002 for (unsigned i = 0; i != NumBytes; ++i) 6003 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 6004 } else { 6005 for (unsigned i = 0; i != NumBytes; ++i) 6006 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 6007 } 6008 6009 // If the "cost" of materializing the integer immediate is less than the cost 6010 // of a load, then it is cost effective to turn the load into the immediate. 
Type *Ty = VT.getTypeForEVT(*DAG.getContext());
6012 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
6013 return DAG.getConstant(Val, dl, VT);
6014 return SDValue(nullptr, 0);
6015 }
6016
6017 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
6018 const SDLoc &DL,
6019 const SDNodeFlags Flags) {
6020 EVT VT = Base.getValueType();
6021 SDValue Index;
6022
6023 if (Offset.isScalable())
6024 Index = getVScale(DL, Base.getValueType(),
6025 APInt(Base.getValueSizeInBits().getFixedSize(),
6026 Offset.getKnownMinSize()));
6027 else
6028 Index = getConstant(Offset.getFixedSize(), DL, VT);
6029
6030 return getMemBasePlusOffset(Base, Index, DL, Flags);
6031 }
6032
6033 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
6034 const SDLoc &DL,
6035 const SDNodeFlags Flags) {
6036 assert(Offset.getValueType().isInteger());
6037 EVT BasePtrVT = Ptr.getValueType();
6038 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags);
6039 }
6040
6041 /// Returns true if memcpy source is constant data.
6042 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
6043 uint64_t SrcDelta = 0;
6044 GlobalAddressSDNode *G = nullptr;
6045 if (Src.getOpcode() == ISD::GlobalAddress)
6046 G = cast<GlobalAddressSDNode>(Src);
6047 else if (Src.getOpcode() == ISD::ADD &&
6048 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
6049 Src.getOperand(1).getOpcode() == ISD::Constant) {
6050 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
6051 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
6052 }
6053 if (!G)
6054 return false;
6055
6056 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
6057 SrcDelta + G->getOffset());
6058 }
6059
6060 static bool shouldLowerMemFuncForSize(const MachineFunction &MF,
6061 SelectionDAG &DAG) {
6062 // On Darwin, -Os means optimize for size without hurting performance, so
6063 // only really optimize for size when -Oz (MinSize) is used.
6064 if (MF.getTarget().getTargetTriple().isOSDarwin())
6065 return MF.getFunction().hasMinSize();
6066 return DAG.shouldOptForSize();
6067 }
6068
6069 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
6070 SmallVector<SDValue, 32> &OutChains, unsigned From,
6071 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
6072 SmallVector<SDValue, 16> &OutStoreChains) {
6073 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
6074 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
6075 SmallVector<SDValue, 16> GluedLoadChains;
6076 for (unsigned i = From; i < To; ++i) {
6077 OutChains.push_back(OutLoadChains[i]);
6078 GluedLoadChains.push_back(OutLoadChains[i]);
6079 }
6080
6081 // Chain for all loads.
6082 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
6083 GluedLoadChains);
6084
6085 for (unsigned i = From; i < To; ++i) {
6086 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
6087 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
6088 ST->getBasePtr(), ST->getMemoryVT(),
6089 ST->getMemOperand());
6090 OutChains.push_back(NewStore);
6091 }
6092 }
6093
6094 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6095 SDValue Chain, SDValue Dst, SDValue Src,
6096 uint64_t Size, Align Alignment,
6097 bool isVol, bool AlwaysInline,
6098 MachinePointerInfo DstPtrInfo,
6099 MachinePointerInfo SrcPtrInfo) {
6100 // Turn a memcpy of undef into a no-op.
6101 // FIXME: We need to honor volatile even if Src is undef.
6102 if (Src.isUndef()) 6103 return Chain; 6104 6105 // Expand memcpy to a series of load and store ops if the size operand falls 6106 // below a certain threshold. 6107 // TODO: In the AlwaysInline case, if the size is big then generate a loop 6108 // rather than maybe a humongous number of loads and stores. 6109 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6110 const DataLayout &DL = DAG.getDataLayout(); 6111 LLVMContext &C = *DAG.getContext(); 6112 std::vector<EVT> MemOps; 6113 bool DstAlignCanChange = false; 6114 MachineFunction &MF = DAG.getMachineFunction(); 6115 MachineFrameInfo &MFI = MF.getFrameInfo(); 6116 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6117 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6118 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6119 DstAlignCanChange = true; 6120 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src); 6121 if (!SrcAlign || Alignment > *SrcAlign) 6122 SrcAlign = Alignment; 6123 assert(SrcAlign && "SrcAlign must be set"); 6124 ConstantDataArraySlice Slice; 6125 // If marked as volatile, perform a copy even when marked as constant. 6126 bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice); 6127 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 6128 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 6129 const MemOp Op = isZeroConstant 6130 ? MemOp::Set(Size, DstAlignCanChange, Alignment, 6131 /*IsZeroMemset*/ true, isVol) 6132 : MemOp::Copy(Size, DstAlignCanChange, Alignment, 6133 *SrcAlign, isVol, CopyFromConstant); 6134 if (!TLI.findOptimalMemOpLowering( 6135 MemOps, Limit, Op, DstPtrInfo.getAddrSpace(), 6136 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes())) 6137 return SDValue(); 6138 6139 if (DstAlignCanChange) { 6140 Type *Ty = MemOps[0].getTypeForEVT(C); 6141 Align NewAlign = DL.getABITypeAlign(Ty); 6142 6143 // Don't promote to an alignment that would require dynamic stack 6144 // realignment. 6145 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 6146 if (!TRI->needsStackRealignment(MF)) 6147 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) 6148 NewAlign = NewAlign / 2; 6149 6150 if (NewAlign > Alignment) { 6151 // Give the stack frame object a larger alignment if needed. 6152 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 6153 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6154 Alignment = NewAlign; 6155 } 6156 } 6157 6158 MachineMemOperand::Flags MMOFlags = 6159 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 6160 SmallVector<SDValue, 16> OutLoadChains; 6161 SmallVector<SDValue, 16> OutStoreChains; 6162 SmallVector<SDValue, 32> OutChains; 6163 unsigned NumMemOps = MemOps.size(); 6164 uint64_t SrcOff = 0, DstOff = 0; 6165 for (unsigned i = 0; i != NumMemOps; ++i) { 6166 EVT VT = MemOps[i]; 6167 unsigned VTSize = VT.getSizeInBits() / 8; 6168 SDValue Value, Store; 6169 6170 if (VTSize > Size) { 6171 // Issuing an unaligned load / store pair that overlaps with the previous 6172 // pair. Adjust the offset accordingly. 6173 assert(i == NumMemOps-1 && i != 0); 6174 SrcOff -= VTSize - Size; 6175 DstOff -= VTSize - Size; 6176 } 6177 6178 if (CopyFromConstant && 6179 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 6180 // It's unlikely a store of a vector immediate can be done in a single 6181 // instruction. It would require a load from a constantpool first. 6182 // We only handle zero vectors here. 
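// For scalar integer copies the immediate is built directly, e.g.
// copying "abcd" as an i32 on a little-endian target becomes a store
// of the immediate 0x64636261 (see getMemsetStringVal above).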
6183 // FIXME: Handle other cases where store of vector immediate is done in
6184 // a single instruction.
6185 ConstantDataArraySlice SubSlice;
6186 if (SrcOff < Slice.Length) {
6187 SubSlice = Slice;
6188 SubSlice.move(SrcOff);
6189 } else {
6190 // This is an out-of-bounds access and hence UB. Pretend we read zero.
6191 SubSlice.Array = nullptr;
6192 SubSlice.Offset = 0;
6193 SubSlice.Length = VTSize;
6194 }
6195 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
6196 if (Value.getNode()) {
6197 Store = DAG.getStore(
6198 Chain, dl, Value,
6199 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6200 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
6201 OutChains.push_back(Store);
6202 }
6203 }
6204
6205 if (!Store.getNode()) {
6206 // The type might not be legal for the target. This should only happen
6207 // if the type is smaller than a legal type, as on PPC, so the right
6208 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
6209 // to Load/Store if NVT==VT.
6210 // FIXME does the case above also need this?
6211 EVT NVT = TLI.getTypeToTransformTo(C, VT);
6212 assert(NVT.bitsGE(VT));
6213
6214 bool isDereferenceable =
6215 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6216 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6217 if (isDereferenceable)
6218 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6219
6220 Value = DAG.getExtLoad(
6221 ISD::EXTLOAD, dl, NVT, Chain,
6222 DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6223 SrcPtrInfo.getWithOffset(SrcOff), VT,
6224 commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags);
6225 OutLoadChains.push_back(Value.getValue(1));
6226
6227 Store = DAG.getTruncStore(
6228 Chain, dl, Value,
6229 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6230 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
6231 OutStoreChains.push_back(Store);
6232 }
6233 SrcOff += VTSize;
6234 DstOff += VTSize;
6235 Size -= VTSize;
6236 }
6237
6238 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
6239 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
6240 unsigned NumLdStInMemcpy = OutStoreChains.size();
6241
6242 if (NumLdStInMemcpy) {
6243 // A memcpy of constants may have been converted to a memset. In such
6244 // a case, we won't have loads and stores, but just stores. In the
6245 // absence of loads, there is nothing to gang up.
6246 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
6247 // If the target does not care, just leave it as is.
6248 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
6249 OutChains.push_back(OutLoadChains[i]);
6250 OutChains.push_back(OutStoreChains[i]);
6251 }
6252 } else {
6253 // Ld/St count is within the limit set by the target.
6254 if (NumLdStInMemcpy <= GluedLdStLimit) {
6255 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6256 NumLdStInMemcpy, OutLoadChains,
6257 OutStoreChains);
6258 } else {
6259 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
6260 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
6261 unsigned GlueIter = 0;
6262
6263 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
6264 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
6265 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
6266
6267 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
6268 OutLoadChains, OutStoreChains);
6269 GlueIter += GluedLdStLimit;
6270 }
6271
6272 // Residual ld/st.
        if (RemainingLdStInMemcpy) {
          chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
                                       RemainingLdStInMemcpy, OutLoadChains,
                                       OutStoreChains);
        }
      }
    }
  }
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                        SDValue Chain, SDValue Dst, SDValue Src,
                                        uint64_t Size, Align Alignment,
                                        bool isVol, bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
  // Turn a memmove of undef to nop.
  // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memmove to a series of load and store ops if the size operand falls
  // below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
  if (!SrcAlign || Alignment > *SrcAlign)
    SrcAlign = Alignment;
  assert(SrcAlign && "SrcAlign must be set");
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
  if (!TLI.findOptimalMemOpLowering(
          MemOps, Limit,
          MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign,
                      /*IsVolatile*/ true),
          DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
          MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    Align NewAlign = DL.getABITypeAlign(Ty);
    if (NewAlign > Alignment) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Alignment = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  uint64_t SrcOff = 0, DstOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value;

    bool isDereferenceable =
        SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
    MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
    if (isDereferenceable)
      SrcMMOFlags |= MachineMemOperand::MODereferenceable;

    Value =
        DAG.getLoad(VT, dl, Chain,
                    DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
                    SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
    SrcOff += VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  OutChains.clear();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Store;

    Store =
        DAG.getStore(Chain, dl, LoadValues[i],
                     DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
                     DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
    OutChains.push_back(Store);
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

/// Lower the call to 'memset' intrinsic function into a series of store
/// operations.
///
/// \param DAG Selection DAG where lowered code is placed.
/// \param dl Link to corresponding IR location.
/// \param Chain Control flow dependency.
/// \param Dst Pointer to destination memory location.
/// \param Src Value of byte to write into the memory.
/// \param Size Number of bytes to write.
/// \param Alignment Alignment of the destination in bytes.
/// \param isVol True if destination is volatile.
/// \param DstPtrInfo IR information on the memory pointer.
/// \returns New head in the control flow, if lowering was successful, empty
/// SDValue otherwise.
///
/// The function tries to replace 'llvm.memset' intrinsic with several store
/// operations and value calculation code. This is usually profitable for small
/// memory size.
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
                               SDValue Chain, SDValue Dst, SDValue Src,
                               uint64_t Size, Align Alignment, bool isVol,
                               MachinePointerInfo DstPtrInfo) {
  // Turn a memset of undef to nop.
  // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memset to a series of store ops if the size operand
  // falls below a certain threshold.
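  // For example (illustrative): memset(p, 0xAB, 16) lowered with an i64 store
  // type becomes two stores of the splatted constant 0xABABABABABABABAB at
  // offsets 0 and 8.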
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  bool IsZeroVal =
      isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
  if (!TLI.findOptimalMemOpLowering(
          MemOps, TLI.getMaxStoresPerMemset(OptSize),
          MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
          DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty);
    if (NewAlign > Alignment) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Alignment = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  uint64_t DstOff = 0;
  unsigned NumMemOps = MemOps.size();

  // Find the largest store and generate the bit pattern for it.
  EVT LargestVT = MemOps[0];
  for (unsigned i = 1; i < NumMemOps; i++)
    if (MemOps[i].bitsGT(LargestVT))
      LargestVT = MemOps[i];
  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);

  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    if (VTSize > Size) {
      // Issuing an unaligned store that overlaps with the previous store.
      // Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      DstOff -= VTSize - Size;
    }

    // If this store is smaller than the largest store, see whether we can get
    // the smaller value for free with a truncate.
    SDValue Value = MemSetValue;
    if (VT.bitsLT(LargestVT)) {
      if (!LargestVT.isVector() && !VT.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
      else
        Value = getMemsetValue(Src, VT, DAG, dl);
    }
    assert(Value.getValueType() == VT && "Value with wrong type.");
    SDValue Store = DAG.getStore(
        Chain, dl, Value,
        DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
        DstPtrInfo.getWithOffset(DstOff), Alignment,
        isVol ?
MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 6470 OutChains.push_back(Store); 6471 DstOff += VT.getSizeInBits() / 8; 6472 Size -= VTSize; 6473 } 6474 6475 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6476 } 6477 6478 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 6479 unsigned AS) { 6480 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 6481 // pointer operands can be losslessly bitcasted to pointers of address space 0 6482 if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) { 6483 report_fatal_error("cannot lower memory intrinsic in address space " + 6484 Twine(AS)); 6485 } 6486 } 6487 6488 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 6489 SDValue Src, SDValue Size, Align Alignment, 6490 bool isVol, bool AlwaysInline, bool isTailCall, 6491 MachinePointerInfo DstPtrInfo, 6492 MachinePointerInfo SrcPtrInfo) { 6493 // Check to see if we should lower the memcpy to loads and stores first. 6494 // For cases within the target-specified limits, this is the best choice. 6495 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6496 if (ConstantSize) { 6497 // Memcpy with size zero? Just return the original chain. 6498 if (ConstantSize->isNullValue()) 6499 return Chain; 6500 6501 SDValue Result = getMemcpyLoadsAndStores( 6502 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, 6503 isVol, false, DstPtrInfo, SrcPtrInfo); 6504 if (Result.getNode()) 6505 return Result; 6506 } 6507 6508 // Then check to see if we should lower the memcpy with target-specific 6509 // code. If the target chooses to do this, this is the next best. 6510 if (TSI) { 6511 SDValue Result = TSI->EmitTargetCodeForMemcpy( 6512 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline, 6513 DstPtrInfo, SrcPtrInfo); 6514 if (Result.getNode()) 6515 return Result; 6516 } 6517 6518 // If we really need inline code and the target declined to provide it, 6519 // use a (potentially long) sequence of loads and stores. 6520 if (AlwaysInline) { 6521 assert(ConstantSize && "AlwaysInline requires a constant size!"); 6522 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6523 ConstantSize->getZExtValue(), Alignment, 6524 isVol, true, DstPtrInfo, SrcPtrInfo); 6525 } 6526 6527 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6528 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6529 6530 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 6531 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 6532 // respect volatile, so they may do things like read or write memory 6533 // beyond the given memory regions. But fixing this isn't easy, and most 6534 // people don't care. 6535 6536 // Emit a library call. 
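  // The node sequence built below is equivalent to the C library call
  //   memcpy(Dst, Src, Size);
  // with the returned pointer discarded and, if requested, a tail call.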
6537 TargetLowering::ArgListTy Args; 6538 TargetLowering::ArgListEntry Entry; 6539 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6540 Entry.Node = Dst; Args.push_back(Entry); 6541 Entry.Node = Src; Args.push_back(Entry); 6542 6543 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6544 Entry.Node = Size; Args.push_back(Entry); 6545 // FIXME: pass in SDLoc 6546 TargetLowering::CallLoweringInfo CLI(*this); 6547 CLI.setDebugLoc(dl) 6548 .setChain(Chain) 6549 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 6550 Dst.getValueType().getTypeForEVT(*getContext()), 6551 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 6552 TLI->getPointerTy(getDataLayout())), 6553 std::move(Args)) 6554 .setDiscardResult() 6555 .setTailCall(isTailCall); 6556 6557 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6558 return CallResult.second; 6559 } 6560 6561 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, 6562 SDValue Dst, unsigned DstAlign, 6563 SDValue Src, unsigned SrcAlign, 6564 SDValue Size, Type *SizeTy, 6565 unsigned ElemSz, bool isTailCall, 6566 MachinePointerInfo DstPtrInfo, 6567 MachinePointerInfo SrcPtrInfo) { 6568 // Emit a library call. 6569 TargetLowering::ArgListTy Args; 6570 TargetLowering::ArgListEntry Entry; 6571 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6572 Entry.Node = Dst; 6573 Args.push_back(Entry); 6574 6575 Entry.Node = Src; 6576 Args.push_back(Entry); 6577 6578 Entry.Ty = SizeTy; 6579 Entry.Node = Size; 6580 Args.push_back(Entry); 6581 6582 RTLIB::Libcall LibraryCall = 6583 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6584 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6585 report_fatal_error("Unsupported element size"); 6586 6587 TargetLowering::CallLoweringInfo CLI(*this); 6588 CLI.setDebugLoc(dl) 6589 .setChain(Chain) 6590 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6591 Type::getVoidTy(*getContext()), 6592 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6593 TLI->getPointerTy(getDataLayout())), 6594 std::move(Args)) 6595 .setDiscardResult() 6596 .setTailCall(isTailCall); 6597 6598 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6599 return CallResult.second; 6600 } 6601 6602 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 6603 SDValue Src, SDValue Size, Align Alignment, 6604 bool isVol, bool isTailCall, 6605 MachinePointerInfo DstPtrInfo, 6606 MachinePointerInfo SrcPtrInfo) { 6607 // Check to see if we should lower the memmove to loads and stores first. 6608 // For cases within the target-specified limits, this is the best choice. 6609 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6610 if (ConstantSize) { 6611 // Memmove with size zero? Just return the original chain. 6612 if (ConstantSize->isNullValue()) 6613 return Chain; 6614 6615 SDValue Result = getMemmoveLoadsAndStores( 6616 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, 6617 isVol, false, DstPtrInfo, SrcPtrInfo); 6618 if (Result.getNode()) 6619 return Result; 6620 } 6621 6622 // Then check to see if we should lower the memmove with target-specific 6623 // code. If the target chooses to do this, this is the next best. 
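  // If the target hook declines, it returns an empty SDValue and we fall
  // through to the generic lowering below.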
6624 if (TSI) { 6625 SDValue Result = 6626 TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, 6627 Alignment, isVol, DstPtrInfo, SrcPtrInfo); 6628 if (Result.getNode()) 6629 return Result; 6630 } 6631 6632 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6633 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6634 6635 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 6636 // not be safe. See memcpy above for more details. 6637 6638 // Emit a library call. 6639 TargetLowering::ArgListTy Args; 6640 TargetLowering::ArgListEntry Entry; 6641 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6642 Entry.Node = Dst; Args.push_back(Entry); 6643 Entry.Node = Src; Args.push_back(Entry); 6644 6645 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6646 Entry.Node = Size; Args.push_back(Entry); 6647 // FIXME: pass in SDLoc 6648 TargetLowering::CallLoweringInfo CLI(*this); 6649 CLI.setDebugLoc(dl) 6650 .setChain(Chain) 6651 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 6652 Dst.getValueType().getTypeForEVT(*getContext()), 6653 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 6654 TLI->getPointerTy(getDataLayout())), 6655 std::move(Args)) 6656 .setDiscardResult() 6657 .setTailCall(isTailCall); 6658 6659 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6660 return CallResult.second; 6661 } 6662 6663 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl, 6664 SDValue Dst, unsigned DstAlign, 6665 SDValue Src, unsigned SrcAlign, 6666 SDValue Size, Type *SizeTy, 6667 unsigned ElemSz, bool isTailCall, 6668 MachinePointerInfo DstPtrInfo, 6669 MachinePointerInfo SrcPtrInfo) { 6670 // Emit a library call. 6671 TargetLowering::ArgListTy Args; 6672 TargetLowering::ArgListEntry Entry; 6673 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6674 Entry.Node = Dst; 6675 Args.push_back(Entry); 6676 6677 Entry.Node = Src; 6678 Args.push_back(Entry); 6679 6680 Entry.Ty = SizeTy; 6681 Entry.Node = Size; 6682 Args.push_back(Entry); 6683 6684 RTLIB::Libcall LibraryCall = 6685 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6686 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6687 report_fatal_error("Unsupported element size"); 6688 6689 TargetLowering::CallLoweringInfo CLI(*this); 6690 CLI.setDebugLoc(dl) 6691 .setChain(Chain) 6692 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6693 Type::getVoidTy(*getContext()), 6694 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6695 TLI->getPointerTy(getDataLayout())), 6696 std::move(Args)) 6697 .setDiscardResult() 6698 .setTailCall(isTailCall); 6699 6700 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6701 return CallResult.second; 6702 } 6703 6704 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 6705 SDValue Src, SDValue Size, Align Alignment, 6706 bool isVol, bool isTailCall, 6707 MachinePointerInfo DstPtrInfo) { 6708 // Check to see if we should lower the memset to stores first. 6709 // For cases within the target-specified limits, this is the best choice. 6710 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6711 if (ConstantSize) { 6712 // Memset with size zero? Just return the original chain. 
6713 if (ConstantSize->isNullValue()) 6714 return Chain; 6715 6716 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src, 6717 ConstantSize->getZExtValue(), Alignment, 6718 isVol, DstPtrInfo); 6719 6720 if (Result.getNode()) 6721 return Result; 6722 } 6723 6724 // Then check to see if we should lower the memset with target-specific 6725 // code. If the target chooses to do this, this is the next best. 6726 if (TSI) { 6727 SDValue Result = TSI->EmitTargetCodeForMemset( 6728 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo); 6729 if (Result.getNode()) 6730 return Result; 6731 } 6732 6733 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6734 6735 // Emit a library call. 6736 TargetLowering::ArgListTy Args; 6737 TargetLowering::ArgListEntry Entry; 6738 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext()); 6739 Args.push_back(Entry); 6740 Entry.Node = Src; 6741 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 6742 Args.push_back(Entry); 6743 Entry.Node = Size; 6744 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6745 Args.push_back(Entry); 6746 6747 // FIXME: pass in SDLoc 6748 TargetLowering::CallLoweringInfo CLI(*this); 6749 CLI.setDebugLoc(dl) 6750 .setChain(Chain) 6751 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 6752 Dst.getValueType().getTypeForEVT(*getContext()), 6753 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 6754 TLI->getPointerTy(getDataLayout())), 6755 std::move(Args)) 6756 .setDiscardResult() 6757 .setTailCall(isTailCall); 6758 6759 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6760 return CallResult.second; 6761 } 6762 6763 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl, 6764 SDValue Dst, unsigned DstAlign, 6765 SDValue Value, SDValue Size, Type *SizeTy, 6766 unsigned ElemSz, bool isTailCall, 6767 MachinePointerInfo DstPtrInfo) { 6768 // Emit a library call. 
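  // For a supported element size this resolves to one of the
  // __llvm_memset_element_unordered_atomic_* libcalls, e.g. the _4 variant
  // for ElemSz == 4; unsupported sizes are rejected below.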
6769 TargetLowering::ArgListTy Args; 6770 TargetLowering::ArgListEntry Entry; 6771 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6772 Entry.Node = Dst; 6773 Args.push_back(Entry); 6774 6775 Entry.Ty = Type::getInt8Ty(*getContext()); 6776 Entry.Node = Value; 6777 Args.push_back(Entry); 6778 6779 Entry.Ty = SizeTy; 6780 Entry.Node = Size; 6781 Args.push_back(Entry); 6782 6783 RTLIB::Libcall LibraryCall = 6784 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6785 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6786 report_fatal_error("Unsupported element size"); 6787 6788 TargetLowering::CallLoweringInfo CLI(*this); 6789 CLI.setDebugLoc(dl) 6790 .setChain(Chain) 6791 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6792 Type::getVoidTy(*getContext()), 6793 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6794 TLI->getPointerTy(getDataLayout())), 6795 std::move(Args)) 6796 .setDiscardResult() 6797 .setTailCall(isTailCall); 6798 6799 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6800 return CallResult.second; 6801 } 6802 6803 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6804 SDVTList VTList, ArrayRef<SDValue> Ops, 6805 MachineMemOperand *MMO) { 6806 FoldingSetNodeID ID; 6807 ID.AddInteger(MemVT.getRawBits()); 6808 AddNodeIDNode(ID, Opcode, VTList, Ops); 6809 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6810 void* IP = nullptr; 6811 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6812 cast<AtomicSDNode>(E)->refineAlignment(MMO); 6813 return SDValue(E, 0); 6814 } 6815 6816 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6817 VTList, MemVT, MMO); 6818 createOperands(N, Ops); 6819 6820 CSEMap.InsertNode(N, IP); 6821 InsertNode(N); 6822 return SDValue(N, 0); 6823 } 6824 6825 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 6826 EVT MemVT, SDVTList VTs, SDValue Chain, 6827 SDValue Ptr, SDValue Cmp, SDValue Swp, 6828 MachineMemOperand *MMO) { 6829 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 6830 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 6831 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 6832 6833 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 6834 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6835 } 6836 6837 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6838 SDValue Chain, SDValue Ptr, SDValue Val, 6839 MachineMemOperand *MMO) { 6840 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 6841 Opcode == ISD::ATOMIC_LOAD_SUB || 6842 Opcode == ISD::ATOMIC_LOAD_AND || 6843 Opcode == ISD::ATOMIC_LOAD_CLR || 6844 Opcode == ISD::ATOMIC_LOAD_OR || 6845 Opcode == ISD::ATOMIC_LOAD_XOR || 6846 Opcode == ISD::ATOMIC_LOAD_NAND || 6847 Opcode == ISD::ATOMIC_LOAD_MIN || 6848 Opcode == ISD::ATOMIC_LOAD_MAX || 6849 Opcode == ISD::ATOMIC_LOAD_UMIN || 6850 Opcode == ISD::ATOMIC_LOAD_UMAX || 6851 Opcode == ISD::ATOMIC_LOAD_FADD || 6852 Opcode == ISD::ATOMIC_LOAD_FSUB || 6853 Opcode == ISD::ATOMIC_SWAP || 6854 Opcode == ISD::ATOMIC_STORE) && 6855 "Invalid Atomic Op"); 6856 6857 EVT VT = Val.getValueType(); 6858 6859 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? 
getVTList(MVT::Other) : 6860 getVTList(VT, MVT::Other); 6861 SDValue Ops[] = {Chain, Ptr, Val}; 6862 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6863 } 6864 6865 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6866 EVT VT, SDValue Chain, SDValue Ptr, 6867 MachineMemOperand *MMO) { 6868 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 6869 6870 SDVTList VTs = getVTList(VT, MVT::Other); 6871 SDValue Ops[] = {Chain, Ptr}; 6872 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6873 } 6874 6875 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 6876 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 6877 if (Ops.size() == 1) 6878 return Ops[0]; 6879 6880 SmallVector<EVT, 4> VTs; 6881 VTs.reserve(Ops.size()); 6882 for (const SDValue &Op : Ops) 6883 VTs.push_back(Op.getValueType()); 6884 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 6885 } 6886 6887 SDValue SelectionDAG::getMemIntrinsicNode( 6888 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 6889 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, 6890 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) { 6891 if (!Size && MemVT.isScalableVector()) 6892 Size = MemoryLocation::UnknownSize; 6893 else if (!Size) 6894 Size = MemVT.getStoreSize(); 6895 6896 MachineFunction &MF = getMachineFunction(); 6897 MachineMemOperand *MMO = 6898 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo); 6899 6900 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 6901 } 6902 6903 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 6904 SDVTList VTList, 6905 ArrayRef<SDValue> Ops, EVT MemVT, 6906 MachineMemOperand *MMO) { 6907 assert((Opcode == ISD::INTRINSIC_VOID || 6908 Opcode == ISD::INTRINSIC_W_CHAIN || 6909 Opcode == ISD::PREFETCH || 6910 ((int)Opcode <= std::numeric_limits<int>::max() && 6911 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 6912 "Opcode is not a memory-accessing opcode!"); 6913 6914 // Memoize the node unless it returns a flag. 6915 MemIntrinsicSDNode *N; 6916 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6917 FoldingSetNodeID ID; 6918 AddNodeIDNode(ID, Opcode, VTList, Ops); 6919 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 6920 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 6921 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6922 void *IP = nullptr; 6923 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6924 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 6925 return SDValue(E, 0); 6926 } 6927 6928 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6929 VTList, MemVT, MMO); 6930 createOperands(N, Ops); 6931 6932 CSEMap.InsertNode(N, IP); 6933 } else { 6934 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6935 VTList, MemVT, MMO); 6936 createOperands(N, Ops); 6937 } 6938 InsertNode(N); 6939 SDValue V(N, 0); 6940 NewSDValueDbgMsg(V, "Creating new node: ", this); 6941 return V; 6942 } 6943 6944 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, 6945 SDValue Chain, int FrameIndex, 6946 int64_t Size, int64_t Offset) { 6947 const unsigned Opcode = IsStart ? 
ISD::LIFETIME_START : ISD::LIFETIME_END; 6948 const auto VTs = getVTList(MVT::Other); 6949 SDValue Ops[2] = { 6950 Chain, 6951 getFrameIndex(FrameIndex, 6952 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()), 6953 true)}; 6954 6955 FoldingSetNodeID ID; 6956 AddNodeIDNode(ID, Opcode, VTs, Ops); 6957 ID.AddInteger(FrameIndex); 6958 ID.AddInteger(Size); 6959 ID.AddInteger(Offset); 6960 void *IP = nullptr; 6961 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6962 return SDValue(E, 0); 6963 6964 LifetimeSDNode *N = newSDNode<LifetimeSDNode>( 6965 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset); 6966 createOperands(N, Ops); 6967 CSEMap.InsertNode(N, IP); 6968 InsertNode(N); 6969 SDValue V(N, 0); 6970 NewSDValueDbgMsg(V, "Creating new node: ", this); 6971 return V; 6972 } 6973 6974 SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, 6975 uint64_t Guid, uint64_t Index, 6976 uint32_t Attr) { 6977 const unsigned Opcode = ISD::PSEUDO_PROBE; 6978 const auto VTs = getVTList(MVT::Other); 6979 SDValue Ops[] = {Chain}; 6980 FoldingSetNodeID ID; 6981 AddNodeIDNode(ID, Opcode, VTs, Ops); 6982 ID.AddInteger(Guid); 6983 ID.AddInteger(Index); 6984 void *IP = nullptr; 6985 if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP)) 6986 return SDValue(E, 0); 6987 6988 auto *N = newSDNode<PseudoProbeSDNode>( 6989 Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr); 6990 createOperands(N, Ops); 6991 CSEMap.InsertNode(N, IP); 6992 InsertNode(N); 6993 SDValue V(N, 0); 6994 NewSDValueDbgMsg(V, "Creating new node: ", this); 6995 return V; 6996 } 6997 6998 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6999 /// MachinePointerInfo record from it. This is particularly useful because the 7000 /// code generator has many cases where it doesn't bother passing in a 7001 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 7002 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 7003 SelectionDAG &DAG, SDValue Ptr, 7004 int64_t Offset = 0) { 7005 // If this is FI+Offset, we can model it. 7006 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 7007 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 7008 FI->getIndex(), Offset); 7009 7010 // If this is (FI+Offset1)+Offset2, we can model it. 7011 if (Ptr.getOpcode() != ISD::ADD || 7012 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 7013 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 7014 return Info; 7015 7016 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 7017 return MachinePointerInfo::getFixedStack( 7018 DAG.getMachineFunction(), FI, 7019 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 7020 } 7021 7022 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 7023 /// MachinePointerInfo record from it. This is particularly useful because the 7024 /// code generator has many cases where it doesn't bother passing in a 7025 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 7026 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 7027 SelectionDAG &DAG, SDValue Ptr, 7028 SDValue OffsetOp) { 7029 // If the 'Offset' value isn't a constant, we can't handle this. 
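  // For example, an unindexed load passes an undef offset operand, which the
  // isUndef case below treats as an offset of zero.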
7030 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 7031 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); 7032 if (OffsetOp.isUndef()) 7033 return InferPointerInfo(Info, DAG, Ptr); 7034 return Info; 7035 } 7036 7037 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 7038 EVT VT, const SDLoc &dl, SDValue Chain, 7039 SDValue Ptr, SDValue Offset, 7040 MachinePointerInfo PtrInfo, EVT MemVT, 7041 Align Alignment, 7042 MachineMemOperand::Flags MMOFlags, 7043 const AAMDNodes &AAInfo, const MDNode *Ranges) { 7044 assert(Chain.getValueType() == MVT::Other && 7045 "Invalid chain type"); 7046 7047 MMOFlags |= MachineMemOperand::MOLoad; 7048 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 7049 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 7050 // clients. 7051 if (PtrInfo.V.isNull()) 7052 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); 7053 7054 uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize()); 7055 MachineFunction &MF = getMachineFunction(); 7056 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, 7057 Alignment, AAInfo, Ranges); 7058 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 7059 } 7060 7061 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 7062 EVT VT, const SDLoc &dl, SDValue Chain, 7063 SDValue Ptr, SDValue Offset, EVT MemVT, 7064 MachineMemOperand *MMO) { 7065 if (VT == MemVT) { 7066 ExtType = ISD::NON_EXTLOAD; 7067 } else if (ExtType == ISD::NON_EXTLOAD) { 7068 assert(VT == MemVT && "Non-extending load from different memory type!"); 7069 } else { 7070 // Extending load. 7071 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 7072 "Should only be an extending load, not truncating!"); 7073 assert(VT.isInteger() == MemVT.isInteger() && 7074 "Cannot convert from FP to Int or Int -> FP!"); 7075 assert(VT.isVector() == MemVT.isVector() && 7076 "Cannot use an ext load to convert to or from a vector!"); 7077 assert((!VT.isVector() || 7078 VT.getVectorElementCount() == MemVT.getVectorElementCount()) && 7079 "Cannot use an ext load to change the number of vector elements!"); 7080 } 7081 7082 bool Indexed = AM != ISD::UNINDEXED; 7083 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 7084 7085 SDVTList VTs = Indexed ? 
    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                  ExtType, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachinePointerInfo PtrInfo,
                              MaybeAlign Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 MaybeAlign Alignment,
                                 MachineMemOperand::Flags MMOFlags,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
                 MemVT, Alignment, MMOFlags, AAInfo);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
                                     SDValue Base, SDValue Offset,
                                     ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
  // Don't propagate the invariant or dereferenceable flags.
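  // Note that the indexed load produces an extra result: the incremented
  // pointer (Base combined with Offset), alongside the loaded value and the
  // chain.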
7152 auto MMOFlags = 7153 LD->getMemOperand()->getFlags() & 7154 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 7155 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 7156 LD->getChain(), Base, Offset, LD->getPointerInfo(), 7157 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo()); 7158 } 7159 7160 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7161 SDValue Ptr, MachinePointerInfo PtrInfo, 7162 Align Alignment, 7163 MachineMemOperand::Flags MMOFlags, 7164 const AAMDNodes &AAInfo) { 7165 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 7166 7167 MMOFlags |= MachineMemOperand::MOStore; 7168 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 7169 7170 if (PtrInfo.V.isNull()) 7171 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 7172 7173 MachineFunction &MF = getMachineFunction(); 7174 uint64_t Size = 7175 MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize()); 7176 MachineMemOperand *MMO = 7177 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo); 7178 return getStore(Chain, dl, Val, Ptr, MMO); 7179 } 7180 7181 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7182 SDValue Ptr, MachineMemOperand *MMO) { 7183 assert(Chain.getValueType() == MVT::Other && 7184 "Invalid chain type"); 7185 EVT VT = Val.getValueType(); 7186 SDVTList VTs = getVTList(MVT::Other); 7187 SDValue Undef = getUNDEF(Ptr.getValueType()); 7188 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 7189 FoldingSetNodeID ID; 7190 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7191 ID.AddInteger(VT.getRawBits()); 7192 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 7193 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 7194 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7195 void *IP = nullptr; 7196 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7197 cast<StoreSDNode>(E)->refineAlignment(MMO); 7198 return SDValue(E, 0); 7199 } 7200 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7201 ISD::UNINDEXED, false, VT, MMO); 7202 createOperands(N, Ops); 7203 7204 CSEMap.InsertNode(N, IP); 7205 InsertNode(N); 7206 SDValue V(N, 0); 7207 NewSDValueDbgMsg(V, "Creating new node: ", this); 7208 return V; 7209 } 7210 7211 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7212 SDValue Ptr, MachinePointerInfo PtrInfo, 7213 EVT SVT, Align Alignment, 7214 MachineMemOperand::Flags MMOFlags, 7215 const AAMDNodes &AAInfo) { 7216 assert(Chain.getValueType() == MVT::Other && 7217 "Invalid chain type"); 7218 7219 MMOFlags |= MachineMemOperand::MOStore; 7220 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 7221 7222 if (PtrInfo.V.isNull()) 7223 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 7224 7225 MachineFunction &MF = getMachineFunction(); 7226 MachineMemOperand *MMO = MF.getMachineMemOperand( 7227 PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()), 7228 Alignment, AAInfo); 7229 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 7230 } 7231 7232 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7233 SDValue Ptr, EVT SVT, 7234 MachineMemOperand *MMO) { 7235 EVT VT = Val.getValueType(); 7236 7237 assert(Chain.getValueType() == MVT::Other && 7238 "Invalid chain type"); 7239 if (VT == SVT) 7240 return getStore(Chain, dl, Val, Ptr, MMO); 7241 7242 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 7243 "Should only be a truncating store, not extending!"); 7244 
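  // A truncating store writes only SVT's worth of low bits of Val to memory;
  // e.g. an i32 value stored with SVT == i16 keeps bits [15:0].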
  assert(VT.isInteger() == SVT.isInteger() &&
         "Can't do FP-INT conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, true, SVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   ST->isTruncatingStore(), ST->getMemoryVT(),
                                   ST->getMemOperand());
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                                    SDValue Base, SDValue Offset, SDValue Mask,
                                    SDValue PassThru, EVT MemVT,
                                    MachineMemOperand *MMO,
                                    ISD::MemIndexedMode AM,
                                    ISD::LoadExtType ExtTy, bool isExpanding) {
  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) &&
         "Unindexed masked load with an offset!");
  SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
                         : getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                        AM, ExtTy, isExpanding, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
                                           SDValue Base, SDValue Offset,
                                           ISD::MemIndexedMode AM) {
  MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() &&
         "Masked load is already an indexed load!");
  return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
                       Offset, LD->getMask(), LD->getPassThru(),
                       LD->getMemoryVT(), LD->getMemOperand(), AM,
                       LD->getExtensionType(), LD->isExpandingLoad());
}

SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
                                     SDValue Val, SDValue Base, SDValue Offset,
                                     SDValue Mask, EVT MemVT,
                                     MachineMemOperand *MMO,
                                     ISD::MemIndexedMode AM, bool IsTruncating,
                                     bool IsCompressing) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) &&
         "Unindexed masked store with an offset!");
  SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
                         : getVTList(MVT::Other);
  SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
      dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N =
      newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   IsTruncating, IsCompressing, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
                                            SDValue Base, SDValue Offset,
                                            ISD::MemIndexedMode AM) {
  MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() &&
         "Masked store is already an indexed store!");
  return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
                        ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
                        AM, ST->isTruncatingStore(), ST->isCompressingStore());
}

SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
                                      ArrayRef<SDValue> Ops,
                                      MachineMemOperand *MMO,
                                      ISD::MemIndexType IndexType,
                                      ISD::LoadExtType ExtTy) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType, ExtTy));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
  auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                          VTs, VT, MMO, IndexType, ExtTy);
  createOperands(N, Ops);

  assert(N->getPassThru().getValueType() == N->getValueType(0) &&
         "Incompatible type of the PassThru value in MaskedGatherSDNode");
  assert(N->getMask().getValueType().getVectorElementCount() ==
             N->getValueType(0).getVectorElementCount() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
             N->getValueType(0).getVectorElementCount().isScalable() &&
         "Scalable flags of index and data do not match");
  assert(ElementCount::isKnownGE(
             N->getIndex().getValueType().getVectorElementCount(),
             N->getValueType(0).getVectorElementCount()) &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
                                       ArrayRef<SDValue> Ops,
                                       MachineMemOperand *MMO,
                                       ISD::MemIndexType IndexType,
                                       bool IsTrunc) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType, IsTrunc));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, VT, MMO, IndexType, IsTrunc);
  createOperands(N, Ops);

  assert(N->getMask().getValueType().getVectorElementCount() ==
             N->getValue().getValueType().getVectorElementCount() &&
         "Vector width mismatch between mask and data");
  assert(
      N->getIndex().getValueType().getVectorElementCount().isScalable() ==
          N->getValue().getValueType().getVectorElementCount().isScalable() &&
      "Scalable flags of index and data do not match");
  assert(ElementCount::isKnownGE(
             N->getIndex().getValueType().getVectorElementCount(),
             N->getValue().getValueType().getVectorElementCount()) &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
  // select undef, T, F --> T (if T is a constant), otherwise F
  // select ?, undef, F --> F
  // select ?, T, undef --> T
  if (Cond.isUndef())
    return isConstantValueOfAnyType(T) ? T : F;
  if (T.isUndef())
    return F;
  if (F.isUndef())
    return T;

  // select true, T, F --> T
  // select false, T, F --> F
  if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
    return CondC->isNullValue() ? F : T;

  // TODO: This should simplify VSELECT with constant condition using something
  // like this (but check boolean contents to be complete?):
  // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
  //   return T;
  // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
  //   return F;

  // select ?, T, T --> T
  if (T == F)
    return T;

  return SDValue();
}

SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
  // shift undef, Y --> 0 (can always assume that the undef value is 0)
  if (X.isUndef())
    return getConstant(0, SDLoc(X.getNode()), X.getValueType());
  // shift X, undef --> undef (because it may shift by the bitwidth)
  if (Y.isUndef())
    return getUNDEF(X.getValueType());

  // shift 0, Y --> 0
  // shift X, 0 --> X
  if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
    return X;

  // shift X, C >= bitwidth(X) --> undef
  // All vector elements must be too big (or undef) to avoid partial undefs.
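  // For example, (shl i8 %x, 8) folds to undef, as does a <2 x i8> shift by
  // a splat of 8; a mixed amount such as <8, 1> is left alone because only
  // some lanes would become undef.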
7536 auto isShiftTooBig = [X](ConstantSDNode *Val) { 7537 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits()); 7538 }; 7539 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true)) 7540 return getUNDEF(X.getValueType()); 7541 7542 return SDValue(); 7543 } 7544 7545 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, 7546 SDNodeFlags Flags) { 7547 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand 7548 // (an undef operand can be chosen to be Nan/Inf), then the result of this 7549 // operation is poison. That result can be relaxed to undef. 7550 ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true); 7551 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true); 7552 bool HasNan = (XC && XC->getValueAPF().isNaN()) || 7553 (YC && YC->getValueAPF().isNaN()); 7554 bool HasInf = (XC && XC->getValueAPF().isInfinity()) || 7555 (YC && YC->getValueAPF().isInfinity()); 7556 7557 if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef())) 7558 return getUNDEF(X.getValueType()); 7559 7560 if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef())) 7561 return getUNDEF(X.getValueType()); 7562 7563 if (!YC) 7564 return SDValue(); 7565 7566 // X + -0.0 --> X 7567 if (Opcode == ISD::FADD) 7568 if (YC->getValueAPF().isNegZero()) 7569 return X; 7570 7571 // X - +0.0 --> X 7572 if (Opcode == ISD::FSUB) 7573 if (YC->getValueAPF().isPosZero()) 7574 return X; 7575 7576 // X * 1.0 --> X 7577 // X / 1.0 --> X 7578 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV) 7579 if (YC->getValueAPF().isExactlyValue(1.0)) 7580 return X; 7581 7582 // X * 0.0 --> 0.0 7583 if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros()) 7584 if (YC->getValueAPF().isZero()) 7585 return getConstantFP(0.0, SDLoc(Y), Y.getValueType()); 7586 7587 return SDValue(); 7588 } 7589 7590 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 7591 SDValue Ptr, SDValue SV, unsigned Align) { 7592 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 7593 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 7594 } 7595 7596 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7597 ArrayRef<SDUse> Ops) { 7598 switch (Ops.size()) { 7599 case 0: return getNode(Opcode, DL, VT); 7600 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 7601 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 7602 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 7603 default: break; 7604 } 7605 7606 // Copy from an SDUse array into an SDValue array for use with 7607 // the regular getNode logic. 
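  // (SDUse converts implicitly to SDValue, so a plain copy is sufficient.)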
7608 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 7609 return getNode(Opcode, DL, VT, NewOps); 7610 } 7611 7612 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7613 ArrayRef<SDValue> Ops) { 7614 SDNodeFlags Flags; 7615 if (Inserter) 7616 Flags = Inserter->getFlags(); 7617 return getNode(Opcode, DL, VT, Ops, Flags); 7618 } 7619 7620 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7621 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7622 unsigned NumOps = Ops.size(); 7623 switch (NumOps) { 7624 case 0: return getNode(Opcode, DL, VT); 7625 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 7626 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 7627 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags); 7628 default: break; 7629 } 7630 7631 #ifndef NDEBUG 7632 for (auto &Op : Ops) 7633 assert(Op.getOpcode() != ISD::DELETED_NODE && 7634 "Operand is DELETED_NODE!"); 7635 #endif 7636 7637 switch (Opcode) { 7638 default: break; 7639 case ISD::BUILD_VECTOR: 7640 // Attempt to simplify BUILD_VECTOR. 7641 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 7642 return V; 7643 break; 7644 case ISD::CONCAT_VECTORS: 7645 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 7646 return V; 7647 break; 7648 case ISD::SELECT_CC: 7649 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 7650 assert(Ops[0].getValueType() == Ops[1].getValueType() && 7651 "LHS and RHS of condition must have same type!"); 7652 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7653 "True and False arms of SelectCC must have same type!"); 7654 assert(Ops[2].getValueType() == VT && 7655 "select_cc node must be of same type as true and false value!"); 7656 break; 7657 case ISD::BR_CC: 7658 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 7659 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7660 "LHS/RHS of comparison should match types!"); 7661 break; 7662 } 7663 7664 // Memoize nodes. 
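  // Glue-producing nodes are intentionally kept distinct and bypass the
  // CSE map; every other node is uniqued through the FoldingSet below.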
7665 SDNode *N; 7666 SDVTList VTs = getVTList(VT); 7667 7668 if (VT != MVT::Glue) { 7669 FoldingSetNodeID ID; 7670 AddNodeIDNode(ID, Opcode, VTs, Ops); 7671 void *IP = nullptr; 7672 7673 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7674 return SDValue(E, 0); 7675 7676 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7677 createOperands(N, Ops); 7678 7679 CSEMap.InsertNode(N, IP); 7680 } else { 7681 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7682 createOperands(N, Ops); 7683 } 7684 7685 N->setFlags(Flags); 7686 InsertNode(N); 7687 SDValue V(N, 0); 7688 NewSDValueDbgMsg(V, "Creating new node: ", this); 7689 return V; 7690 } 7691 7692 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7693 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 7694 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 7695 } 7696 7697 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7698 ArrayRef<SDValue> Ops) { 7699 SDNodeFlags Flags; 7700 if (Inserter) 7701 Flags = Inserter->getFlags(); 7702 return getNode(Opcode, DL, VTList, Ops, Flags); 7703 } 7704 7705 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7706 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7707 if (VTList.NumVTs == 1) 7708 return getNode(Opcode, DL, VTList.VTs[0], Ops); 7709 7710 #ifndef NDEBUG 7711 for (auto &Op : Ops) 7712 assert(Op.getOpcode() != ISD::DELETED_NODE && 7713 "Operand is DELETED_NODE!"); 7714 #endif 7715 7716 switch (Opcode) { 7717 case ISD::STRICT_FP_EXTEND: 7718 assert(VTList.NumVTs == 2 && Ops.size() == 2 && 7719 "Invalid STRICT_FP_EXTEND!"); 7720 assert(VTList.VTs[0].isFloatingPoint() && 7721 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!"); 7722 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7723 "STRICT_FP_EXTEND result type should be vector iff the operand " 7724 "type is vector!"); 7725 assert((!VTList.VTs[0].isVector() || 7726 VTList.VTs[0].getVectorNumElements() == 7727 Ops[1].getValueType().getVectorNumElements()) && 7728 "Vector element count mismatch!"); 7729 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) && 7730 "Invalid fpext node, dst <= src!"); 7731 break; 7732 case ISD::STRICT_FP_ROUND: 7733 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!"); 7734 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7735 "STRICT_FP_ROUND result type should be vector iff the operand " 7736 "type is vector!"); 7737 assert((!VTList.VTs[0].isVector() || 7738 VTList.VTs[0].getVectorNumElements() == 7739 Ops[1].getValueType().getVectorNumElements()) && 7740 "Vector element count mismatch!"); 7741 assert(VTList.VTs[0].isFloatingPoint() && 7742 Ops[1].getValueType().isFloatingPoint() && 7743 VTList.VTs[0].bitsLT(Ops[1].getValueType()) && 7744 isa<ConstantSDNode>(Ops[2]) && 7745 (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 || 7746 cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) && 7747 "Invalid STRICT_FP_ROUND!"); 7748 break; 7749 #if 0 7750 // FIXME: figure out how to safely handle things like 7751 // int foo(int x) { return 1 << (x & 255); } 7752 // int bar() { return foo(256); } 7753 case ISD::SRA_PARTS: 7754 case ISD::SRL_PARTS: 7755 case ISD::SHL_PARTS: 7756 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 7757 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 7758 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7759 else if (N3.getOpcode() == ISD::AND) 7760 if (ConstantSDNode *AndRHS = 
7761         // If the and is only masking out bits that cannot affect the shift,
7762         // eliminate the and.
7763         unsigned NumBits = VT.getScalarSizeInBits()*2;
7764         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
7765           return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
7766       }
7767     break;
7768 #endif
7769   }
7770
7771   // Memoize the node unless it returns a flag.
7772   SDNode *N;
7773   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
7774     FoldingSetNodeID ID;
7775     AddNodeIDNode(ID, Opcode, VTList, Ops);
7776     void *IP = nullptr;
7777     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
7778       return SDValue(E, 0);
7779
7780     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
7781     createOperands(N, Ops);
7782     CSEMap.InsertNode(N, IP);
7783   } else {
7784     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
7785     createOperands(N, Ops);
7786   }
7787
7788   N->setFlags(Flags);
7789   InsertNode(N);
7790   SDValue V(N, 0);
7791   NewSDValueDbgMsg(V, "Creating new node: ", this);
7792   return V;
7793 }
7794
7795 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
7796                               SDVTList VTList) {
7797   return getNode(Opcode, DL, VTList, None);
7798 }
7799
7800 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7801                               SDValue N1) {
7802   SDValue Ops[] = { N1 };
7803   return getNode(Opcode, DL, VTList, Ops);
7804 }
7805
7806 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7807                               SDValue N1, SDValue N2) {
7808   SDValue Ops[] = { N1, N2 };
7809   return getNode(Opcode, DL, VTList, Ops);
7810 }
7811
7812 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7813                               SDValue N1, SDValue N2, SDValue N3) {
7814   SDValue Ops[] = { N1, N2, N3 };
7815   return getNode(Opcode, DL, VTList, Ops);
7816 }
7817
7818 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7819                               SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
7820   SDValue Ops[] = { N1, N2, N3, N4 };
7821   return getNode(Opcode, DL, VTList, Ops);
7822 }
7823
7824 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7825                               SDValue N1, SDValue N2, SDValue N3, SDValue N4,
7826                               SDValue N5) {
7827   SDValue Ops[] = { N1, N2, N3, N4, N5 };
7828   return getNode(Opcode, DL, VTList, Ops);
7829 }
7830
7831 SDVTList SelectionDAG::getVTList(EVT VT) {
7832   return makeVTList(SDNode::getValueTypeList(VT), 1);
7833 }
7834
7835 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
7836   FoldingSetNodeID ID;
7837   ID.AddInteger(2U);
7838   ID.AddInteger(VT1.getRawBits());
7839   ID.AddInteger(VT2.getRawBits());
7840
7841   void *IP = nullptr;
7842   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7843   if (!Result) {
7844     EVT *Array = Allocator.Allocate<EVT>(2);
7845     Array[0] = VT1;
7846     Array[1] = VT2;
7847     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
7848     VTListMap.InsertNode(Result, IP);
7849   }
7850   return Result->getSDVTList();
7851 }
7852
7853 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
7854   FoldingSetNodeID ID;
7855   ID.AddInteger(3U);
7856   ID.AddInteger(VT1.getRawBits());
7857   ID.AddInteger(VT2.getRawBits());
7858   ID.AddInteger(VT3.getRawBits());
7859
7860   void *IP = nullptr;
7861   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7862   if (!Result) {
7863     EVT *Array = Allocator.Allocate<EVT>(3);
7864     Array[0] = VT1;
7865     Array[1] = VT2;
7866     Array[2] = VT3;
7867     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
7868     VTListMap.InsertNode(Result, IP);
7869   }
7870   return Result->getSDVTList();
7871 }
7872
7873 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
7874   FoldingSetNodeID ID;
7875   ID.AddInteger(4U);
7876   ID.AddInteger(VT1.getRawBits());
7877   ID.AddInteger(VT2.getRawBits());
7878   ID.AddInteger(VT3.getRawBits());
7879   ID.AddInteger(VT4.getRawBits());
7880
7881   void *IP = nullptr;
7882   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7883   if (!Result) {
7884     EVT *Array = Allocator.Allocate<EVT>(4);
7885     Array[0] = VT1;
7886     Array[1] = VT2;
7887     Array[2] = VT3;
7888     Array[3] = VT4;
7889     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
7890     VTListMap.InsertNode(Result, IP);
7891   }
7892   return Result->getSDVTList();
7893 }
7894
7895 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
7896   unsigned NumVTs = VTs.size();
7897   FoldingSetNodeID ID;
7898   ID.AddInteger(NumVTs);
7899   for (unsigned index = 0; index < NumVTs; index++) {
7900     ID.AddInteger(VTs[index].getRawBits());
7901   }
7902
7903   void *IP = nullptr;
7904   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7905   if (!Result) {
7906     EVT *Array = Allocator.Allocate<EVT>(NumVTs);
7907     llvm::copy(VTs, Array);
7908     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
7909     VTListMap.InsertNode(Result, IP);
7910   }
7911   return Result->getSDVTList();
7912 }
7913
7914
7915 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
7916 /// specified operands. If the resultant node already exists in the DAG,
7917 /// this does not modify the specified node; instead it returns the node that
7918 /// already exists. If the resultant node does not exist in the DAG, the
7919 /// input node is returned. As a degenerate case, if you specify the same
7920 /// input operands as the node already has, the input node is returned.
7921 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
7922   assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
7923
7924   // Check to see if there is no change.
7925   if (Op == N->getOperand(0)) return N;
7926
7927   // See if the modified node already exists.
7928   void *InsertPos = nullptr;
7929   if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
7930     return Existing;
7931
7932   // Nope it doesn't. Remove the node from its current place in the maps.
7933   if (InsertPos)
7934     if (!RemoveNodeFromCSEMaps(N))
7935       InsertPos = nullptr;
7936
7937   // Now we update the operands.
7938   N->OperandList[0].set(Op);
7939
7940   updateDivergence(N);
7941   // If this gets put into a CSE map, add it.
7942   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7943   return N;
7944 }
7945
7946 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
7947   assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
7948
7949   // Check to see if there is no change.
7950   if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
7951     return N;   // No operands changed, just return the input node.
7952
7953   // See if the modified node already exists.
7954   void *InsertPos = nullptr;
7955   if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
7956     return Existing;
7957
7958   // Nope it doesn't. Remove the node from its current place in the maps.
7959   if (InsertPos)
7960     if (!RemoveNodeFromCSEMaps(N))
7961       InsertPos = nullptr;
7962
7963   // Now we update the operands.
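  // (Each SDUse::set below also unlinks this node from the old operand's use
  // list and adds it to the new operand's use list, so the use lists stay
  // consistent without any extra bookkeeping here.)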
7964   if (N->OperandList[0] != Op1)
7965     N->OperandList[0].set(Op1);
7966   if (N->OperandList[1] != Op2)
7967     N->OperandList[1].set(Op2);
7968
7969   updateDivergence(N);
7970   // If this gets put into a CSE map, add it.
7971   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7972   return N;
7973 }
7974
7975 SDNode *SelectionDAG::
7976 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
7977   SDValue Ops[] = { Op1, Op2, Op3 };
7978   return UpdateNodeOperands(N, Ops);
7979 }
7980
7981 SDNode *SelectionDAG::
7982 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7983                    SDValue Op3, SDValue Op4) {
7984   SDValue Ops[] = { Op1, Op2, Op3, Op4 };
7985   return UpdateNodeOperands(N, Ops);
7986 }
7987
7988 SDNode *SelectionDAG::
7989 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7990                    SDValue Op3, SDValue Op4, SDValue Op5) {
7991   SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
7992   return UpdateNodeOperands(N, Ops);
7993 }
7994
7995 SDNode *SelectionDAG::
7996 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
7997   unsigned NumOps = Ops.size();
7998   assert(N->getNumOperands() == NumOps &&
7999          "Update with wrong number of operands");
8000
8001   // If no operands changed just return the input node.
8002   if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
8003     return N;
8004
8005   // See if the modified node already exists.
8006   void *InsertPos = nullptr;
8007   if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
8008     return Existing;
8009
8010   // Nope it doesn't. Remove the node from its current place in the maps.
8011   if (InsertPos)
8012     if (!RemoveNodeFromCSEMaps(N))
8013       InsertPos = nullptr;
8014
8015   // Now we update the operands.
8016   for (unsigned i = 0; i != NumOps; ++i)
8017     if (N->OperandList[i] != Ops[i])
8018       N->OperandList[i].set(Ops[i]);
8019
8020   updateDivergence(N);
8021   // If this gets put into a CSE map, add it.
8022   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
8023   return N;
8024 }
8025
8026 /// DropOperands - Release the operands and set this node to have
8027 /// zero operands.
8028 void SDNode::DropOperands() {
8029   // Unlike the code in MorphNodeTo that does this, we don't need to
8030   // watch for dead nodes here.
8031   for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
8032     SDUse &Use = *I++;
8033     Use.set(SDValue());
8034   }
8035 }
8036
8037 void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
8038                                   ArrayRef<MachineMemOperand *> NewMemRefs) {
8039   if (NewMemRefs.empty()) {
8040     N->clearMemRefs();
8041     return;
8042   }
8043
8044   // Check if we can avoid allocating by storing a single reference directly.
8045   if (NewMemRefs.size() == 1) {
8046     N->MemRefs = NewMemRefs[0];
8047     N->NumMemRefs = 1;
8048     return;
8049   }
8050
8051   MachineMemOperand **MemRefsBuffer =
8052       Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
8053   llvm::copy(NewMemRefs, MemRefsBuffer);
8054   N->MemRefs = MemRefsBuffer;
8055   N->NumMemRefs = static_cast<int>(NewMemRefs.size());
8056 }
8057
8058 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
8059 /// machine opcode.
8060 ///
8061 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8062                                    EVT VT) {
8063   SDVTList VTs = getVTList(VT);
8064   return SelectNodeTo(N, MachineOpc, VTs, None);
8065 }
8066
8067 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8068                                    EVT VT, SDValue Op1) {
8069   SDVTList VTs = getVTList(VT);
8070   SDValue Ops[] = { Op1 };
8071   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8072 }
8073
8074 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8075                                    EVT VT, SDValue Op1,
8076                                    SDValue Op2) {
8077   SDVTList VTs = getVTList(VT);
8078   SDValue Ops[] = { Op1, Op2 };
8079   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8080 }
8081
8082 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8083                                    EVT VT, SDValue Op1,
8084                                    SDValue Op2, SDValue Op3) {
8085   SDVTList VTs = getVTList(VT);
8086   SDValue Ops[] = { Op1, Op2, Op3 };
8087   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8088 }
8089
8090 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8091                                    EVT VT, ArrayRef<SDValue> Ops) {
8092   SDVTList VTs = getVTList(VT);
8093   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8094 }
8095
8096 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8097                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
8098   SDVTList VTs = getVTList(VT1, VT2);
8099   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8100 }
8101
8102 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8103                                    EVT VT1, EVT VT2) {
8104   SDVTList VTs = getVTList(VT1, VT2);
8105   return SelectNodeTo(N, MachineOpc, VTs, None);
8106 }
8107
8108 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8109                                    EVT VT1, EVT VT2, EVT VT3,
8110                                    ArrayRef<SDValue> Ops) {
8111   SDVTList VTs = getVTList(VT1, VT2, VT3);
8112   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8113 }
8114
8115 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8116                                    EVT VT1, EVT VT2,
8117                                    SDValue Op1, SDValue Op2) {
8118   SDVTList VTs = getVTList(VT1, VT2);
8119   SDValue Ops[] = { Op1, Op2 };
8120   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8121 }
8122
8123 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8124                                    SDVTList VTs, ArrayRef<SDValue> Ops) {
8125   SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
8126   // Reset the NodeID to -1.
8127   New->setNodeId(-1);
8128   if (New != N) {
8129     ReplaceAllUsesWith(N, New);
8130     RemoveDeadNode(N);
8131   }
8132   return New;
8133 }
8134
8135 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
8136 /// the line number information on the merged node since it is not possible to
8137 /// preserve the information that the operation is associated with multiple
8138 /// lines. This will make the debugger work better at -O0, where there is a
8139 /// higher probability of having other instructions associated with that line.
8140 ///
8141 /// For IROrder, we keep the smaller of the two.
8142 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
8143   DebugLoc NLoc = N->getDebugLoc();
8144   if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
8145     N->setDebugLoc(DebugLoc());
8146   }
8147   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
8148   N->setIROrder(Order);
8149   return N;
8150 }
8151
8152 /// MorphNodeTo - This *mutates* the specified node to have the specified
8153 /// return type, opcode, and operands.
8154 ///
8155 /// Note that MorphNodeTo returns the resultant node. If there is already a
8156 /// node of the specified opcode and operands, it returns that node instead of
8157 /// the current one. Note that the SDLoc need not be the same.
8158 ///
8159 /// Using MorphNodeTo is faster than creating a new node and swapping it in
8160 /// with ReplaceAllUsesWith both because it often avoids allocating a new
8161 /// node, and because it doesn't require CSE recalculation for any of
8162 /// the node's users.
8163 ///
8164 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
8165 /// As a consequence, it isn't appropriate to use it from within the DAG
8166 /// combiner or the legalizer, which maintain worklists that would need to be
8167 /// updated when deleting things.
8168 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
8169                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
8170   // If an identical node already exists, use it.
8171   void *IP = nullptr;
8172   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
8173     FoldingSetNodeID ID;
8174     AddNodeIDNode(ID, Opc, VTs, Ops);
8175     if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
8176       return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
8177   }
8178
8179   if (!RemoveNodeFromCSEMaps(N))
8180     IP = nullptr;
8181
8182   // Start the morphing.
8183   N->NodeType = Opc;
8184   N->ValueList = VTs.VTs;
8185   N->NumValues = VTs.NumVTs;
8186
8187   // Clear the operands list, updating used nodes to remove this from their
8188   // use list. Keep track of any operands that become dead as a result.
8189   SmallPtrSet<SDNode*, 16> DeadNodeSet;
8190   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
8191     SDUse &Use = *I++;
8192     SDNode *Used = Use.getNode();
8193     Use.set(SDValue());
8194     if (Used->use_empty())
8195       DeadNodeSet.insert(Used);
8196   }
8197
8198   // For MachineNodes, clear any stale memory references information.
8199   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
8200     MN->clearMemRefs();
8201
8202   // Swap for an appropriately sized array from the recycler.
8203   removeOperands(N);
8204   createOperands(N, Ops);
8205
8206   // Delete any nodes that are still dead after adding the uses for the
8207   // new operands.
8208   if (!DeadNodeSet.empty()) {
8209     SmallVector<SDNode *, 16> DeadNodes;
8210     for (SDNode *N : DeadNodeSet)
8211       if (N->use_empty())
8212         DeadNodes.push_back(N);
8213     RemoveDeadNodes(DeadNodes);
8214   }
8215
8216   if (IP)
8217     CSEMap.InsertNode(N, IP);   // Memoize the new node.
8218   return N;
8219 }
8220
8221 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
8222   unsigned OrigOpc = Node->getOpcode();
8223   unsigned NewOpc;
8224   switch (OrigOpc) {
8225   default:
8226     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
8227 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
8228   case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
8229 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
8230   case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
8231 #include "llvm/IR/ConstrainedOps.def"
8232   }
8233
8234   assert(Node->getNumValues() == 2 && "Unexpected number of results!");
8235
8236   // We're taking this node out of the chain, so we need to re-link things.
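  // Illustrative shape of the re-link (a sketch, not target-specific code):
  // a strict node produces (value, chain). Forwarding its chain result to
  // its incoming chain detaches it from the chain graph before the morph:
  //   before: InChain --> STRICT_FSQRT --> OutChain --> chain users
  //   after:  InChain ---------------------------------> chain users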
8237   SDValue InputChain = Node->getOperand(0);
8238   SDValue OutputChain = SDValue(Node, 1);
8239   ReplaceAllUsesOfValueWith(OutputChain, InputChain);
8240
8241   SmallVector<SDValue, 3> Ops;
8242   for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
8243     Ops.push_back(Node->getOperand(i));
8244
8245   SDVTList VTs = getVTList(Node->getValueType(0));
8246   SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);
8247
8248   // MorphNodeTo can operate in two ways: if an existing node with the
8249   // specified operands exists, it can just return it. Otherwise, it
8250   // updates the node in place to have the requested operands.
8251   if (Res == Node) {
8252     // If we updated the node in place, reset the node ID. To the isel,
8253     // this should be just like a newly allocated machine node.
8254     Res->setNodeId(-1);
8255   } else {
8256     ReplaceAllUsesWith(Node, Res);
8257     RemoveDeadNode(Node);
8258   }
8259
8260   return Res;
8261 }
8262
8263 /// getMachineNode - These are used for target selectors to create a new node
8264 /// with specified return type(s), MachineInstr opcode, and operands.
8265 ///
8266 /// Note that getMachineNode returns the resultant node. If there is already a
8267 /// node of the specified opcode and operands, it returns that node instead of
8268 /// the current one.
8269 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8270                                             EVT VT) {
8271   SDVTList VTs = getVTList(VT);
8272   return getMachineNode(Opcode, dl, VTs, None);
8273 }
8274
8275 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8276                                             EVT VT, SDValue Op1) {
8277   SDVTList VTs = getVTList(VT);
8278   SDValue Ops[] = { Op1 };
8279   return getMachineNode(Opcode, dl, VTs, Ops);
8280 }
8281
8282 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8283                                             EVT VT, SDValue Op1, SDValue Op2) {
8284   SDVTList VTs = getVTList(VT);
8285   SDValue Ops[] = { Op1, Op2 };
8286   return getMachineNode(Opcode, dl, VTs, Ops);
8287 }
8288
8289 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8290                                             EVT VT, SDValue Op1, SDValue Op2,
8291                                             SDValue Op3) {
8292   SDVTList VTs = getVTList(VT);
8293   SDValue Ops[] = { Op1, Op2, Op3 };
8294   return getMachineNode(Opcode, dl, VTs, Ops);
8295 }
8296
8297 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8298                                             EVT VT, ArrayRef<SDValue> Ops) {
8299   SDVTList VTs = getVTList(VT);
8300   return getMachineNode(Opcode, dl, VTs, Ops);
8301 }
8302
8303 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8304                                             EVT VT1, EVT VT2, SDValue Op1,
8305                                             SDValue Op2) {
8306   SDVTList VTs = getVTList(VT1, VT2);
8307   SDValue Ops[] = { Op1, Op2 };
8308   return getMachineNode(Opcode, dl, VTs, Ops);
8309 }
8310
8311 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8312                                             EVT VT1, EVT VT2, SDValue Op1,
8313                                             SDValue Op2, SDValue Op3) {
8314   SDVTList VTs = getVTList(VT1, VT2);
8315   SDValue Ops[] = { Op1, Op2, Op3 };
8316   return getMachineNode(Opcode, dl, VTs, Ops);
8317 }
8318
8319 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8320                                             EVT VT1, EVT VT2,
8321                                             ArrayRef<SDValue> Ops) {
8322   SDVTList VTs = getVTList(VT1, VT2);
8323   return getMachineNode(Opcode, dl, VTs, Ops);
8324 }
8325
8326 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8327                                             EVT VT1, EVT VT2, EVT VT3,
8328                                             SDValue Op1, SDValue Op2) {
8329   SDVTList VTs = getVTList(VT1, VT2, VT3);
8330   SDValue Ops[] = { Op1, Op2 };
8331   return getMachineNode(Opcode, dl, VTs, Ops);
8332 }
8333
8334 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8335                                             EVT VT1, EVT VT2, EVT VT3,
8336                                             SDValue Op1, SDValue Op2,
8337                                             SDValue Op3) {
8338   SDVTList VTs = getVTList(VT1, VT2, VT3);
8339   SDValue Ops[] = { Op1, Op2, Op3 };
8340   return getMachineNode(Opcode, dl, VTs, Ops);
8341 }
8342
8343 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8344                                             EVT VT1, EVT VT2, EVT VT3,
8345                                             ArrayRef<SDValue> Ops) {
8346   SDVTList VTs = getVTList(VT1, VT2, VT3);
8347   return getMachineNode(Opcode, dl, VTs, Ops);
8348 }
8349
8350 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8351                                             ArrayRef<EVT> ResultTys,
8352                                             ArrayRef<SDValue> Ops) {
8353   SDVTList VTs = getVTList(ResultTys);
8354   return getMachineNode(Opcode, dl, VTs, Ops);
8355 }
8356
8357 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
8358                                             SDVTList VTs,
8359                                             ArrayRef<SDValue> Ops) {
8360   bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
8361   MachineSDNode *N;
8362   void *IP = nullptr;
8363
8364   if (DoCSE) {
8365     FoldingSetNodeID ID;
8366     AddNodeIDNode(ID, ~Opcode, VTs, Ops);
8367     IP = nullptr;
8368     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
8369       return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
8370     }
8371   }
8372
8373   // Allocate a new MachineSDNode.
8374   N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8375   createOperands(N, Ops);
8376
8377   if (DoCSE)
8378     CSEMap.InsertNode(N, IP);
8379
8380   InsertNode(N);
8381   NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
8382   return N;
8383 }
8384
8385 /// getTargetExtractSubreg - A convenience function for creating
8386 /// TargetOpcode::EXTRACT_SUBREG nodes.
8387 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
8388                                              SDValue Operand) {
8389   SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
8390   SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
8391                                   VT, Operand, SRIdxVal);
8392   return SDValue(Subreg, 0);
8393 }
8394
8395 /// getTargetInsertSubreg - A convenience function for creating
8396 /// TargetOpcode::INSERT_SUBREG nodes.
8397 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
8398                                             SDValue Operand, SDValue Subreg) {
8399   SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
8400   SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
8401                                   VT, Operand, Subreg, SRIdxVal);
8402   return SDValue(Result, 0);
8403 }
8404
8405 /// getNodeIfExists - Get the specified node if it's already available, or
8406 /// else return NULL.
8407 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
8408                                       ArrayRef<SDValue> Ops) {
8409   SDNodeFlags Flags;
8410   if (Inserter)
8411     Flags = Inserter->getFlags();
8412   return getNodeIfExists(Opcode, VTList, Ops, Flags);
8413 }
8414
8415 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
8416                                       ArrayRef<SDValue> Ops,
8417                                       const SDNodeFlags Flags) {
8418   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
8419     FoldingSetNodeID ID;
8420     AddNodeIDNode(ID, Opcode, VTList, Ops);
8421     void *IP = nullptr;
8422     if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
8423       E->intersectFlagsWith(Flags);
8424       return E;
8425     }
8426   }
8427   return nullptr;
8428 }
8429
8430 /// doesNodeExist - Check if a node exists without modifying its flags.
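/// A hedged usage sketch (DAG, VT, X and Y are hypothetical names): a combine
/// can probe for an existing node before committing to a transform, e.g.
///   if (DAG.doesNodeExist(ISD::SUB, DAG.getVTList(VT), {Y, X}))
///     ...prefer the form that CSEs with the existing node...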
8431 bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
8432                                  ArrayRef<SDValue> Ops) {
8433   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
8434     FoldingSetNodeID ID;
8435     AddNodeIDNode(ID, Opcode, VTList, Ops);
8436     void *IP = nullptr;
8437     if (FindNodeOrInsertPos(ID, SDLoc(), IP))
8438       return true;
8439   }
8440   return false;
8441 }
8442
8443 /// getDbgValue - Creates an SDDbgValue node.
8444 ///
8445 /// SDNode
8446 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
8447                                       SDNode *N, unsigned R, bool IsIndirect,
8448                                       const DebugLoc &DL, unsigned O) {
8449   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8450          "Expected inlined-at fields to agree");
8451   return new (DbgInfo->getAlloc())
8452       SDDbgValue(Var, Expr, SDDbgOperand::fromNode(N, R), N, IsIndirect, DL, O,
8453                  /*IsVariadic=*/false);
8454 }
8455
8456 /// Constant
8457 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
8458                                               DIExpression *Expr,
8459                                               const Value *C,
8460                                               const DebugLoc &DL, unsigned O) {
8461   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8462          "Expected inlined-at fields to agree");
8463   return new (DbgInfo->getAlloc()) SDDbgValue(
8464       Var, Expr, SDDbgOperand::fromConst(C), {}, /*IsIndirect=*/false, DL, O,
8465       /*IsVariadic=*/false);
8466 }
8467
8468 /// FrameIndex
8469 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
8470                                                 DIExpression *Expr, unsigned FI,
8471                                                 bool IsIndirect,
8472                                                 const DebugLoc &DL,
8473                                                 unsigned O) {
8474   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8475          "Expected inlined-at fields to agree");
8476   return new (DbgInfo->getAlloc())
8477       SDDbgValue(Var, Expr, SDDbgOperand::fromFrameIdx(FI), {}, IsIndirect, DL,
8478                  O, /*IsVariadic=*/false);
8479 }
8480
8481 /// VReg
8482 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var,
8483                                           DIExpression *Expr,
8484                                           unsigned VReg, bool IsIndirect,
8485                                           const DebugLoc &DL, unsigned O) {
8486   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8487          "Expected inlined-at fields to agree");
8488   return new (DbgInfo->getAlloc())
8489       SDDbgValue(Var, Expr, SDDbgOperand::fromVReg(VReg), {}, IsIndirect, DL, O,
8490                  /*IsVariadic=*/false);
8491 }
8492
8493 void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
8494                                      unsigned OffsetInBits, unsigned SizeInBits,
8495                                      bool InvalidateDbg) {
8496   SDNode *FromNode = From.getNode();
8497   SDNode *ToNode = To.getNode();
8498   assert(FromNode && ToNode && "Can't modify dbg values");
8499
8500   // PR35338
8501   // TODO: assert(From != To && "Redundant dbg value transfer");
8502   // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
8503   if (From == To || FromNode == ToNode)
8504     return;
8505
8506   if (!FromNode->getHasDebugValue())
8507     return;
8508
8509   SmallVector<SDDbgValue *, 2> ClonedDVs;
8510   for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
8511     SDDbgOperand DbgOperand = Dbg->getLocationOps()[0];
8512     if (DbgOperand.getKind() != SDDbgOperand::SDNODE || Dbg->isInvalidated())
8513       continue;
8514
8515     // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
8516
8517     // Just transfer the dbg value attached to From.
8518     if (DbgOperand.getResNo() != From.getResNo())
8519       continue;
8520
8521     DIVariable *Var = Dbg->getVariable();
8522     auto *Expr = Dbg->getExpression();
8523     // If a fragment is requested, update the expression.
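    // (A fragment expression narrows the debug value to SizeInBits bits
    // starting at OffsetInBits within the variable, e.g. describing only the
    // low half of a 64-bit value that was split into two 32-bit nodes.)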
8524     if (SizeInBits) {
8525       // When splitting a larger (e.g., sign-extended) value whose
8526       // lower bits are described with an SDDbgValue, do not attempt
8527       // to transfer the SDDbgValue to the upper bits.
8528       if (auto FI = Expr->getFragmentInfo())
8529         if (OffsetInBits + SizeInBits > FI->SizeInBits)
8530           continue;
8531       auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
8532                                                              SizeInBits);
8533       if (!Fragment)
8534         continue;
8535       Expr = *Fragment;
8536     }
8537     // Clone the SDDbgValue and move it to To.
8538     SDDbgValue *Clone = getDbgValue(
8539         Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(),
8540         std::max(ToNode->getIROrder(), Dbg->getOrder()));
8541     ClonedDVs.push_back(Clone);
8542
8543     if (InvalidateDbg) {
8544       // Invalidate value and indicate the SDDbgValue should not be emitted.
8545       Dbg->setIsInvalidated();
8546       Dbg->setIsEmitted();
8547     }
8548   }
8549
8550   for (SDDbgValue *Dbg : ClonedDVs)
8551     AddDbgValue(Dbg, ToNode, false);
8552 }
8553
8554 void SelectionDAG::salvageDebugInfo(SDNode &N) {
8555   if (!N.getHasDebugValue())
8556     return;
8557
8558   SmallVector<SDDbgValue *, 2> ClonedDVs;
8559   for (auto DV : GetDbgValues(&N)) {
8560     if (DV->isInvalidated())
8561       continue;
8562     switch (N.getOpcode()) {
8563     default:
8564       break;
8565     case ISD::ADD:
8566       SDValue N0 = N.getOperand(0);
8567       SDValue N1 = N.getOperand(1);
8568       if (!isConstantIntBuildVectorOrConstantInt(N0) &&
8569           isConstantIntBuildVectorOrConstantInt(N1)) {
8570         uint64_t Offset = N.getConstantOperandVal(1);
8571         // Rewrite an ADD constant node into a DIExpression. Since we are
8572         // performing arithmetic to compute the variable's *value* in the
8573         // DIExpression, we need to mark the expression with a
8574         // DW_OP_stack_value.
8575         auto *DIExpr = DV->getExpression();
8576         DIExpr =
8577             DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset);
8578         SDDbgValue *Clone =
8579             getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
8580                         DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
8581         ClonedDVs.push_back(Clone);
8582         DV->setIsInvalidated();
8583         DV->setIsEmitted();
8584         LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
8585                    N0.getNode()->dumprFull(this);
8586                    dbgs() << " into " << *DIExpr << '\n');
8587       }
8588     }
8589   }
8590
8591   for (SDDbgValue *Dbg : ClonedDVs)
8592     AddDbgValue(Dbg, Dbg->getLocationOps()[0].getSDNode(), false);
8593 }
8594
8595 /// Creates an SDDbgLabel node.
8596 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
8597                                       const DebugLoc &DL, unsigned O) {
8598   assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
8599          "Expected inlined-at fields to agree");
8600   return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
8601 }
8602
8603 namespace {
8604
8605 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
8606 /// pointed to by a use iterator is deleted, increment the use iterator
8607 /// so that it doesn't dangle.
8608 ///
8609 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
8610   SDNode::use_iterator &UI;
8611   SDNode::use_iterator &UE;
8612
8613   void NodeDeleted(SDNode *N, SDNode *E) override {
8614     // Increment the iterator as needed.
8615     while (UI != UE && N == *UI)
8616       ++UI;
8617   }
8618
8619 public:
8620   RAUWUpdateListener(SelectionDAG &d,
8621                      SDNode::use_iterator &ui,
8622                      SDNode::use_iterator &ue)
8623     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
8624 };
8625
8626 } // end anonymous namespace
8627
8628 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8629 /// This can cause recursive merging of nodes in the DAG.
8630 ///
8631 /// This version assumes From has a single result value.
8632 ///
8633 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
8634   SDNode *From = FromN.getNode();
8635   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
8636          "Cannot replace with this method!");
8637   assert(From != To.getNode() && "Cannot replace uses with self");
8638
8639   // Preserve Debug Values.
8640   transferDbgValues(FromN, To);
8641
8642   // Iterate over all the existing uses of From. New uses will be added
8643   // to the beginning of the use list, which we avoid visiting.
8644   // This specifically avoids visiting uses of From that arise while the
8645   // replacement is happening, because any such uses would be the result
8646   // of CSE: If an existing node looks like From after one of its operands
8647   // is replaced by To, we don't want to replace all of its users with To
8648   // too. See PR3018 for more info.
8649   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8650   RAUWUpdateListener Listener(*this, UI, UE);
8651   while (UI != UE) {
8652     SDNode *User = *UI;
8653
8654     // This node is about to morph, remove its old self from the CSE maps.
8655     RemoveNodeFromCSEMaps(User);
8656
8657     // A user can appear in a use list multiple times, and when this
8658     // happens the uses are usually next to each other in the list.
8659     // To help reduce the number of CSE recomputations, process all
8660     // the uses of this user that we can find this way.
8661     do {
8662       SDUse &Use = UI.getUse();
8663       ++UI;
8664       Use.set(To);
8665       if (To->isDivergent() != From->isDivergent())
8666         updateDivergence(User);
8667     } while (UI != UE && *UI == User);
8668     // Now that we have modified User, add it back to the CSE maps. If it
8669     // already exists there, recursively merge the results together.
8670     AddModifiedNodeToCSEMaps(User);
8671   }
8672
8673   // If we just RAUW'd the root, take note.
8674   if (FromN == getRoot())
8675     setRoot(To);
8676 }
8677
8678 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8679 /// This can cause recursive merging of nodes in the DAG.
8680 ///
8681 /// This version assumes that for each value of From, there is a
8682 /// corresponding value in To in the same position with the same type.
8683 ///
8684 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
8685 #ifndef NDEBUG
8686   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8687     assert((!From->hasAnyUseOfValue(i) ||
8688             From->getValueType(i) == To->getValueType(i)) &&
8689            "Cannot use this version of ReplaceAllUsesWith!");
8690 #endif
8691
8692   // Handle the trivial case.
8693   if (From == To)
8694     return;
8695
8696   // Preserve Debug Info. Only do this if there's a use.
8697   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8698     if (From->hasAnyUseOfValue(i)) {
8699       assert((i < To->getNumValues()) && "Invalid To location");
8700       transferDbgValues(SDValue(From, i), SDValue(To, i));
8701     }
8702
8703   // Iterate over just the existing users of From. See the comments in
8704   // the ReplaceAllUsesWith above.
8705   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8706   RAUWUpdateListener Listener(*this, UI, UE);
8707   while (UI != UE) {
8708     SDNode *User = *UI;
8709
8710     // This node is about to morph, remove its old self from the CSE maps.
8711     RemoveNodeFromCSEMaps(User);
8712
8713     // A user can appear in a use list multiple times, and when this
8714     // happens the uses are usually next to each other in the list.
8715     // To help reduce the number of CSE recomputations, process all
8716     // the uses of this user that we can find this way.
8717     do {
8718       SDUse &Use = UI.getUse();
8719       ++UI;
8720       Use.setNode(To);
8721       if (To->isDivergent() != From->isDivergent())
8722         updateDivergence(User);
8723     } while (UI != UE && *UI == User);
8724
8725     // Now that we have modified User, add it back to the CSE maps. If it
8726     // already exists there, recursively merge the results together.
8727     AddModifiedNodeToCSEMaps(User);
8728   }
8729
8730   // If we just RAUW'd the root, take note.
8731   if (From == getRoot().getNode())
8732     setRoot(SDValue(To, getRoot().getResNo()));
8733 }
8734
8735 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8736 /// This can cause recursive merging of nodes in the DAG.
8737 ///
8738 /// This version can replace From with any result values. To must match the
8739 /// number and types of values returned by From.
8740 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
8741   if (From->getNumValues() == 1) // Handle the simple case efficiently.
8742     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
8743
8744   // Preserve Debug Info.
8745   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8746     transferDbgValues(SDValue(From, i), To[i]);
8747
8748   // Iterate over just the existing users of From. See the comments in
8749   // the ReplaceAllUsesWith above.
8750   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8751   RAUWUpdateListener Listener(*this, UI, UE);
8752   while (UI != UE) {
8753     SDNode *User = *UI;
8754
8755     // This node is about to morph, remove its old self from the CSE maps.
8756     RemoveNodeFromCSEMaps(User);
8757
8758     // A user can appear in a use list multiple times, and when this happens the
8759     // uses are usually next to each other in the list. To help reduce the
8760     // number of CSE and divergence recomputations, process all the uses of this
8761     // user that we can find this way.
8762     bool To_IsDivergent = false;
8763     do {
8764       SDUse &Use = UI.getUse();
8765       const SDValue &ToOp = To[Use.getResNo()];
8766       ++UI;
8767       Use.set(ToOp);
8768       To_IsDivergent |= ToOp->isDivergent();
8769     } while (UI != UE && *UI == User);
8770
8771     if (To_IsDivergent != From->isDivergent())
8772       updateDivergence(User);
8773
8774     // Now that we have modified User, add it back to the CSE maps. If it
8775     // already exists there, recursively merge the results together.
8776     AddModifiedNodeToCSEMaps(User);
8777   }
8778
8779   // If we just RAUW'd the root, take note.
8780   if (From == getRoot().getNode())
8781     setRoot(SDValue(To[getRoot().getResNo()]));
8782 }
8783
8784 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
8785 /// uses of other values produced by From.getNode() alone. This can cause
8786 /// recursive merging of nodes in the DAG.
8787 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
8788   // Handle the really simple, really trivial case efficiently.
8789   if (From == To) return;
8790
8791   // Handle the simple, trivial case efficiently.
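  // (A single-result node has no other values whose uses would need to be
  // left alone, so the general ReplaceAllUsesWith above does the job.)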
8792   if (From.getNode()->getNumValues() == 1) {
8793     ReplaceAllUsesWith(From, To);
8794     return;
8795   }
8796
8797   // Preserve Debug Info.
8798   transferDbgValues(From, To);
8799
8800   // Iterate over just the existing users of From. See the comments in
8801   // the ReplaceAllUsesWith above.
8802   SDNode::use_iterator UI = From.getNode()->use_begin(),
8803                        UE = From.getNode()->use_end();
8804   RAUWUpdateListener Listener(*this, UI, UE);
8805   while (UI != UE) {
8806     SDNode *User = *UI;
8807     bool UserRemovedFromCSEMaps = false;
8808
8809     // A user can appear in a use list multiple times, and when this
8810     // happens the uses are usually next to each other in the list.
8811     // To help reduce the number of CSE recomputations, process all
8812     // the uses of this user that we can find this way.
8813     do {
8814       SDUse &Use = UI.getUse();
8815
8816       // Skip uses of different values from the same node.
8817       if (Use.getResNo() != From.getResNo()) {
8818         ++UI;
8819         continue;
8820       }
8821
8822       // If this node hasn't been modified yet, it's still in the CSE maps,
8823       // so remove its old self from the CSE maps.
8824       if (!UserRemovedFromCSEMaps) {
8825         RemoveNodeFromCSEMaps(User);
8826         UserRemovedFromCSEMaps = true;
8827       }
8828
8829       ++UI;
8830       Use.set(To);
8831       if (To->isDivergent() != From->isDivergent())
8832         updateDivergence(User);
8833     } while (UI != UE && *UI == User);
8834     // We are iterating over all uses of the From node, so if a use
8835     // doesn't use the specific value, no changes are made.
8836     if (!UserRemovedFromCSEMaps)
8837       continue;
8838
8839     // Now that we have modified User, add it back to the CSE maps. If it
8840     // already exists there, recursively merge the results together.
8841     AddModifiedNodeToCSEMaps(User);
8842   }
8843
8844   // If we just RAUW'd the root, take note.
8845   if (From == getRoot())
8846     setRoot(To);
8847 }
8848
8849 namespace {
8850
8851 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
8852 /// to record information about a use.
8853 struct UseMemo {
8854   SDNode *User;
8855   unsigned Index;
8856   SDUse *Use;
8857 };
8858
8859 /// operator< - Sort Memos by User.
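/// (Ordering by the User pointer alone is sufficient: after llvm::sort, all
/// UseMemo entries for a given user node are adjacent, which is the only
/// property ReplaceAllUsesOfValuesWith relies on below.)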
8860 bool operator<(const UseMemo &L, const UseMemo &R) {
8861   return (intptr_t)L.User < (intptr_t)R.User;
8862 }
8863
8864 } // end anonymous namespace
8865
8866 bool SelectionDAG::calculateDivergence(SDNode *N) {
8867   if (TLI->isSDNodeAlwaysUniform(N)) {
8868     assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, DA) &&
8869            "Conflicting divergence information!");
8870     return false;
8871   }
8872   if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA))
8873     return true;
8874   for (auto &Op : N->ops()) {
8875     if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
8876       return true;
8877   }
8878   return false;
8879 }
8880
8881 void SelectionDAG::updateDivergence(SDNode *N) {
8882   SmallVector<SDNode *, 16> Worklist(1, N);
8883   do {
8884     N = Worklist.pop_back_val();
8885     bool IsDivergent = calculateDivergence(N);
8886     if (N->SDNodeBits.IsDivergent != IsDivergent) {
8887       N->SDNodeBits.IsDivergent = IsDivergent;
8888       llvm::append_range(Worklist, N->uses());
8889     }
8890   } while (!Worklist.empty());
8891 }
8892
8893 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
8894   DenseMap<SDNode *, unsigned> Degree;
8895   Order.reserve(AllNodes.size());
8896   for (auto &N : allnodes()) {
8897     unsigned NOps = N.getNumOperands();
8898     Degree[&N] = NOps;
8899     if (0 == NOps)
8900       Order.push_back(&N);
8901   }
8902   for (size_t I = 0; I != Order.size(); ++I) {
8903     SDNode *N = Order[I];
8904     for (auto U : N->uses()) {
8905       unsigned &UnsortedOps = Degree[U];
8906       if (0 == --UnsortedOps)
8907         Order.push_back(U);
8908     }
8909   }
8910 }
8911
8912 #ifndef NDEBUG
8913 void SelectionDAG::VerifyDAGDiverence() {
8914   std::vector<SDNode *> TopoOrder;
8915   CreateTopologicalOrder(TopoOrder);
8916   for (auto *N : TopoOrder) {
8917     assert(calculateDivergence(N) == N->isDivergent() &&
8918            "Divergence bit inconsistency detected");
8919   }
8920 }
8921 #endif
8922
8923 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
8924 /// uses of other values produced by From.getNode() alone. The same value
8925 /// may appear in both the From and To list. This can cause recursive
8926 /// merging of nodes in the DAG.
8927 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
8928                                               const SDValue *To,
8929                                               unsigned Num) {
8930   // Handle the simple, trivial case efficiently.
8931   if (Num == 1)
8932     return ReplaceAllUsesOfValueWith(*From, *To);
8933
8934   transferDbgValues(*From, *To);
8935
8936   // Record all the existing uses up front. This helps with
8937   // processing new uses that are introduced during the
8938   // replacement process.
8939   SmallVector<UseMemo, 4> Uses;
8940   for (unsigned i = 0; i != Num; ++i) {
8941     unsigned FromResNo = From[i].getResNo();
8942     SDNode *FromNode = From[i].getNode();
8943     for (SDNode::use_iterator UI = FromNode->use_begin(),
8944          E = FromNode->use_end(); UI != E; ++UI) {
8945       SDUse &Use = UI.getUse();
8946       if (Use.getResNo() == FromResNo) {
8947         UseMemo Memo = { *UI, i, &Use };
8948         Uses.push_back(Memo);
8949       }
8950     }
8951   }
8952
8953   // Sort the uses, so that all the uses from a given User are together.
8954   llvm::sort(Uses);
8955
8956   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8957        UseIndex != UseIndexEnd; ) {
8958     // We know that this user uses some value of From. If it is the right
8959     // value, update it.
8960     SDNode *User = Uses[UseIndex].User;
8961
8962     // This node is about to morph, remove its old self from the CSE maps.
8963     RemoveNodeFromCSEMaps(User);
8964
8965     // The Uses array is sorted, so all the uses for a given User
8966     // are next to each other in the list.
8967     // To help reduce the number of CSE recomputations, process all
8968     // the uses of this user that we can find this way.
8969     do {
8970       unsigned i = Uses[UseIndex].Index;
8971       SDUse &Use = *Uses[UseIndex].Use;
8972       ++UseIndex;
8973
8974       Use.set(To[i]);
8975     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8976
8977     // Now that we have modified User, add it back to the CSE maps. If it
8978     // already exists there, recursively merge the results together.
8979     AddModifiedNodeToCSEMaps(User);
8980   }
8981 }
8982
8983 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
8984 /// based on their topological order. It returns the number of nodes assigned,
8985 /// and leaves the node list itself sorted into that topological order.
8986 unsigned SelectionDAG::AssignTopologicalOrder() {
8987   unsigned DAGSize = 0;
8988
8989   // SortedPos tracks the progress of the algorithm. Nodes before it are
8990   // sorted, nodes after it are unsorted. When the algorithm completes
8991   // it is at the end of the list.
8992   allnodes_iterator SortedPos = allnodes_begin();
8993
8994   // Visit all the nodes. Move nodes with no operands to the front of
8995   // the list immediately. Annotate nodes that do have operands with their
8996   // operand count. Before we do this, the Node Id fields of the nodes
8997   // may contain arbitrary values. After, the Node Id fields for nodes
8998   // before SortedPos will contain the topological sort index, and the
8999   // Node Id fields for nodes at SortedPos and after will contain the
9000   // count of outstanding operands.
9001   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
9002     SDNode *N = &*I++;
9003     checkForCycles(N, this);
9004     unsigned Degree = N->getNumOperands();
9005     if (Degree == 0) {
9006       // A node with no operands, add it to the result array immediately.
9007       N->setNodeId(DAGSize++);
9008       allnodes_iterator Q(N);
9009       if (Q != SortedPos)
9010         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
9011       assert(SortedPos != AllNodes.end() && "Overran node list");
9012       ++SortedPos;
9013     } else {
9014       // Temporarily use the Node Id as scratch space for the degree count.
9015       N->setNodeId(Degree);
9016     }
9017   }
9018
9019   // Visit all the nodes. As we iterate, move nodes into sorted order,
9020   // such that by the time the end is reached all nodes will be sorted.
9021   for (SDNode &Node : allnodes()) {
9022     SDNode *N = &Node;
9023     checkForCycles(N, this);
9024     // N is in sorted position, so all its uses have one less operand
9025     // that needs to be sorted.
9026     for (SDNode *P : N->uses()) {
9027       unsigned Degree = P->getNodeId();
9028       assert(Degree != 0 && "Invalid node degree");
9029       --Degree;
9030       if (Degree == 0) {
9031         // All of P's operands are sorted, so P may be sorted now.
9032         P->setNodeId(DAGSize++);
9033         if (P->getIterator() != SortedPos)
9034           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
9035         assert(SortedPos != AllNodes.end() && "Overran node list");
9036         ++SortedPos;
9037       } else {
9038         // Update P's outstanding operand count.
9039         P->setNodeId(Degree);
9040       }
9041     }
9042     if (Node.getIterator() == SortedPos) {
9043 #ifndef NDEBUG
9044       allnodes_iterator I(N);
9045       SDNode *S = &*++I;
9046       dbgs() << "Overran sorted position:\n";
9047       S->dumprFull(this); dbgs() << "\n";
9048       dbgs() << "Checking if this is due to cycles\n";
9049       checkForCycles(this, true);
9050 #endif
9051       llvm_unreachable(nullptr);
9052     }
9053   }
9054
9055   assert(SortedPos == AllNodes.end() &&
9056          "Topological sort incomplete!");
9057   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
9058          "First node in topological sort is not the entry token!");
9059   assert(AllNodes.front().getNodeId() == 0 &&
9060          "First node in topological sort has non-zero id!");
9061   assert(AllNodes.front().getNumOperands() == 0 &&
9062          "First node in topological sort has operands!");
9063   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
9064          "Last node in topological sort has unexpected id!");
9065   assert(AllNodes.back().use_empty() &&
9066          "Last node in topological sort has users!");
9067   assert(DAGSize == allnodes_size() && "Node count mismatch!");
9068   return DAGSize;
9069 }
9070
9071 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
9072 /// value is produced by SD.
9073 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
9074   if (SD) {
9075     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
9076     SD->setHasDebugValue(true);
9077   }
9078   DbgInfo->add(DB, SD, isParameter);
9079 }
9080
9081 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
9082   DbgInfo->add(DB);
9083 }
9084
9085 SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
9086                                                    SDValue NewMemOpChain) {
9087   assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
9088   assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
9089   // The new memory operation must have the same position as the old load in
9090   // terms of memory dependency. Create a TokenFactor for the old load and new
9091   // memory operation and update uses of the old load's output chain to use that
9092   // TokenFactor.
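  // Illustrative result (a sketch of the DAG shape): prior users of OldChain
  // end up ordered after both memory operations:
  //
  //   OldChain   NewMemOpChain
  //        \        /
  //       TokenFactor
  //            |
  //     (old chain users)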
9093   if (OldChain == NewMemOpChain || OldChain.use_empty())
9094     return NewMemOpChain;
9095
9096   SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
9097                                 OldChain, NewMemOpChain);
9098   ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
9099   UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
9100   return TokenFactor;
9101 }
9102
9103 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
9104                                                    SDValue NewMemOp) {
9105   assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
9106   SDValue OldChain = SDValue(OldLoad, 1);
9107   SDValue NewMemOpChain = NewMemOp.getValue(1);
9108   return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
9109 }
9110
9111 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
9112                                                      Function **OutFunction) {
9113   assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
9114
9115   auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
9116   auto *Module = MF->getFunction().getParent();
9117   auto *Function = Module->getFunction(Symbol);
9118
9119   if (OutFunction != nullptr)
9120     *OutFunction = Function;
9121
9122   if (Function != nullptr) {
9123     auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
9124     return getGlobalAddress(Function, SDLoc(Op), PtrTy);
9125   }
9126
9127   std::string ErrorStr;
9128   raw_string_ostream ErrorFormatter(ErrorStr);
9129
9130   ErrorFormatter << "Undefined external symbol ";
9131   ErrorFormatter << '"' << Symbol << '"';
9132   ErrorFormatter.flush();
9133
9134   report_fatal_error(ErrorStr);
9135 }
9136
9137 //===----------------------------------------------------------------------===//
9138 //                              SDNode Class
9139 //===----------------------------------------------------------------------===//
9140
9141 bool llvm::isNullConstant(SDValue V) {
9142   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9143   return Const != nullptr && Const->isNullValue();
9144 }
9145
9146 bool llvm::isNullFPConstant(SDValue V) {
9147   ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
9148   return Const != nullptr && Const->isZero() && !Const->isNegative();
9149 }
9150
9151 bool llvm::isAllOnesConstant(SDValue V) {
9152   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9153   return Const != nullptr && Const->isAllOnesValue();
9154 }
9155
9156 bool llvm::isOneConstant(SDValue V) {
9157   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9158   return Const != nullptr && Const->isOne();
9159 }
9160
9161 SDValue llvm::peekThroughBitcasts(SDValue V) {
9162   while (V.getOpcode() == ISD::BITCAST)
9163     V = V.getOperand(0);
9164   return V;
9165 }
9166
9167 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
9168   while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
9169     V = V.getOperand(0);
9170   return V;
9171 }
9172
9173 SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
9174   while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
9175     V = V.getOperand(0);
9176   return V;
9177 }
9178
9179 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
9180   if (V.getOpcode() != ISD::XOR)
9181     return false;
9182   V = peekThroughBitcasts(V.getOperand(1));
9183   unsigned NumBits = V.getScalarValueSizeInBits();
9184   ConstantSDNode *C =
9185       isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
9186   return C && (C->getAPIntValue().countTrailingOnes() >= NumBits);
9187 }
9188
9189 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
9190                                           bool AllowTruncation) {
9191   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
9192     return CN;
9193
9194   // SplatVectors can truncate their operands. Ignore that case here unless
9195   // AllowTruncation is set.
9196   if (N->getOpcode() == ISD::SPLAT_VECTOR) {
9197     EVT VecEltVT = N->getValueType(0).getVectorElementType();
9198     if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
9199       EVT CVT = CN->getValueType(0);
9200       assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
9201       if (AllowTruncation || CVT == VecEltVT)
9202         return CN;
9203     }
9204   }
9205
9206   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9207     BitVector UndefElements;
9208     ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
9209
9210     // BuildVectors can truncate their operands. Ignore that case here unless
9211     // AllowTruncation is set.
9212     if (CN && (UndefElements.none() || AllowUndefs)) {
9213       EVT CVT = CN->getValueType(0);
9214       EVT NSVT = N.getValueType().getScalarType();
9215       assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
9216       if (AllowTruncation || (CVT == NSVT))
9217         return CN;
9218     }
9219   }
9220
9221   return nullptr;
9222 }
9223
9224 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
9225                                           bool AllowUndefs,
9226                                           bool AllowTruncation) {
9227   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
9228     return CN;
9229
9230   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9231     BitVector UndefElements;
9232     ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
9233
9234     // BuildVectors can truncate their operands. Ignore that case here unless
9235     // AllowTruncation is set.
9236     if (CN && (UndefElements.none() || AllowUndefs)) {
9237       EVT CVT = CN->getValueType(0);
9238       EVT NSVT = N.getValueType().getScalarType();
9239       assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
9240       if (AllowTruncation || (CVT == NSVT))
9241         return CN;
9242     }
9243   }
9244
9245   return nullptr;
9246 }
9247
9248 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
9249   if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
9250     return CN;
9251
9252   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9253     BitVector UndefElements;
9254     ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
9255     if (CN && (UndefElements.none() || AllowUndefs))
9256       return CN;
9257   }
9258
9259   if (N.getOpcode() == ISD::SPLAT_VECTOR)
9260     if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
9261       return CN;
9262
9263   return nullptr;
9264 }
9265
9266 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
9267                                               const APInt &DemandedElts,
9268                                               bool AllowUndefs) {
9269   if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
9270     return CN;
9271
9272   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9273     BitVector UndefElements;
9274     ConstantFPSDNode *CN =
9275         BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
9276     if (CN && (UndefElements.none() || AllowUndefs))
9277       return CN;
9278   }
9279
9280   return nullptr;
9281 }
9282
9283 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
9284   // TODO: may want to use peekThroughBitcast() here.
9285   ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
9286   return C && C->isNullValue();
9287 }
9288
9289 bool llvm::isOneOrOneSplat(SDValue N, bool AllowUndefs) {
9290   // TODO: may want to use peekThroughBitcast() here.
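  // (Note: unlike isAllOnesOrAllOnesSplat below, this helper does not peek
  // through bitcasts first; see the TODO above.)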
9291   unsigned BitWidth = N.getScalarValueSizeInBits();
9292   ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
9293   return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
9294 }
9295
9296 bool llvm::isAllOnesOrAllOnesSplat(SDValue N, bool AllowUndefs) {
9297   N = peekThroughBitcasts(N);
9298   unsigned BitWidth = N.getScalarValueSizeInBits();
9299   ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
9300   return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth;
9301 }
9302
9303 HandleSDNode::~HandleSDNode() {
9304   DropOperands();
9305 }
9306
9307 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
9308                                          const DebugLoc &DL,
9309                                          const GlobalValue *GA, EVT VT,
9310                                          int64_t o, unsigned TF)
9311     : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
9312   TheGlobal = GA;
9313 }
9314
9315 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
9316                                          EVT VT, unsigned SrcAS,
9317                                          unsigned DestAS)
9318     : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
9319       SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
9320
9321 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
9322                      SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
9323     : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
9324   MemSDNodeBits.IsVolatile = MMO->isVolatile();
9325   MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
9326   MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
9327   MemSDNodeBits.IsInvariant = MMO->isInvariant();
9328
9329   // We check here that the size of the memory operand fits within the size of
9330   // the MMO. This is because the MMO might indicate only a possible address
9331   // range instead of specifying the affected memory addresses precisely.
9332   // TODO: Make MachineMemOperands aware of scalable vectors.
9333   assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() &&
9334          "Size mismatch!");
9335 }
9336
9337 /// Profile - Gather unique data for the node.
9338 ///
9339 void SDNode::Profile(FoldingSetNodeID &ID) const {
9340   AddNodeIDNode(ID, this);
9341 }
9342
9343 namespace {
9344
9345 struct EVTArray {
9346   std::vector<EVT> VTs;
9347
9348   EVTArray() {
9349     VTs.reserve(MVT::LAST_VALUETYPE);
9350     for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
9351       VTs.push_back(MVT((MVT::SimpleValueType)i));
9352   }
9353 };
9354
9355 } // end anonymous namespace
9356
9357 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
9358 static ManagedStatic<EVTArray> SimpleVTArray;
9359 static ManagedStatic<sys::SmartMutex<true>> VTMutex;
9360
9361 /// getValueTypeList - Return a pointer to the specified value type.
9362 ///
9363 const EVT *SDNode::getValueTypeList(EVT VT) {
9364   if (VT.isExtended()) {
9365     sys::SmartScopedLock<true> Lock(*VTMutex);
9366     return &(*EVTs->insert(VT).first);
9367   } else {
9368     assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
9369            "Value type out of range!");
9370     return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
9371   }
9372 }
9373
9374 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
9375 /// indicated value. This method ignores uses of other values defined by this
9376 /// operation.
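/// For example (illustrative), Load->hasNUsesOfValue(1, 1) asks whether
/// exactly one user consumes the load's chain result (value #1), no matter
/// how many users its data result (value #0) has.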
9377 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
9378   assert(Value < getNumValues() && "Bad value!");
9379
9380   // TODO: Only iterate over uses of a given value of the node
9381   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
9382     if (UI.getUse().getResNo() == Value) {
9383       if (NUses == 0)
9384         return false;
9385       --NUses;
9386     }
9387   }
9388
9389   // Found exactly the right number of uses?
9390   return NUses == 0;
9391 }
9392
9393 /// hasAnyUseOfValue - Return true if there is any use of the indicated
9394 /// value. This method ignores uses of other values defined by this operation.
9395 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
9396   assert(Value < getNumValues() && "Bad value!");
9397
9398   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
9399     if (UI.getUse().getResNo() == Value)
9400       return true;
9401
9402   return false;
9403 }
9404
9405 /// isOnlyUserOf - Return true if this node is the only user of N.
9406 bool SDNode::isOnlyUserOf(const SDNode *N) const {
9407   bool Seen = false;
9408   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
9409     SDNode *User = *I;
9410     if (User == this)
9411       Seen = true;
9412     else
9413       return false;
9414   }
9415
9416   return Seen;
9417 }
9418
9419 /// Return true if the only users of N are contained in Nodes.
9420 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
9421   bool Seen = false;
9422   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
9423     SDNode *User = *I;
9424     if (llvm::is_contained(Nodes, User))
9425       Seen = true;
9426     else
9427       return false;
9428   }
9429
9430   return Seen;
9431 }
9432
9433 /// isOperandOf - Return true if this node is an operand of N.
9434 bool SDValue::isOperandOf(const SDNode *N) const {
9435   return is_contained(N->op_values(), *this);
9436 }
9437
9438 bool SDNode::isOperandOf(const SDNode *N) const {
9439   return any_of(N->op_values(),
9440                 [this](SDValue Op) { return this == Op.getNode(); });
9441 }
9442
9443 /// reachesChainWithoutSideEffects - Return true if this operand (which must
9444 /// be a chain) reaches the specified operand without crossing any
9445 /// side-effecting instructions on any chain path. In practice, this looks
9446 /// through token factors and non-volatile loads. In order to remain efficient,
9447 /// this only looks a couple of nodes in; it does not do an exhaustive search.
9448 ///
9449 /// Note that we only need to examine chains when we're searching for
9450 /// side-effects; SelectionDAG requires that all side-effects are represented
9451 /// by chains, even if another operand would force a specific ordering. This
9452 /// constraint is necessary to allow transformations like splitting loads.
9453 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
9454                                              unsigned Depth) const {
9455   if (*this == Dest) return true;
9456
9457   // Don't search too deeply; we just want to be able to see through
9458   // TokenFactors etc.
9459   if (Depth == 0) return false;
9460
9461   // If this is a token factor, all inputs to the TF happen in parallel.
9462   if (getOpcode() == ISD::TokenFactor) {
9463     // First, try a shallow search.
9464     if (is_contained((*this)->ops(), Dest)) {
9465       // We found the chain we want as an operand of this TokenFactor.
9466       // Essentially, we reach the chain without side-effects if we could
9467       // serialize the TokenFactor into a simple chain of operations with
9468       // Dest as the last operation.
This is automatically true if the 9469 // chain has one use: there are no other ordering constraints. 9470 // If the chain has more than one use, we give up: some other 9471 // use of Dest might force a side-effect between Dest and the current 9472 // node. 9473 if (Dest.hasOneUse()) 9474 return true; 9475 } 9476 // Next, try a deep search: check whether every operand of the TokenFactor 9477 // reaches Dest. 9478 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 9479 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 9480 }); 9481 } 9482 9483 // Loads don't have side effects, look through them. 9484 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 9485 if (Ld->isUnordered()) 9486 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 9487 } 9488 return false; 9489 } 9490 9491 bool SDNode::hasPredecessor(const SDNode *N) const { 9492 SmallPtrSet<const SDNode *, 32> Visited; 9493 SmallVector<const SDNode *, 16> Worklist; 9494 Worklist.push_back(this); 9495 return hasPredecessorHelper(N, Visited, Worklist); 9496 } 9497 9498 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 9499 this->Flags.intersectWith(Flags); 9500 } 9501 9502 SDValue 9503 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, 9504 ArrayRef<ISD::NodeType> CandidateBinOps, 9505 bool AllowPartials) { 9506 // The pattern must end in an extract from index 0. 9507 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || 9508 !isNullConstant(Extract->getOperand(1))) 9509 return SDValue(); 9510 9511 // Match against one of the candidate binary ops. 9512 SDValue Op = Extract->getOperand(0); 9513 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) { 9514 return Op.getOpcode() == unsigned(BinOp); 9515 })) 9516 return SDValue(); 9517 9518 // Floating-point reductions may require relaxed constraints on the final step 9519 // of the reduction because they may reorder intermediate operations. 9520 unsigned CandidateBinOp = Op.getOpcode(); 9521 if (Op.getValueType().isFloatingPoint()) { 9522 SDNodeFlags Flags = Op->getFlags(); 9523 switch (CandidateBinOp) { 9524 case ISD::FADD: 9525 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation()) 9526 return SDValue(); 9527 break; 9528 default: 9529 llvm_unreachable("Unhandled FP opcode for binop reduction"); 9530 } 9531 } 9532 9533 // Matching failed - attempt to see if we did enough stages that a partial 9534 // reduction from a subvector is possible. 9535 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) { 9536 if (!AllowPartials || !Op) 9537 return SDValue(); 9538 EVT OpVT = Op.getValueType(); 9539 EVT OpSVT = OpVT.getScalarType(); 9540 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts); 9541 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0)) 9542 return SDValue(); 9543 BinOp = (ISD::NodeType)CandidateBinOp; 9544 return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op, 9545 getVectorIdxConstant(0, SDLoc(Op))); 9546 }; 9547 9548 // At each stage, we're looking for something that looks like: 9549 // %s = shufflevector <8 x i32> %op, <8 x i32> undef, 9550 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, 9551 // i32 undef, i32 undef, i32 undef, i32 undef> 9552 // %a = binop <8 x i32> %op, %s 9553 // Where the mask changes according to the stage. E.g. 
for a 3-stage pyramid, 9554 // we expect something like: 9555 // <4,5,6,7,u,u,u,u> 9556 // <2,3,u,u,u,u,u,u> 9557 // <1,u,u,u,u,u,u,u> 9558 // While a partial reduction match would be: 9559 // <2,3,u,u,u,u,u,u> 9560 // <1,u,u,u,u,u,u,u> 9561 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements()); 9562 SDValue PrevOp; 9563 for (unsigned i = 0; i < Stages; ++i) { 9564 unsigned MaskEnd = (1 << i); 9565 9566 if (Op.getOpcode() != CandidateBinOp) 9567 return PartialReduction(PrevOp, MaskEnd); 9568 9569 SDValue Op0 = Op.getOperand(0); 9570 SDValue Op1 = Op.getOperand(1); 9571 9572 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0); 9573 if (Shuffle) { 9574 Op = Op1; 9575 } else { 9576 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1); 9577 Op = Op0; 9578 } 9579 9580 // The first operand of the shuffle should be the same as the other operand 9581 // of the binop. 9582 if (!Shuffle || Shuffle->getOperand(0) != Op) 9583 return PartialReduction(PrevOp, MaskEnd); 9584 9585 // Verify the shuffle has the expected (at this stage of the pyramid) mask. 9586 for (int Index = 0; Index < (int)MaskEnd; ++Index) 9587 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index)) 9588 return PartialReduction(PrevOp, MaskEnd); 9589 9590 PrevOp = Op; 9591 } 9592 9593 // Handle subvector reductions, which tend to appear after the shuffle 9594 // reduction stages. 9595 while (Op.getOpcode() == CandidateBinOp) { 9596 unsigned NumElts = Op.getValueType().getVectorNumElements(); 9597 SDValue Op0 = Op.getOperand(0); 9598 SDValue Op1 = Op.getOperand(1); 9599 if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR || 9600 Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR || 9601 Op0.getOperand(0) != Op1.getOperand(0)) 9602 break; 9603 SDValue Src = Op0.getOperand(0); 9604 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 9605 if (NumSrcElts != (2 * NumElts)) 9606 break; 9607 if (!(Op0.getConstantOperandAPInt(1) == 0 && 9608 Op1.getConstantOperandAPInt(1) == NumElts) && 9609 !(Op1.getConstantOperandAPInt(1) == 0 && 9610 Op0.getConstantOperandAPInt(1) == NumElts)) 9611 break; 9612 Op = Src; 9613 } 9614 9615 BinOp = (ISD::NodeType)CandidateBinOp; 9616 return Op; 9617 } 9618 9619 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 9620 assert(N->getNumValues() == 1 && 9621 "Can't unroll a vector with multiple results!"); 9622 9623 EVT VT = N->getValueType(0); 9624 unsigned NE = VT.getVectorNumElements(); 9625 EVT EltVT = VT.getVectorElementType(); 9626 SDLoc dl(N); 9627 9628 SmallVector<SDValue, 8> Scalars; 9629 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 9630 9631 // If ResNE is 0, fully unroll the vector op. 9632 if (ResNE == 0) 9633 ResNE = NE; 9634 else if (NE > ResNE) 9635 NE = ResNE; 9636 9637 unsigned i; 9638 for (i= 0; i != NE; ++i) { 9639 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 9640 SDValue Operand = N->getOperand(j); 9641 EVT OperandVT = Operand.getValueType(); 9642 if (OperandVT.isVector()) { 9643 // A vector operand; extract a single element. 9644 EVT OperandEltVT = OperandVT.getVectorElementType(); 9645 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, 9646 Operand, getVectorIdxConstant(i, dl)); 9647 } else { 9648 // A scalar operand; just use it as is. 
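        // (For example, the condition-code operand of a vector SETCC or the
        // VT operand of SIGN_EXTEND_INREG stays scalar and is reused for
        // every unrolled element.)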
9649 Operands[j] = Operand; 9650 } 9651 } 9652 9653 switch (N->getOpcode()) { 9654 default: { 9655 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 9656 N->getFlags())); 9657 break; 9658 } 9659 case ISD::VSELECT: 9660 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 9661 break; 9662 case ISD::SHL: 9663 case ISD::SRA: 9664 case ISD::SRL: 9665 case ISD::ROTL: 9666 case ISD::ROTR: 9667 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 9668 getShiftAmountOperand(Operands[0].getValueType(), 9669 Operands[1]))); 9670 break; 9671 case ISD::SIGN_EXTEND_INREG: { 9672 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 9673 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 9674 Operands[0], 9675 getValueType(ExtVT))); 9676 } 9677 } 9678 } 9679 9680 for (; i < ResNE; ++i) 9681 Scalars.push_back(getUNDEF(EltVT)); 9682 9683 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 9684 return getBuildVector(VecVT, dl, Scalars); 9685 } 9686 9687 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( 9688 SDNode *N, unsigned ResNE) { 9689 unsigned Opcode = N->getOpcode(); 9690 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO || 9691 Opcode == ISD::USUBO || Opcode == ISD::SSUBO || 9692 Opcode == ISD::UMULO || Opcode == ISD::SMULO) && 9693 "Expected an overflow opcode"); 9694 9695 EVT ResVT = N->getValueType(0); 9696 EVT OvVT = N->getValueType(1); 9697 EVT ResEltVT = ResVT.getVectorElementType(); 9698 EVT OvEltVT = OvVT.getVectorElementType(); 9699 SDLoc dl(N); 9700 9701 // If ResNE is 0, fully unroll the vector op. 9702 unsigned NE = ResVT.getVectorNumElements(); 9703 if (ResNE == 0) 9704 ResNE = NE; 9705 else if (NE > ResNE) 9706 NE = ResNE; 9707 9708 SmallVector<SDValue, 8> LHSScalars; 9709 SmallVector<SDValue, 8> RHSScalars; 9710 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); 9711 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); 9712 9713 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); 9714 SDVTList VTs = getVTList(ResEltVT, SVT); 9715 SmallVector<SDValue, 8> ResScalars; 9716 SmallVector<SDValue, 8> OvScalars; 9717 for (unsigned i = 0; i < NE; ++i) { 9718 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); 9719 SDValue Ov = 9720 getSelect(dl, OvEltVT, Res.getValue(1), 9721 getBoolConstant(true, dl, OvEltVT, ResVT), 9722 getConstant(0, dl, OvEltVT)); 9723 9724 ResScalars.push_back(Res); 9725 OvScalars.push_back(Ov); 9726 } 9727 9728 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); 9729 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); 9730 9731 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE); 9732 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE); 9733 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars), 9734 getBuildVector(NewOvVT, dl, OvScalars)); 9735 } 9736 9737 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 9738 LoadSDNode *Base, 9739 unsigned Bytes, 9740 int Dist) const { 9741 if (LD->isVolatile() || Base->isVolatile()) 9742 return false; 9743 // TODO: probably too restrictive for atomics, revisit 9744 if (!LD->isSimple()) 9745 return false; 9746 if (LD->isIndexed() || Base->isIndexed()) 9747 return false; 9748 if (LD->getChain() != Base->getChain()) 9749 return false; 9750 EVT VT = LD->getValueType(0); 9751 if (VT.getSizeInBits() / 8 != Bytes) 9752 return false; 9753 9754 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); 9755 auto LocDecomp = BaseIndexOffset::match(LD, *this); 
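  // The loads are consecutive iff both addresses decompose to the same
  // base/index and differ by exactly Dist elements of Bytes bytes each.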
9756
9757   int64_t Offset = 0;
9758   if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
9759     return (Dist * Bytes == Offset);
9760   return false;
9761 }
9762
9763 /// InferPtrAlign - Infer alignment of a load/store address. Return None
9764 /// if it cannot be inferred.
9765 MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
9766   // If this is a GlobalAddress + cst, return the alignment.
9767   const GlobalValue *GV = nullptr;
9768   int64_t GVOffset = 0;
9769   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
9770     unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
9771     KnownBits Known(PtrWidth);
9772     llvm::computeKnownBits(GV, Known, getDataLayout());
9773     unsigned AlignBits = Known.countMinTrailingZeros();
9774     if (AlignBits)
9775       return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
9776   }
9777
9778   // If this is a direct reference to a stack slot, use information about the
9779   // stack slot's alignment.
9780   int FrameIdx = INT_MIN;
9781   int64_t FrameOffset = 0;
9782   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
9783     FrameIdx = FI->getIndex();
9784   } else if (isBaseWithConstantOffset(Ptr) &&
9785              isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
9786     // Handle FI+Cst
9787     FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
9788     FrameOffset = Ptr.getConstantOperandVal(1);
9789   }
9790
9791   if (FrameIdx != INT_MIN) {
9792     const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
9793     return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
9794   }
9795
9796   return None;
9797 }
9798
9799 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
9800 /// which is split (or expanded) into two not necessarily identical pieces.
9801 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
9802   // Currently all types are split in half.
9803   EVT LoVT, HiVT;
9804   if (!VT.isVector())
9805     LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
9806   else
9807     LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
9808
9809   return std::make_pair(LoVT, HiVT);
9810 }
9811
9812 /// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
9813 /// type, dependent on an enveloping VT that has been split into two identical
9814 /// pieces. Sets the HiIsEmpty flag when hi type has zero storage size.
9815 std::pair<EVT, EVT>
9816 SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
9817                                        bool *HiIsEmpty) const {
9818   EVT EltTp = VT.getVectorElementType();
9819   // Examples:
9820   //   custom VL=8  with enveloping VL=8/8 yields 8/0 (hi empty)
9821   //   custom VL=9  with enveloping VL=8/8 yields 8/1
9822   //   custom VL=10 with enveloping VL=8/8 yields 8/2
9823   //   etc.
9824   ElementCount VTNumElts = VT.getVectorElementCount();
9825   ElementCount EnvNumElts = EnvVT.getVectorElementCount();
9826   assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
9827          "Mixing fixed width and scalable vectors when enveloping a type");
9828   EVT LoVT, HiVT;
9829   if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
9830     LoVT = EnvVT;
9831     HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
9832     *HiIsEmpty = false;
9833   } else {
9834     // Flag that hi type has zero storage size, but return split envelope type
9835     // (this would be easier if vector types with zero elements were allowed).
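    // E.g. custom VL=3 with enveloping VL=8/8 yields 3/8 here, with
    // *HiIsEmpty set so callers know the hi half holds no elements.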
9836 LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts); 9837 HiVT = EnvVT; 9838 *HiIsEmpty = true; 9839 } 9840 return std::make_pair(LoVT, HiVT); 9841 } 9842 9843 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the 9844 /// low/high part. 9845 std::pair<SDValue, SDValue> 9846 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, 9847 const EVT &HiVT) { 9848 assert(LoVT.isScalableVector() == HiVT.isScalableVector() && 9849 LoVT.isScalableVector() == N.getValueType().isScalableVector() && 9850 "Splitting vector with an invalid mixture of fixed and scalable " 9851 "vector types"); 9852 assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <= 9853 N.getValueType().getVectorMinNumElements() && 9854 "More vector elements requested than available!"); 9855 SDValue Lo, Hi; 9856 Lo = 9857 getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL)); 9858 // For scalable vectors it is safe to use LoVT.getVectorMinNumElements() 9859 // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales 9860 // IDX with the runtime scaling factor of the result vector type. For 9861 // fixed-width result vectors, that runtime scaling factor is 1. 9862 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, 9863 getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL)); 9864 return std::make_pair(Lo, Hi); 9865 } 9866 9867 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR. 9868 SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) { 9869 EVT VT = N.getValueType(); 9870 EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(), 9871 NextPowerOf2(VT.getVectorNumElements())); 9872 return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N, 9873 getVectorIdxConstant(0, DL)); 9874 } 9875 9876 void SelectionDAG::ExtractVectorElements(SDValue Op, 9877 SmallVectorImpl<SDValue> &Args, 9878 unsigned Start, unsigned Count, 9879 EVT EltVT) { 9880 EVT VT = Op.getValueType(); 9881 if (Count == 0) 9882 Count = VT.getVectorNumElements(); 9883 if (EltVT == EVT()) 9884 EltVT = VT.getVectorElementType(); 9885 SDLoc SL(Op); 9886 for (unsigned i = Start, e = Start + Count; i != e; ++i) { 9887 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op, 9888 getVectorIdxConstant(i, SL))); 9889 } 9890 } 9891 9892 // getAddressSpace - Return the address space this GlobalAddress belongs to. 9893 unsigned GlobalAddressSDNode::getAddressSpace() const { 9894 return getGlobal()->getType()->getAddressSpace(); 9895 } 9896 9897 Type *ConstantPoolSDNode::getType() const { 9898 if (isMachineConstantPoolEntry()) 9899 return Val.MachineCPVal->getType(); 9900 return Val.ConstVal->getType(); 9901 } 9902 9903 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef, 9904 unsigned &SplatBitSize, 9905 bool &HasAnyUndefs, 9906 unsigned MinSplatBits, 9907 bool IsBigEndian) const { 9908 EVT VT = getValueType(0); 9909 assert(VT.isVector() && "Expected a vector type"); 9910 unsigned VecWidth = VT.getSizeInBits(); 9911 if (MinSplatBits > VecWidth) 9912 return false; 9913 9914 // FIXME: The widths are based on this node's type, but build vectors can 9915 // truncate their operands. 9916 SplatValue = APInt(VecWidth, 0); 9917 SplatUndef = APInt(VecWidth, 0); 9918 9919 // Get the bits. Bits with undefined values (when the corresponding element 9920 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared 9921 // in SplatValue. 
If any of the values are not constant, give up and return 9922 // false. 9923 unsigned int NumOps = getNumOperands(); 9924 assert(NumOps > 0 && "isConstantSplat has 0-size build vector"); 9925 unsigned EltWidth = VT.getScalarSizeInBits(); 9926 9927 for (unsigned j = 0; j < NumOps; ++j) { 9928 unsigned i = IsBigEndian ? NumOps - 1 - j : j; 9929 SDValue OpVal = getOperand(i); 9930 unsigned BitPos = j * EltWidth; 9931 9932 if (OpVal.isUndef()) 9933 SplatUndef.setBits(BitPos, BitPos + EltWidth); 9934 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal)) 9935 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos); 9936 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal)) 9937 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos); 9938 else 9939 return false; 9940 } 9941 9942 // The build_vector is all constants or undefs. Find the smallest element 9943 // size that splats the vector. 9944 HasAnyUndefs = (SplatUndef != 0); 9945 9946 // FIXME: This does not work for vectors with elements less than 8 bits. 9947 while (VecWidth > 8) { 9948 unsigned HalfSize = VecWidth / 2; 9949 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize); 9950 APInt LowValue = SplatValue.trunc(HalfSize); 9951 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize); 9952 APInt LowUndef = SplatUndef.trunc(HalfSize); 9953 9954 // If the two halves do not match (ignoring undef bits), stop here. 9955 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) || 9956 MinSplatBits > HalfSize) 9957 break; 9958 9959 SplatValue = HighValue | LowValue; 9960 SplatUndef = HighUndef & LowUndef; 9961 9962 VecWidth = HalfSize; 9963 } 9964 9965 SplatBitSize = VecWidth; 9966 return true; 9967 } 9968 9969 SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts, 9970 BitVector *UndefElements) const { 9971 unsigned NumOps = getNumOperands(); 9972 if (UndefElements) { 9973 UndefElements->clear(); 9974 UndefElements->resize(NumOps); 9975 } 9976 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size"); 9977 if (!DemandedElts) 9978 return SDValue(); 9979 SDValue Splatted; 9980 for (unsigned i = 0; i != NumOps; ++i) { 9981 if (!DemandedElts[i]) 9982 continue; 9983 SDValue Op = getOperand(i); 9984 if (Op.isUndef()) { 9985 if (UndefElements) 9986 (*UndefElements)[i] = true; 9987 } else if (!Splatted) { 9988 Splatted = Op; 9989 } else if (Splatted != Op) { 9990 return SDValue(); 9991 } 9992 } 9993 9994 if (!Splatted) { 9995 unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros(); 9996 assert(getOperand(FirstDemandedIdx).isUndef() && 9997 "Can only have a splat without a constant for all undefs."); 9998 return getOperand(FirstDemandedIdx); 9999 } 10000 10001 return Splatted; 10002 } 10003 10004 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { 10005 APInt DemandedElts = APInt::getAllOnesValue(getNumOperands()); 10006 return getSplatValue(DemandedElts, UndefElements); 10007 } 10008 10009 bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts, 10010 SmallVectorImpl<SDValue> &Sequence, 10011 BitVector *UndefElements) const { 10012 unsigned NumOps = getNumOperands(); 10013 Sequence.clear(); 10014 if (UndefElements) { 10015 UndefElements->clear(); 10016 UndefElements->resize(NumOps); 10017 } 10018 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size"); 10019 if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps)) 10020 return false; 10021 10022 // Set the undefs even if we don't find a sequence (like getSplatValue). 
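  // (Undef but demanded elements still participate in the search below; they
  // match any value, e.g. <a,u,a,b> yields the length-2 sequence {a,b}.)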
10023 if (UndefElements) 10024 for (unsigned I = 0; I != NumOps; ++I) 10025 if (DemandedElts[I] && getOperand(I).isUndef()) 10026 (*UndefElements)[I] = true; 10027 10028 // Iteratively widen the sequence length looking for repetitions. 10029 for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) { 10030 Sequence.append(SeqLen, SDValue()); 10031 for (unsigned I = 0; I != NumOps; ++I) { 10032 if (!DemandedElts[I]) 10033 continue; 10034 SDValue &SeqOp = Sequence[I % SeqLen]; 10035 SDValue Op = getOperand(I); 10036 if (Op.isUndef()) { 10037 if (!SeqOp) 10038 SeqOp = Op; 10039 continue; 10040 } 10041 if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) { 10042 Sequence.clear(); 10043 break; 10044 } 10045 SeqOp = Op; 10046 } 10047 if (!Sequence.empty()) 10048 return true; 10049 } 10050 10051 assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern"); 10052 return false; 10053 } 10054 10055 bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence, 10056 BitVector *UndefElements) const { 10057 APInt DemandedElts = APInt::getAllOnesValue(getNumOperands()); 10058 return getRepeatedSequence(DemandedElts, Sequence, UndefElements); 10059 } 10060 10061 ConstantSDNode * 10062 BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts, 10063 BitVector *UndefElements) const { 10064 return dyn_cast_or_null<ConstantSDNode>( 10065 getSplatValue(DemandedElts, UndefElements)); 10066 } 10067 10068 ConstantSDNode * 10069 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const { 10070 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements)); 10071 } 10072 10073 ConstantFPSDNode * 10074 BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts, 10075 BitVector *UndefElements) const { 10076 return dyn_cast_or_null<ConstantFPSDNode>( 10077 getSplatValue(DemandedElts, UndefElements)); 10078 } 10079 10080 ConstantFPSDNode * 10081 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const { 10082 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements)); 10083 } 10084 10085 int32_t 10086 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, 10087 uint32_t BitWidth) const { 10088 if (ConstantFPSDNode *CN = 10089 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) { 10090 bool IsExact; 10091 APSInt IntVal(BitWidth); 10092 const APFloat &APF = CN->getValueAPF(); 10093 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) != 10094 APFloat::opOK || 10095 !IsExact) 10096 return -1; 10097 10098 return IntVal.exactLogBase2(); 10099 } 10100 return -1; 10101 } 10102 10103 bool BuildVectorSDNode::isConstant() const { 10104 for (const SDValue &Op : op_values()) { 10105 unsigned Opc = Op.getOpcode(); 10106 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP) 10107 return false; 10108 } 10109 return true; 10110 } 10111 10112 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { 10113 // Find the first non-undef value in the shuffle mask. 10114 unsigned i, e; 10115 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i) 10116 /* search */; 10117 10118 // If all elements are undefined, this shuffle can be considered a splat 10119 // (although it should eventually get simplified away completely). 10120 if (i == e) 10121 return true; 10122 10123 // Make sure all remaining elements are either undef or the same as the first 10124 // non-undef value. 
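  // E.g. mask <u,3,3,u,3> is a splat of source element 3, while <u,3,0,3>
  // is not.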
10125 for (int Idx = Mask[i]; i != e; ++i) 10126 if (Mask[i] >= 0 && Mask[i] != Idx) 10127 return false; 10128 return true; 10129 } 10130 10131 // Returns the SDNode if it is a constant integer BuildVector 10132 // or constant integer. 10133 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const { 10134 if (isa<ConstantSDNode>(N)) 10135 return N.getNode(); 10136 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) 10137 return N.getNode(); 10138 // Treat a GlobalAddress supporting constant offset folding as a 10139 // constant integer. 10140 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N)) 10141 if (GA->getOpcode() == ISD::GlobalAddress && 10142 TLI->isOffsetFoldingLegal(GA)) 10143 return GA; 10144 if ((N.getOpcode() == ISD::SPLAT_VECTOR) && 10145 isa<ConstantSDNode>(N.getOperand(0))) 10146 return N.getNode(); 10147 return nullptr; 10148 } 10149 10150 // Returns the SDNode if it is a constant float BuildVector 10151 // or constant float. 10152 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const { 10153 if (isa<ConstantFPSDNode>(N)) 10154 return N.getNode(); 10155 10156 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode())) 10157 return N.getNode(); 10158 10159 return nullptr; 10160 } 10161 10162 void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) { 10163 assert(!Node->OperandList && "Node already has operands"); 10164 assert(SDNode::getMaxNumOperands() >= Vals.size() && 10165 "too many operands to fit into SDNode"); 10166 SDUse *Ops = OperandRecycler.allocate( 10167 ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator); 10168 10169 bool IsDivergent = false; 10170 for (unsigned I = 0; I != Vals.size(); ++I) { 10171 Ops[I].setUser(Node); 10172 Ops[I].setInitial(Vals[I]); 10173 if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence. 
10174 IsDivergent |= Ops[I].getNode()->isDivergent(); 10175 } 10176 Node->NumOperands = Vals.size(); 10177 Node->OperandList = Ops; 10178 if (!TLI->isSDNodeAlwaysUniform(Node)) { 10179 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA); 10180 Node->SDNodeBits.IsDivergent = IsDivergent; 10181 } 10182 checkForCycles(Node); 10183 } 10184 10185 SDValue SelectionDAG::getTokenFactor(const SDLoc &DL, 10186 SmallVectorImpl<SDValue> &Vals) { 10187 size_t Limit = SDNode::getMaxNumOperands(); 10188 while (Vals.size() > Limit) { 10189 unsigned SliceIdx = Vals.size() - Limit; 10190 auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit); 10191 SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs); 10192 Vals.erase(Vals.begin() + SliceIdx, Vals.end()); 10193 Vals.emplace_back(NewTF); 10194 } 10195 return getNode(ISD::TokenFactor, DL, MVT::Other, Vals); 10196 } 10197 10198 SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL, 10199 EVT VT, SDNodeFlags Flags) { 10200 switch (Opcode) { 10201 default: 10202 return SDValue(); 10203 case ISD::ADD: 10204 case ISD::OR: 10205 case ISD::XOR: 10206 case ISD::UMAX: 10207 return getConstant(0, DL, VT); 10208 case ISD::MUL: 10209 return getConstant(1, DL, VT); 10210 case ISD::AND: 10211 case ISD::UMIN: 10212 return getAllOnesConstant(DL, VT); 10213 case ISD::SMAX: 10214 return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT); 10215 case ISD::SMIN: 10216 return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT); 10217 case ISD::FADD: 10218 return getConstantFP(-0.0, DL, VT); 10219 case ISD::FMUL: 10220 return getConstantFP(1.0, DL, VT); 10221 case ISD::FMINNUM: 10222 case ISD::FMAXNUM: { 10223 // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF. 10224 const fltSemantics &Semantics = EVTToAPFloatSemantics(VT); 10225 APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) : 10226 !Flags.hasNoInfs() ? APFloat::getInf(Semantics) : 10227 APFloat::getLargest(Semantics); 10228 if (Opcode == ISD::FMAXNUM) 10229 NeutralAF.changeSign(); 10230 10231 return getConstantFP(NeutralAF, DL, VT); 10232 } 10233 } 10234 } 10235 10236 #ifndef NDEBUG 10237 static void checkForCyclesHelper(const SDNode *N, 10238 SmallPtrSetImpl<const SDNode*> &Visited, 10239 SmallPtrSetImpl<const SDNode*> &Checked, 10240 const llvm::SelectionDAG *DAG) { 10241 // If this node has already been checked, don't check it again. 10242 if (Checked.count(N)) 10243 return; 10244 10245 // If a node has already been visited on this depth-first walk, reject it as 10246 // a cycle. 
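  // (Visited tracks the nodes on the current DFS path and is unwound on exit;
  // Checked permanently records nodes whose subgraphs are already cycle-free.)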
10247 if (!Visited.insert(N).second) { 10248 errs() << "Detected cycle in SelectionDAG\n"; 10249 dbgs() << "Offending node:\n"; 10250 N->dumprFull(DAG); dbgs() << "\n"; 10251 abort(); 10252 } 10253 10254 for (const SDValue &Op : N->op_values()) 10255 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG); 10256 10257 Checked.insert(N); 10258 Visited.erase(N); 10259 } 10260 #endif 10261 10262 void llvm::checkForCycles(const llvm::SDNode *N, 10263 const llvm::SelectionDAG *DAG, 10264 bool force) { 10265 #ifndef NDEBUG 10266 bool check = force; 10267 #ifdef EXPENSIVE_CHECKS 10268 check = true; 10269 #endif // EXPENSIVE_CHECKS 10270 if (check) { 10271 assert(N && "Checking nonexistent SDNode"); 10272 SmallPtrSet<const SDNode*, 32> visited; 10273 SmallPtrSet<const SDNode*, 32> checked; 10274 checkForCyclesHelper(N, visited, checked, DAG); 10275 } 10276 #endif // !NDEBUG 10277 } 10278 10279 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) { 10280 checkForCycles(DAG->getRoot().getNode(), DAG, force); 10281 } 10282