//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}
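// Note: SDVTList is a non-owning (pointer, count) view of an EVT array.
// SelectionDAG interns these lists (see SelectionDAG::getVTList), so nodes
// with the same result types share one array and a list can be identified by
// its VTs pointer alone; AddNodeIDValueTypes later in this file relies on
// this. An illustrative (not normative) consequence:
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
//   // VTs.VTs is pointer-identical across repeated calls with the same types.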
// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT, const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  if (N->getOpcode() == ISD::SPLAT_VECTOR) {
    unsigned EltSize =
        N->getValueType(0).getVectorElementType().getSizeInBits();
    if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
      return true;
    }
  }

  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}
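// Illustration of the contract above (not an exhaustive specification): a
// BUILD_VECTOR of four i32 constants <7, 7, 7, 7>, or a SPLAT_VECTOR with
// constant operand 7, is reported as a constant splat with SplatVal == 7,
// while <7, 7, 7, 6> is not a splat at all.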
// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
    APInt SplatVal;
    return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnesValue();
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}
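// Illustration of the "enough bits" check above (an assumed scenario, not
// tied to any particular target): if v4i8 is legal but i8 is promoted to
// i32, the all-ones vector may be built from i32 constants. An element
// 0xFFFFFFFF has countTrailingOnes() == 32 >= 8 and qualifies, and so does
// 0x000000FF (countTrailingOnes() == 8); only the low 8 bits that survive
// truncation matter.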
bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
    APInt SplatVal;
    return isConstantSplatVector(N, SplatVal) && SplatVal.isNullValue();
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}
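// Typical use (illustrative only): check that a scalar constant, or every
// lane of a constant BUILD_VECTOR, is a power of two. When AllowUndefs is
// set, Match is also invoked with a null ConstantSDNode* for undef lanes, so
// the predicate should tolerate nullptr:
//   bool AllPow2 = ISD::matchUnaryPredicate(V, [](ConstantSDNode *C) {
//     return C && C->getAPIntValue().isPowerOf2();
//   });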
bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
  switch (VecReduceOpcode) {
  default:
    llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_SEQ_FADD:
    return ISD::FADD;
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_SEQ_FMUL:
    return ISD::FMUL;
  case ISD::VECREDUCE_ADD:
    return ISD::ADD;
  case ISD::VECREDUCE_MUL:
    return ISD::MUL;
  case ISD::VECREDUCE_AND:
    return ISD::AND;
  case ISD::VECREDUCE_OR:
    return ISD::OR;
  case ISD::VECREDUCE_XOR:
    return ISD::XOR;
  case ISD::VECREDUCE_SMAX:
    return ISD::SMAX;
  case ISD::VECREDUCE_SMIN:
    return ISD::SMIN;
  case ISD::VECREDUCE_UMAX:
    return ISD::UMAX;
  case ISD::VECREDUCE_UMIN:
    return ISD::UMIN;
  case ISD::VECREDUCE_FMAX:
    return ISD::FMAXNUM;
  case ISD::VECREDUCE_FMIN:
    return ISD::FMINNUM;
  }
}

bool ISD::isVPOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...)                                   \
  case ISD::SDOPC:                                                             \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// The operand position of the vector mask.
Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return None;
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, ...)        \
  case ISD::SDOPC:                                                             \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// The operand position of the explicit vector length parameter.
Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return None;
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS)     \
  case ISD::SDOPC:                                                             \
    return EVLPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
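// A sketch of the CondCode bit encoding that the routines above and below
// rely on (it matches the ISD::CondCode enumerator values): bit 0 = E
// (equal), bit 1 = G (greater), bit 2 = L (less), bit 3 = U (unordered), and
// bit 4 marks the integer codes that do not care about ordering. For example:
//   getSetCCSwappedOperands(ISD::SETLT) == ISD::SETGT   // swap L and G
//   getSetCCInverse(ISD::SETLT, IntVT)  == ISD::SETGE   // flip L, G, E
//   getSetCCInverse(ISD::SETOLT, FpVT)  == ISD::SETUGE  // also flip U
// (IntVT/FpVT stand for any integer/floating-point EVT.)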
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  return getSetCCInverseImpl(Op, Type.isInteger());
}

ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
                                               bool isIntegerLike) {
  return getSetCCInverseImpl(Op, isIntegerLike);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if the result is an unsigned comparison. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent
/// them solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::PSEUDO_PROBE:
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlign().value());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}
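// Taken together, a node's CSE key is (opcode, interned VT-list pointer,
// (operand node, result number) pairs, plus the node-specific data added in
// AddNodeIDCustom). A rough sketch of the consequence, with DAG, X and Y as
// assumed stand-ins:
//   SDValue A = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
//   SDValue B = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
// Both requests hash to the same FoldingSet bucket and return the same
// SDNode (nodes producing Glue are never CSE'd; see doNotCSE below).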
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true;  // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;  // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true;  // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to the next node if we've already managed to delete this one. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}
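// Sketch of the worklist behaviour above: deleting a dead node drops its
// operand edges, and any operand whose use count thereby reaches zero is
// pushed onto the same worklist, so an entire dead expression tree (e.g. an
// unused (add (mul a, b), c) chain) is reclaimed in a single pass.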
void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG
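// Observation about the insertion path below (debug builds only): each newly
// inserted node receives a PersistentId, which is the number SDNode::dump()
// prints as "tN"; this keeps dumps of the same input stable across runs. This
// is a note on the surrounding code, not a guarantee of the numbering scheme.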
/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

Align SelectionDAG::getEVTAlign(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                 PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                 VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlign(Ty);
}
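// A lifecycle note (how this class is typically driven, not something
// enforced here): a SelectionDAG object is reused across an entire
// instruction-selection run. init() is called once per MachineFunction,
// and clear() resets the DAG between uses, e.g. between the basic blocks
// being selected.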
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis * Divergence,
                        ProfileSummaryInfo *PSIin,
                        BlockFrequencyInfo *BFIin) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
  PSI = PSIin;
  BFI = BFIin;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

bool SelectionDAG::shouldOptForSize() const {
  return MF->getFunction().hasOptSize() ||
         llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

std::pair<SDValue, SDValue>
SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
                                       const SDLoc &DL, EVT VT) {
  assert(!VT.bitsEq(Op.getValueType()) &&
         "Strict no-op FP extend/round not allowed.");
  SDValue Res =
      VT.bitsGT(Op.getValueType())
          ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
          : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
                    {Chain, Op, getIntPtrConstant(0, DL)});

  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  EVT OpVT = Op.getValueType();
  assert(VT.isInteger() && OpVT.isInteger() &&
         "Cannot getZeroExtendInReg FP types");
  assert(VT.isVector() == OpVT.isVector() &&
         "getZeroExtendInReg type should be vector iff the operand "
         "type is vector!");
  assert((!VT.isVector() ||
          VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
         "Vector element counts must match in getZeroExtendInReg");
  assert(VT.bitsLE(OpVT) && "Not extending!");
  if (OpVT == VT)
    return Op;
  APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
                                   VT.getScalarSizeInBits());
  return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
}
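// For example (illustrative): getZeroExtendInReg(Op /*i32*/, DL, MVT::i8)
// produces (and Op, 0xFF) of type i32 -- the value is zero-extended "in the
// register" from its low 8 bits; the node's type itself does not change.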
SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
      getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(
          NewVal.lshr(i * ViaEltSizeInBits).zextOrTrunc(ViaEltSizeInBits), DL,
          ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      llvm::append_range(Ops, EltParts);

    SDValue V =
        getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
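  // Worked example of the expansion path above (assuming MIPS32-style
  // legalization and a little-endian target): a v2i64 splat of
  // 0x0000000100000002 is split into two i32 parts <0x2, 0x1>, built as the
  // v4i32 vector <0x2, 0x1, 0x2, 0x1>, and then bitcast back to v2i64.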
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isScalableVector())
    Result = getSplatVector(VT, DL, Result);
  else if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
                                             const SDLoc &DL, bool LegalTypes) {
  assert(VT.isInteger() && "Shift amount is not an integer type!");
  EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
  return getConstant(Val, DL, ShiftVT);
}

SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
                                           bool isTarget) {
  return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0,
  // and we don't have issues with SNANs.
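  // For instance, +0.0 and -0.0 compare equal as doubles but have different
  // bit patterns, so keying the lookup on the ConstantFP (i.e. the bit
  // pattern) keeps them as distinct nodes.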
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isScalableVector())
    Result = getSplatVector(VT, DL, Result);
  else if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16 || EltVT == MVT::bf16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (!Alignment)
    Alignment = shouldOptForSize()
                    ? getDataLayout().getABITypeAlign(C->getType())
                    : getDataLayout().getPrefTypeAlign(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment->value());
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
  return V;
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (!Alignment)
    Alignment = getDataLayout().getPrefTypeAlign(C->getType());
ISD::TargetConstantPool : ISD::ConstantPool; 1617 FoldingSetNodeID ID; 1618 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1619 ID.AddInteger(Alignment->value()); 1620 ID.AddInteger(Offset); 1621 C->addSelectionDAGCSEId(ID); 1622 ID.AddInteger(TargetFlags); 1623 void *IP = nullptr; 1624 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1625 return SDValue(E, 0); 1626 1627 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment, 1628 TargetFlags); 1629 CSEMap.InsertNode(N, IP); 1630 InsertNode(N); 1631 return SDValue(N, 0); 1632 } 1633 1634 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset, 1635 unsigned TargetFlags) { 1636 FoldingSetNodeID ID; 1637 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None); 1638 ID.AddInteger(Index); 1639 ID.AddInteger(Offset); 1640 ID.AddInteger(TargetFlags); 1641 void *IP = nullptr; 1642 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1643 return SDValue(E, 0); 1644 1645 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags); 1646 CSEMap.InsertNode(N, IP); 1647 InsertNode(N); 1648 return SDValue(N, 0); 1649 } 1650 1651 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { 1652 FoldingSetNodeID ID; 1653 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None); 1654 ID.AddPointer(MBB); 1655 void *IP = nullptr; 1656 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1657 return SDValue(E, 0); 1658 1659 auto *N = newSDNode<BasicBlockSDNode>(MBB); 1660 CSEMap.InsertNode(N, IP); 1661 InsertNode(N); 1662 return SDValue(N, 0); 1663 } 1664 1665 SDValue SelectionDAG::getValueType(EVT VT) { 1666 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >= 1667 ValueTypeNodes.size()) 1668 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1); 1669 1670 SDNode *&N = VT.isExtended() ? 1671 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy]; 1672 1673 if (N) return SDValue(N, 0); 1674 N = newSDNode<VTSDNode>(VT); 1675 InsertNode(N); 1676 return SDValue(N, 0); 1677 } 1678 1679 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) { 1680 SDNode *&N = ExternalSymbols[Sym]; 1681 if (N) return SDValue(N, 0); 1682 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT); 1683 InsertNode(N); 1684 return SDValue(N, 0); 1685 } 1686 1687 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) { 1688 SDNode *&N = MCSymbols[Sym]; 1689 if (N) 1690 return SDValue(N, 0); 1691 N = newSDNode<MCSymbolSDNode>(Sym, VT); 1692 InsertNode(N); 1693 return SDValue(N, 0); 1694 } 1695 1696 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT, 1697 unsigned TargetFlags) { 1698 SDNode *&N = 1699 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)]; 1700 if (N) return SDValue(N, 0); 1701 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT); 1702 InsertNode(N); 1703 return SDValue(N, 0); 1704 } 1705 1706 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) { 1707 if ((unsigned)Cond >= CondCodeNodes.size()) 1708 CondCodeNodes.resize(Cond+1); 1709 1710 if (!CondCodeNodes[Cond]) { 1711 auto *N = newSDNode<CondCodeSDNode>(Cond); 1712 CondCodeNodes[Cond] = N; 1713 InsertNode(N); 1714 } 1715 1716 return SDValue(CondCodeNodes[Cond], 0); 1717 } 1718 1719 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that 1720 /// point at N1 to point at N2 and indices that point at N2 to point at N1. 
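/// For example, with 4-element inputs, commuting
///   shuffle(A, B, <0,5,2,7>)  -->  shuffle(B, A, <4,1,6,3>)
/// i.e. every in-range mask index I becomes (I < 4 ? I + 4 : I - 4), while
/// sentinel (-1) entries are left alone.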
1721 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) { 1722 std::swap(N1, N2); 1723 ShuffleVectorSDNode::commuteMask(M); 1724 } 1725 1726 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, 1727 SDValue N2, ArrayRef<int> Mask) { 1728 assert(VT.getVectorNumElements() == Mask.size() && 1729 "Must have the same number of vector elements as mask elements!"); 1730 assert(VT == N1.getValueType() && VT == N2.getValueType() && 1731 "Invalid VECTOR_SHUFFLE"); 1732 1733 // Canonicalize shuffle undef, undef -> undef 1734 if (N1.isUndef() && N2.isUndef()) 1735 return getUNDEF(VT); 1736 1737 // Validate that all indices in Mask are within the range of the elements 1738 // input to the shuffle. 1739 int NElts = Mask.size(); 1740 assert(llvm::all_of(Mask, 1741 [&](int M) { return M < (NElts * 2) && M >= -1; }) && 1742 "Index out of range"); 1743 1744 // Copy the mask so we can do any needed cleanup. 1745 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end()); 1746 1747 // Canonicalize shuffle v, v -> v, undef 1748 if (N1 == N2) { 1749 N2 = getUNDEF(VT); 1750 for (int i = 0; i != NElts; ++i) 1751 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts; 1752 } 1753 1754 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. 1755 if (N1.isUndef()) 1756 commuteShuffle(N1, N2, MaskVec); 1757 1758 if (TLI->hasVectorBlend()) { 1759 // If shuffling a splat, try to blend the splat instead. We do this here so 1760 // that even when this arises during lowering we don't have to re-handle it. 1761 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) { 1762 BitVector UndefElements; 1763 SDValue Splat = BV->getSplatValue(&UndefElements); 1764 if (!Splat) 1765 return; 1766 1767 for (int i = 0; i < NElts; ++i) { 1768 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts)) 1769 continue; 1770 1771 // If this input comes from undef, mark it as such. 1772 if (UndefElements[MaskVec[i] - Offset]) { 1773 MaskVec[i] = -1; 1774 continue; 1775 } 1776 1777 // If we can blend a non-undef lane, use that instead. 1778 if (!UndefElements[i]) 1779 MaskVec[i] = i + Offset; 1780 } 1781 }; 1782 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1)) 1783 BlendSplat(N1BV, 0); 1784 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2)) 1785 BlendSplat(N2BV, NElts); 1786 } 1787 1788 // Canonicalize all index into lhs, -> shuffle lhs, undef 1789 // Canonicalize all index into rhs, -> shuffle rhs, undef 1790 bool AllLHS = true, AllRHS = true; 1791 bool N2Undef = N2.isUndef(); 1792 for (int i = 0; i != NElts; ++i) { 1793 if (MaskVec[i] >= NElts) { 1794 if (N2Undef) 1795 MaskVec[i] = -1; 1796 else 1797 AllLHS = false; 1798 } else if (MaskVec[i] >= 0) { 1799 AllRHS = false; 1800 } 1801 } 1802 if (AllLHS && AllRHS) 1803 return getUNDEF(VT); 1804 if (AllLHS && !N2Undef) 1805 N2 = getUNDEF(VT); 1806 if (AllRHS) { 1807 N1 = getUNDEF(VT); 1808 commuteShuffle(N1, N2, MaskVec); 1809 } 1810 // Reset our undef status after accounting for the mask. 1811 N2Undef = N2.isUndef(); 1812 // Re-check whether both sides ended up undef. 1813 if (N1.isUndef() && N2Undef) 1814 return getUNDEF(VT); 1815 1816 // If Identity shuffle return that node. 1817 bool Identity = true, AllSame = true; 1818 for (int i = 0; i != NElts; ++i) { 1819 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false; 1820 if (MaskVec[i] != MaskVec[0]) AllSame = false; 1821 } 1822 if (Identity && NElts) 1823 return N1; 1824 1825 // Shuffling a constant splat doesn't change the result. 
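  // E.g. shuffling <x, x, x, x> with any in-range mask still yields
  // <x, x, x, x>, so the checks below can often return N1 itself (or build
  // the splat directly) instead of creating a VECTOR_SHUFFLE node.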
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts. We check that these don't change the number
    // (and size) of elements and just change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
          V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements matches or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
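  // (OperandAllocator is a BumpPtrAllocator, so freeing individual masks is a
  // no-op; the storage is reclaimed in bulk when the allocator is reset.)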
1885 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1886 llvm::copy(MaskVec, MaskAlloc); 1887 1888 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1889 dl.getDebugLoc(), MaskAlloc); 1890 createOperands(N, Ops); 1891 1892 CSEMap.InsertNode(N, IP); 1893 InsertNode(N); 1894 SDValue V = SDValue(N, 0); 1895 NewSDValueDbgMsg(V, "Creating new node: ", this); 1896 return V; 1897 } 1898 1899 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1900 EVT VT = SV.getValueType(0); 1901 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1902 ShuffleVectorSDNode::commuteMask(MaskVec); 1903 1904 SDValue Op0 = SV.getOperand(0); 1905 SDValue Op1 = SV.getOperand(1); 1906 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1907 } 1908 1909 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1910 FoldingSetNodeID ID; 1911 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1912 ID.AddInteger(RegNo); 1913 void *IP = nullptr; 1914 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1915 return SDValue(E, 0); 1916 1917 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1918 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 1919 CSEMap.InsertNode(N, IP); 1920 InsertNode(N); 1921 return SDValue(N, 0); 1922 } 1923 1924 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1925 FoldingSetNodeID ID; 1926 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1927 ID.AddPointer(RegMask); 1928 void *IP = nullptr; 1929 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1930 return SDValue(E, 0); 1931 1932 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1933 CSEMap.InsertNode(N, IP); 1934 InsertNode(N); 1935 return SDValue(N, 0); 1936 } 1937 1938 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1939 MCSymbol *Label) { 1940 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1941 } 1942 1943 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1944 SDValue Root, MCSymbol *Label) { 1945 FoldingSetNodeID ID; 1946 SDValue Ops[] = { Root }; 1947 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1948 ID.AddPointer(Label); 1949 void *IP = nullptr; 1950 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1951 return SDValue(E, 0); 1952 1953 auto *N = 1954 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label); 1955 createOperands(N, Ops); 1956 1957 CSEMap.InsertNode(N, IP); 1958 InsertNode(N); 1959 return SDValue(N, 0); 1960 } 1961 1962 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1963 int64_t Offset, bool isTarget, 1964 unsigned TargetFlags) { 1965 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1966 1967 FoldingSetNodeID ID; 1968 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1969 ID.AddPointer(BA); 1970 ID.AddInteger(Offset); 1971 ID.AddInteger(TargetFlags); 1972 void *IP = nullptr; 1973 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1974 return SDValue(E, 0); 1975 1976 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1977 CSEMap.InsertNode(N, IP); 1978 InsertNode(N); 1979 return SDValue(N, 0); 1980 } 1981 1982 SDValue SelectionDAG::getSrcValue(const Value *V) { 1983 FoldingSetNodeID ID; 1984 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1985 ID.AddPointer(V); 1986 1987 void *IP = nullptr; 1988 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1989 return SDValue(E, 0); 1990 1991 auto *N = newSDNode<SrcValueSDNode>(V); 1992 CSEMap.InsertNode(N, IP); 1993 InsertNode(N); 1994 return SDValue(N, 0); 1995 } 1996 1997 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1998 FoldingSetNodeID ID; 1999 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 2000 ID.AddPointer(MD); 2001 2002 void *IP = nullptr; 2003 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 2004 return SDValue(E, 0); 2005 2006 auto *N = newSDNode<MDNodeSDNode>(MD); 2007 CSEMap.InsertNode(N, IP); 2008 InsertNode(N); 2009 return SDValue(N, 0); 2010 } 2011 2012 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 2013 if (VT == V.getValueType()) 2014 return V; 2015 2016 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 2017 } 2018 2019 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 2020 unsigned SrcAS, unsigned DestAS) { 2021 SDValue Ops[] = {Ptr}; 2022 FoldingSetNodeID ID; 2023 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 2024 ID.AddInteger(SrcAS); 2025 ID.AddInteger(DestAS); 2026 2027 void *IP = nullptr; 2028 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 2029 return SDValue(E, 0); 2030 2031 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 2032 VT, SrcAS, DestAS); 2033 createOperands(N, Ops); 2034 2035 CSEMap.InsertNode(N, IP); 2036 InsertNode(N); 2037 return SDValue(N, 0); 2038 } 2039 2040 SDValue SelectionDAG::getFreeze(SDValue V) { 2041 return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V); 2042 } 2043 2044 /// getShiftAmountOperand - Return the specified value casted to 2045 /// the target's desired shift amount type. 
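/// E.g. if a target prefers i32 shift amounts for an i64 shifted value, an
/// i8 amount operand would be zero-extended to i32 here; vector shift
/// amounts are returned unchanged.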
2046 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 2047 EVT OpTy = Op.getValueType(); 2048 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 2049 if (OpTy == ShTy || OpTy.isVector()) return Op; 2050 2051 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 2052 } 2053 2054 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 2055 SDLoc dl(Node); 2056 const TargetLowering &TLI = getTargetLoweringInfo(); 2057 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2058 EVT VT = Node->getValueType(0); 2059 SDValue Tmp1 = Node->getOperand(0); 2060 SDValue Tmp2 = Node->getOperand(1); 2061 const MaybeAlign MA(Node->getConstantOperandVal(3)); 2062 2063 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 2064 Tmp2, MachinePointerInfo(V)); 2065 SDValue VAList = VAListLoad; 2066 2067 if (MA && *MA > TLI.getMinStackArgumentAlignment()) { 2068 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 2069 getConstant(MA->value() - 1, dl, VAList.getValueType())); 2070 2071 VAList = 2072 getNode(ISD::AND, dl, VAList.getValueType(), VAList, 2073 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType())); 2074 } 2075 2076 // Increment the pointer, VAList, to the next vaarg 2077 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 2078 getConstant(getDataLayout().getTypeAllocSize( 2079 VT.getTypeForEVT(*getContext())), 2080 dl, VAList.getValueType())); 2081 // Store the incremented VAList to the legalized pointer 2082 Tmp1 = 2083 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 2084 // Load the actual argument out of the pointer VAList 2085 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 2086 } 2087 2088 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 2089 SDLoc dl(Node); 2090 const TargetLowering &TLI = getTargetLoweringInfo(); 2091 // This defaults to loading a pointer from the input and storing it to the 2092 // output, returning the chain. 2093 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 2094 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 2095 SDValue Tmp1 = 2096 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 2097 Node->getOperand(2), MachinePointerInfo(VS)); 2098 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 2099 MachinePointerInfo(VD)); 2100 } 2101 2102 Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) { 2103 const DataLayout &DL = getDataLayout(); 2104 Type *Ty = VT.getTypeForEVT(*getContext()); 2105 Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty); 2106 2107 if (TLI->isTypeLegal(VT) || !VT.isVector()) 2108 return RedAlign; 2109 2110 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); 2111 const Align StackAlign = TFI->getStackAlign(); 2112 2113 // See if we can choose a smaller ABI alignment in cases where it's an 2114 // illegal vector type that will get broken down. 2115 if (RedAlign > StackAlign) { 2116 EVT IntermediateVT; 2117 MVT RegisterVT; 2118 unsigned NumIntermediates; 2119 TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT, 2120 NumIntermediates, RegisterVT); 2121 Ty = IntermediateVT.getTypeForEVT(*getContext()); 2122 Align RedAlign2 = UseABI ? 
DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty); 2123 if (RedAlign2 < RedAlign) 2124 RedAlign = RedAlign2; 2125 } 2126 2127 return RedAlign; 2128 } 2129 2130 SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) { 2131 MachineFrameInfo &MFI = MF->getFrameInfo(); 2132 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); 2133 int StackID = 0; 2134 if (Bytes.isScalable()) 2135 StackID = TFI->getStackIDForScalableVectors(); 2136 // The stack id gives an indication of whether the object is scalable or 2137 // not, so it's safe to pass in the minimum size here. 2138 int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment, 2139 false, nullptr, StackID); 2140 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 2141 } 2142 2143 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 2144 Type *Ty = VT.getTypeForEVT(*getContext()); 2145 Align StackAlign = 2146 std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign)); 2147 return CreateStackTemporary(VT.getStoreSize(), StackAlign); 2148 } 2149 2150 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 2151 TypeSize VT1Size = VT1.getStoreSize(); 2152 TypeSize VT2Size = VT2.getStoreSize(); 2153 assert(VT1Size.isScalable() == VT2Size.isScalable() && 2154 "Don't know how to choose the maximum size when creating a stack " 2155 "temporary"); 2156 TypeSize Bytes = 2157 VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size; 2158 2159 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 2160 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 2161 const DataLayout &DL = getDataLayout(); 2162 Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2)); 2163 return CreateStackTemporary(Bytes, Align); 2164 } 2165 2166 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 2167 ISD::CondCode Cond, const SDLoc &dl) { 2168 EVT OpVT = N1.getValueType(); 2169 2170 // These setcc operations always fold. 2171 switch (Cond) { 2172 default: break; 2173 case ISD::SETFALSE: 2174 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); 2175 case ISD::SETTRUE: 2176 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); 2177 2178 case ISD::SETOEQ: 2179 case ISD::SETOGT: 2180 case ISD::SETOGE: 2181 case ISD::SETOLT: 2182 case ISD::SETOLE: 2183 case ISD::SETONE: 2184 case ISD::SETO: 2185 case ISD::SETUO: 2186 case ISD::SETUEQ: 2187 case ISD::SETUNE: 2188 assert(!OpVT.isInteger() && "Illegal setcc for integer!"); 2189 break; 2190 } 2191 2192 if (OpVT.isInteger()) { 2193 // For EQ and NE, we can always pick a value for the undef to make the 2194 // predicate pass or fail, so we can return undef. 2195 // Matches behavior in llvm::ConstantFoldCompareInstruction. 2196 // icmp eq/ne X, undef -> undef. 2197 if ((N1.isUndef() || N2.isUndef()) && 2198 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) 2199 return getUNDEF(VT); 2200 2201 // If both operands are undef, we can return undef for int comparison. 2202 // icmp undef, undef -> undef. 2203 if (N1.isUndef() && N2.isUndef()) 2204 return getUNDEF(VT); 2205 2206 // icmp X, X -> true/false 2207 // icmp X, undef -> true/false because undef could be X. 
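    // E.g. (seteq X, X) folds to true while (setult X, X) folds to false;
    // ISD::isTrueWhenEqual distinguishes the two from the condition code
    // alone.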
2208 if (N1 == N2) 2209 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT); 2210 } 2211 2212 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 2213 const APInt &C2 = N2C->getAPIntValue(); 2214 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 2215 const APInt &C1 = N1C->getAPIntValue(); 2216 2217 switch (Cond) { 2218 default: llvm_unreachable("Unknown integer setcc!"); 2219 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT); 2220 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT); 2221 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT); 2222 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT); 2223 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT); 2224 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT); 2225 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT); 2226 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT); 2227 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT); 2228 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT); 2229 } 2230 } 2231 } 2232 2233 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 2234 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 2235 2236 if (N1CFP && N2CFP) { 2237 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF()); 2238 switch (Cond) { 2239 default: break; 2240 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 2241 return getUNDEF(VT); 2242 LLVM_FALLTHROUGH; 2243 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, 2244 OpVT); 2245 case ISD::SETNE: if (R==APFloat::cmpUnordered) 2246 return getUNDEF(VT); 2247 LLVM_FALLTHROUGH; 2248 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2249 R==APFloat::cmpLessThan, dl, VT, 2250 OpVT); 2251 case ISD::SETLT: if (R==APFloat::cmpUnordered) 2252 return getUNDEF(VT); 2253 LLVM_FALLTHROUGH; 2254 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, 2255 OpVT); 2256 case ISD::SETGT: if (R==APFloat::cmpUnordered) 2257 return getUNDEF(VT); 2258 LLVM_FALLTHROUGH; 2259 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, 2260 VT, OpVT); 2261 case ISD::SETLE: if (R==APFloat::cmpUnordered) 2262 return getUNDEF(VT); 2263 LLVM_FALLTHROUGH; 2264 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || 2265 R==APFloat::cmpEqual, dl, VT, 2266 OpVT); 2267 case ISD::SETGE: if (R==APFloat::cmpUnordered) 2268 return getUNDEF(VT); 2269 LLVM_FALLTHROUGH; 2270 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2271 R==APFloat::cmpEqual, dl, VT, OpVT); 2272 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, 2273 OpVT); 2274 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, 2275 OpVT); 2276 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || 2277 R==APFloat::cmpEqual, dl, VT, 2278 OpVT); 2279 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, 2280 OpVT); 2281 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || 2282 R==APFloat::cmpLessThan, dl, VT, 2283 OpVT); 2284 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || 2285 R==APFloat::cmpUnordered, dl, VT, 2286 OpVT); 2287 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, 2288 VT, OpVT); 2289 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, 2290 OpVT); 2291 } 2292 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) { 2293 // 
Ensure that the constant occurs on the RHS.
    ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
    if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
      return SDValue();
    return getSetCC(dl, VT, N2, N1, SwappedCond);
  } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
             (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
    // If an operand is known to be a NaN (or undef that could be a NaN), we
    // can fold it.
    // Choosing NaN for the undef will always make unordered comparisons
    // succeed and ordered comparisons fail.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    switch (ISD::getUnorderedFlavor(Cond)) {
    default:
      llvm_unreachable("Unknown flavor!");
    case 0: // Known false.
      return getBoolConstant(false, dl, VT, OpVT);
    case 1: // Known true.
      return getBoolConstant(true, dl, VT, OpVT);
    case 2: // Undefined.
      return getUNDEF(VT);
    }
  }

  // Could not fold it.
  return SDValue();
}

/// See if the specified operand can be simplified with the knowledge that only
/// the bits specified by DemandedBits are used.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
  EVT VT = V.getValueType();

  if (VT.isScalableVector())
    return SDValue();

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return GetDemandedBits(V, DemandedBits, DemandedElts);
}

/// See if the specified operand can be simplified with the knowledge that only
/// the bits specified by DemandedBits are used in the elements specified by
/// DemandedElts.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
                                      const APInt &DemandedElts) {
  switch (V.getOpcode()) {
  default:
    return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
                                                *this, 0);
  case ISD::Constant: {
    const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
    APInt NewVal = CVal & DemandedBits;
    if (NewVal != CVal)
      return getConstant(NewVal, SDLoc(V), V.getValueType());
    break;
  }
  case ISD::SRL:
    // Only look at single-use SRLs.
    if (!V.getNode()->hasOneUse())
      break;
    if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
      // See if we can recursively simplify the LHS.
      unsigned Amt = RHSC->getZExtValue();

      // Watch out for shift count overflow though.
      if (Amt >= DemandedBits.getBitWidth())
        break;
      APInt SrcDemandedBits = DemandedBits << Amt;
      if (SDValue SimplifyLHS =
              GetDemandedBits(V.getOperand(0), SrcDemandedBits))
        return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
                       V.getOperand(1));
    }
    break;
  }
  return SDValue();
}

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
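/// Equivalent to MaskedValueIsZero(Op, SignMask) at the scalar value width,
/// e.g. the mask tested is 0x80 for an i8 operand.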
2379 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2380 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2381 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2382 } 2383 2384 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2385 /// this predicate to simplify operations downstream. Mask is known to be zero 2386 /// for bits that V cannot have. 2387 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2388 unsigned Depth) const { 2389 return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero); 2390 } 2391 2392 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in 2393 /// DemandedElts. We use this predicate to simplify operations downstream. 2394 /// Mask is known to be zero for bits that V cannot have. 2395 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2396 const APInt &DemandedElts, 2397 unsigned Depth) const { 2398 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero); 2399 } 2400 2401 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'. 2402 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask, 2403 unsigned Depth) const { 2404 return Mask.isSubsetOf(computeKnownBits(V, Depth).One); 2405 } 2406 2407 /// isSplatValue - Return true if the vector V has the same value 2408 /// across all DemandedElts. For scalable vectors it does not make 2409 /// sense to specify which elements are demanded or undefined, therefore 2410 /// they are simply ignored. 2411 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, 2412 APInt &UndefElts, unsigned Depth) { 2413 EVT VT = V.getValueType(); 2414 assert(VT.isVector() && "Vector type expected"); 2415 2416 if (!VT.isScalableVector() && !DemandedElts) 2417 return false; // No demanded elts, better to assume we don't know anything. 2418 2419 if (Depth >= MaxRecursionDepth) 2420 return false; // Limit search depth. 2421 2422 // Deal with some common cases here that work for both fixed and scalable 2423 // vector types. 2424 switch (V.getOpcode()) { 2425 case ISD::SPLAT_VECTOR: 2426 UndefElts = V.getOperand(0).isUndef() 2427 ? APInt::getAllOnesValue(DemandedElts.getBitWidth()) 2428 : APInt(DemandedElts.getBitWidth(), 0); 2429 return true; 2430 case ISD::ADD: 2431 case ISD::SUB: 2432 case ISD::AND: { 2433 APInt UndefLHS, UndefRHS; 2434 SDValue LHS = V.getOperand(0); 2435 SDValue RHS = V.getOperand(1); 2436 if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) && 2437 isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) { 2438 UndefElts = UndefLHS | UndefRHS; 2439 return true; 2440 } 2441 break; 2442 } 2443 case ISD::TRUNCATE: 2444 case ISD::SIGN_EXTEND: 2445 case ISD::ZERO_EXTEND: 2446 return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1); 2447 } 2448 2449 // We don't support other cases than those above for scalable vectors at 2450 // the moment. 
2451 if (VT.isScalableVector()) 2452 return false; 2453 2454 unsigned NumElts = VT.getVectorNumElements(); 2455 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch"); 2456 UndefElts = APInt::getNullValue(NumElts); 2457 2458 switch (V.getOpcode()) { 2459 case ISD::BUILD_VECTOR: { 2460 SDValue Scl; 2461 for (unsigned i = 0; i != NumElts; ++i) { 2462 SDValue Op = V.getOperand(i); 2463 if (Op.isUndef()) { 2464 UndefElts.setBit(i); 2465 continue; 2466 } 2467 if (!DemandedElts[i]) 2468 continue; 2469 if (Scl && Scl != Op) 2470 return false; 2471 Scl = Op; 2472 } 2473 return true; 2474 } 2475 case ISD::VECTOR_SHUFFLE: { 2476 // Check if this is a shuffle node doing a splat. 2477 // TODO: Do we need to handle shuffle(splat, undef, mask)? 2478 int SplatIndex = -1; 2479 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); 2480 for (int i = 0; i != (int)NumElts; ++i) { 2481 int M = Mask[i]; 2482 if (M < 0) { 2483 UndefElts.setBit(i); 2484 continue; 2485 } 2486 if (!DemandedElts[i]) 2487 continue; 2488 if (0 <= SplatIndex && SplatIndex != M) 2489 return false; 2490 SplatIndex = M; 2491 } 2492 return true; 2493 } 2494 case ISD::EXTRACT_SUBVECTOR: { 2495 // Offset the demanded elts by the subvector index. 2496 SDValue Src = V.getOperand(0); 2497 uint64_t Idx = V.getConstantOperandVal(1); 2498 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2499 APInt UndefSrcElts; 2500 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2501 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) { 2502 UndefElts = UndefSrcElts.extractBits(NumElts, Idx); 2503 return true; 2504 } 2505 break; 2506 } 2507 } 2508 2509 return false; 2510 } 2511 2512 /// Helper wrapper to main isSplatValue function. 2513 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) { 2514 EVT VT = V.getValueType(); 2515 assert(VT.isVector() && "Vector type expected"); 2516 2517 APInt UndefElts; 2518 APInt DemandedElts; 2519 2520 // For now we don't support this with scalable vectors. 2521 if (!VT.isScalableVector()) 2522 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); 2523 return isSplatValue(V, DemandedElts, UndefElts) && 2524 (AllowUndefs || !UndefElts); 2525 } 2526 2527 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) { 2528 V = peekThroughExtractSubvectors(V); 2529 2530 EVT VT = V.getValueType(); 2531 unsigned Opcode = V.getOpcode(); 2532 switch (Opcode) { 2533 default: { 2534 APInt UndefElts; 2535 APInt DemandedElts; 2536 2537 if (!VT.isScalableVector()) 2538 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); 2539 2540 if (isSplatValue(V, DemandedElts, UndefElts)) { 2541 if (VT.isScalableVector()) { 2542 // DemandedElts and UndefElts are ignored for scalable vectors, since 2543 // the only supported cases are SPLAT_VECTOR nodes. 2544 SplatIdx = 0; 2545 } else { 2546 // Handle case where all demanded elements are UNDEF. 2547 if (DemandedElts.isSubsetOf(UndefElts)) { 2548 SplatIdx = 0; 2549 return getUNDEF(VT); 2550 } 2551 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes(); 2552 } 2553 return V; 2554 } 2555 break; 2556 } 2557 case ISD::SPLAT_VECTOR: 2558 SplatIdx = 0; 2559 return V; 2560 case ISD::VECTOR_SHUFFLE: { 2561 if (VT.isScalableVector()) 2562 return SDValue(); 2563 2564 // Check if this is a shuffle node doing a splat. 2565 // TODO - remove this and rely purely on SelectionDAG::isSplatValue, 2566 // getTargetVShiftNode currently struggles without the splat source. 
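    // E.g. for shuffle(A, B, <6,6,6,6>) with 4-element operands, splat index
    // 6 selects operand B (6 / 4 == 1) and lane 2 within it (6 % 4 == 2).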
2567 auto *SVN = cast<ShuffleVectorSDNode>(V); 2568 if (!SVN->isSplat()) 2569 break; 2570 int Idx = SVN->getSplatIndex(); 2571 int NumElts = V.getValueType().getVectorNumElements(); 2572 SplatIdx = Idx % NumElts; 2573 return V.getOperand(Idx / NumElts); 2574 } 2575 } 2576 2577 return SDValue(); 2578 } 2579 2580 SDValue SelectionDAG::getSplatValue(SDValue V) { 2581 int SplatIdx; 2582 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) 2583 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), 2584 SrcVector.getValueType().getScalarType(), SrcVector, 2585 getVectorIdxConstant(SplatIdx, SDLoc(V))); 2586 return SDValue(); 2587 } 2588 2589 const APInt * 2590 SelectionDAG::getValidShiftAmountConstant(SDValue V, 2591 const APInt &DemandedElts) const { 2592 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || 2593 V.getOpcode() == ISD::SRA) && 2594 "Unknown shift node"); 2595 unsigned BitWidth = V.getScalarValueSizeInBits(); 2596 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) { 2597 // Shifting more than the bitwidth is not valid. 2598 const APInt &ShAmt = SA->getAPIntValue(); 2599 if (ShAmt.ult(BitWidth)) 2600 return &ShAmt; 2601 } 2602 return nullptr; 2603 } 2604 2605 const APInt *SelectionDAG::getValidMinimumShiftAmountConstant( 2606 SDValue V, const APInt &DemandedElts) const { 2607 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || 2608 V.getOpcode() == ISD::SRA) && 2609 "Unknown shift node"); 2610 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts)) 2611 return ValidAmt; 2612 unsigned BitWidth = V.getScalarValueSizeInBits(); 2613 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); 2614 if (!BV) 2615 return nullptr; 2616 const APInt *MinShAmt = nullptr; 2617 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2618 if (!DemandedElts[i]) 2619 continue; 2620 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); 2621 if (!SA) 2622 return nullptr; 2623 // Shifting more than the bitwidth is not valid. 2624 const APInt &ShAmt = SA->getAPIntValue(); 2625 if (ShAmt.uge(BitWidth)) 2626 return nullptr; 2627 if (MinShAmt && MinShAmt->ule(ShAmt)) 2628 continue; 2629 MinShAmt = &ShAmt; 2630 } 2631 return MinShAmt; 2632 } 2633 2634 const APInt *SelectionDAG::getValidMaximumShiftAmountConstant( 2635 SDValue V, const APInt &DemandedElts) const { 2636 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || 2637 V.getOpcode() == ISD::SRA) && 2638 "Unknown shift node"); 2639 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts)) 2640 return ValidAmt; 2641 unsigned BitWidth = V.getScalarValueSizeInBits(); 2642 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); 2643 if (!BV) 2644 return nullptr; 2645 const APInt *MaxShAmt = nullptr; 2646 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2647 if (!DemandedElts[i]) 2648 continue; 2649 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); 2650 if (!SA) 2651 return nullptr; 2652 // Shifting more than the bitwidth is not valid. 2653 const APInt &ShAmt = SA->getAPIntValue(); 2654 if (ShAmt.uge(BitWidth)) 2655 return nullptr; 2656 if (MaxShAmt && MaxShAmt->uge(ShAmt)) 2657 continue; 2658 MaxShAmt = &ShAmt; 2659 } 2660 return MaxShAmt; 2661 } 2662 2663 /// Determine which bits of Op are known to be either zero or one and return 2664 /// them in Known. For vectors, the known bits are those that are shared by 2665 /// every vector element. 
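/// E.g. for a BUILD_VECTOR of the i8 constants 0x01 and 0x03, bit 0 is known
/// one and bits 2-7 are known zero, while bit 1 (which differs between the
/// elements) is unknown.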
KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
  EVT VT = Op.getValueType();

  // TODO: Until we have a plan for how to represent demanded elements for
  // scalable vectors, we can just bail out for now.
  if (Op.getValueType().isScalableVector()) {
    unsigned BitWidth = Op.getScalarValueSizeInBits();
    return KnownBits(BitWidth);
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return computeKnownBits(Op, DemandedElts, Depth);
}

/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. The DemandedElts argument allows us to only collect the
/// known bits that are shared by the requested vector elements.
KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
                                         unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();

  KnownBits Known(BitWidth); // Don't know anything.

  // TODO: Until we have a plan for how to represent demanded elements for
  // scalable vectors, we can just bail out for now.
  if (Op.getValueType().isScalableVector())
    return Known;

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
    // We know all of the bits for a constant!
    return KnownBits::makeConstant(C->getAPIntValue());
  }
  if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
    // We know all of the bits for a constant fp!
    return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
  }

  if (Depth >= MaxRecursionDepth)
    return Known; // Limit search depth.

  KnownBits Known2;
  unsigned NumElts = DemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  if (!DemandedElts)
    return Known; // No demanded elts, better to assume we don't know anything.

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded vector element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;

      SDValue SrcOp = Op.getOperand(i);
      Known2 = computeKnownBits(SrcOp, Depth + 1);

      // BUILD_VECTOR can implicitly truncate sources, we must handle this.
      if (SrcOp.getValueSizeInBits() != BitWidth) {
        assert(SrcOp.getValueSizeInBits() > BitWidth &&
               "Expected BUILD_VECTOR implicit truncation");
        Known2 = Known2.trunc(BitWidth);
      }

      // Known bits are the values that are shared by every demanded element.
      Known = KnownBits::commonBits(Known, Known2);

      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  case ISD::VECTOR_SHUFFLE: {
    // Collect the known bits that are shared by every vector element referenced
    // by the shuffle.
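    // E.g. mask <0,4,1,5> with 4-element operands demands lanes {0,1} of
    // both inputs; a demanded undef (-1) mask entry conservatively resets the
    // known bits and skips both operands.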
    APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
    Known.Zero.setAllBits(); Known.One.setAllBits();
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
    assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;

      int M = SVN->getMaskElt(i);
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        Known.resetAll();
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }

      if ((unsigned)M < NumElts)
        DemandedLHS.setBit((unsigned)M % NumElts);
      else
        DemandedRHS.setBit((unsigned)M % NumElts);
    }
    // Known bits are the values that are shared by every demanded element.
    if (!!DemandedLHS) {
      SDValue LHS = Op.getOperand(0);
      Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
      Known = KnownBits::commonBits(Known, Known2);
    }
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    if (!!DemandedRHS) {
      SDValue RHS = Op.getOperand(1);
      Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
      Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    // Split DemandedElts and test each of the demanded subvectors.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; i != NumSubVectors; ++i) {
      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
      if (!!DemandedSub) {
        SDValue Sub = Op.getOperand(i);
        Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
        Known = KnownBits::commonBits(Known, Known2);
      }
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    Known.One.setAllBits();
    Known.Zero.setAllBits();
    if (!!DemandedSubElts) {
      Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
      if (Known.isUnknown())
        break; // early-out.
    }
    if (!!DemandedSrcElts) {
      Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
      Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    // Bail until we can represent demanded elements for scalable vectors.
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
    Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
    break;
  }
  case ISD::SCALAR_TO_VECTOR: {
    // We know as much about scalar_to_vector as we know about its source,
    // which becomes the first element of an otherwise unknown vector.
    if (DemandedElts != 1)
      break;

    SDValue N0 = Op.getOperand(0);
    Known = computeKnownBits(N0, Depth + 1);
    if (N0.getValueSizeInBits() != BitWidth)
      Known = Known.trunc(BitWidth);

    break;
  }
  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    EVT SubVT = N0.getValueType();
    unsigned SubBitWidth = SubVT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
      break;

    // Fast handling of 'identity' bitcasts.
    if (BitWidth == SubBitWidth) {
      Known = computeKnownBits(N0, DemandedElts, Depth + 1);
      break;
    }

    bool IsLE = getDataLayout().isLittleEndian();

    // Bitcast 'small element' vector to 'large element' scalar/vector.
    if ((BitWidth % SubBitWidth) == 0) {
      assert(N0.getValueType().isVector() && "Expected bitcast from vector");

      // Collect known bits for the (larger) output by collecting the known
      // bits from each set of sub elements and shift these into place.
      // We need to separately call computeKnownBits for each set of
      // sub elements as the knownbits for each is likely to be different.
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts(NumElts * SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);

      for (unsigned i = 0; i != SubScale; ++i) {
        Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
                                  Depth + 1);
        unsigned Shifts = IsLE ? i : SubScale - 1 - i;
        Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
        Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
      }
    }

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SubBitWidth % BitWidth) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");

      // Collect known bits for the (smaller) output by collecting the known
      // bits from the overlapping larger input elements and extracting the
      // sub sections we actually care about.
      unsigned SubScale = SubBitWidth / BitWidth;
      APInt SubDemandedElts(NumElts / SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i / SubScale);

      Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);

      Known.Zero.setAllBits(); Known.One.setAllBits();
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Shifts = IsLE ? i : NumElts - 1 - i;
          unsigned Offset = (Shifts % SubScale) * BitWidth;
          Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
          Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
          // If we don't know any bits, early out.
2918 if (Known.isUnknown()) 2919 break; 2920 } 2921 } 2922 break; 2923 } 2924 case ISD::AND: 2925 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2926 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2927 2928 Known &= Known2; 2929 break; 2930 case ISD::OR: 2931 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2932 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2933 2934 Known |= Known2; 2935 break; 2936 case ISD::XOR: 2937 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2938 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2939 2940 Known ^= Known2; 2941 break; 2942 case ISD::MUL: { 2943 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2944 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2945 Known = KnownBits::computeForMul(Known, Known2); 2946 break; 2947 } 2948 case ISD::UDIV: { 2949 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2950 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2951 Known = KnownBits::udiv(Known, Known2); 2952 break; 2953 } 2954 case ISD::SELECT: 2955 case ISD::VSELECT: 2956 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2957 // If we don't know any bits, early out. 2958 if (Known.isUnknown()) 2959 break; 2960 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1); 2961 2962 // Only known if known in both the LHS and RHS. 2963 Known = KnownBits::commonBits(Known, Known2); 2964 break; 2965 case ISD::SELECT_CC: 2966 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1); 2967 // If we don't know any bits, early out. 2968 if (Known.isUnknown()) 2969 break; 2970 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2971 2972 // Only known if known in both the LHS and RHS. 2973 Known = KnownBits::commonBits(Known, Known2); 2974 break; 2975 case ISD::SMULO: 2976 case ISD::UMULO: 2977 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 2978 if (Op.getResNo() != 1) 2979 break; 2980 // The boolean result conforms to getBooleanContents. 2981 // If we know the result of a setcc has the top bits zero, use this info. 2982 // We know that we have an integer-based boolean since these operations 2983 // are only available for integer. 2984 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2985 TargetLowering::ZeroOrOneBooleanContent && 2986 BitWidth > 1) 2987 Known.Zero.setBitsFrom(1); 2988 break; 2989 case ISD::SETCC: 2990 case ISD::STRICT_FSETCC: 2991 case ISD::STRICT_FSETCCS: { 2992 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 2993 // If we know the result of a setcc has the top bits zero, use this info. 2994 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 2995 TargetLowering::ZeroOrOneBooleanContent && 2996 BitWidth > 1) 2997 Known.Zero.setBitsFrom(1); 2998 break; 2999 } 3000 case ISD::SHL: 3001 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3002 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3003 Known = KnownBits::shl(Known, Known2); 3004 3005 // Minimum shift low bits are known zero. 
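    // E.g. for (shl X, 3) on i8, the low 3 bits of the result are known zero
    // even when nothing is known about X.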
3006 if (const APInt *ShMinAmt = 3007 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 3008 Known.Zero.setLowBits(ShMinAmt->getZExtValue()); 3009 break; 3010 case ISD::SRL: 3011 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3012 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3013 Known = KnownBits::lshr(Known, Known2); 3014 3015 // Minimum shift high bits are known zero. 3016 if (const APInt *ShMinAmt = 3017 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 3018 Known.Zero.setHighBits(ShMinAmt->getZExtValue()); 3019 break; 3020 case ISD::SRA: 3021 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3022 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3023 Known = KnownBits::ashr(Known, Known2); 3024 // TODO: Add minimum shift high known sign bits. 3025 break; 3026 case ISD::FSHL: 3027 case ISD::FSHR: 3028 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) { 3029 unsigned Amt = C->getAPIntValue().urem(BitWidth); 3030 3031 // For fshl, 0-shift returns the 1st arg. 3032 // For fshr, 0-shift returns the 2nd arg. 3033 if (Amt == 0) { 3034 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1), 3035 DemandedElts, Depth + 1); 3036 break; 3037 } 3038 3039 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 3040 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 3041 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3042 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3043 if (Opcode == ISD::FSHL) { 3044 Known.One <<= Amt; 3045 Known.Zero <<= Amt; 3046 Known2.One.lshrInPlace(BitWidth - Amt); 3047 Known2.Zero.lshrInPlace(BitWidth - Amt); 3048 } else { 3049 Known.One <<= BitWidth - Amt; 3050 Known.Zero <<= BitWidth - Amt; 3051 Known2.One.lshrInPlace(Amt); 3052 Known2.Zero.lshrInPlace(Amt); 3053 } 3054 Known.One |= Known2.One; 3055 Known.Zero |= Known2.Zero; 3056 } 3057 break; 3058 case ISD::SIGN_EXTEND_INREG: { 3059 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3060 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 3061 Known = Known.sextInReg(EVT.getScalarSizeInBits()); 3062 break; 3063 } 3064 case ISD::CTTZ: 3065 case ISD::CTTZ_ZERO_UNDEF: { 3066 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3067 // If we have a known 1, its position is our upper bound. 3068 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 3069 unsigned LowBits = Log2_32(PossibleTZ) + 1; 3070 Known.Zero.setBitsFrom(LowBits); 3071 break; 3072 } 3073 case ISD::CTLZ: 3074 case ISD::CTLZ_ZERO_UNDEF: { 3075 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3076 // If we have a known 1, its position is our upper bound. 3077 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 3078 unsigned LowBits = Log2_32(PossibleLZ) + 1; 3079 Known.Zero.setBitsFrom(LowBits); 3080 break; 3081 } 3082 case ISD::CTPOP: { 3083 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3084 // If we know some of the bits are zero, they can't be one. 3085 unsigned PossibleOnes = Known2.countMaxPopulation(); 3086 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 3087 break; 3088 } 3089 case ISD::PARITY: { 3090 // Parity returns 0 everywhere but the LSB. 
    Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
    if (ISD::isNON_EXTLoad(LD) && Cst) {
      // Determine any common known bits from the loaded constant pool value.
      Type *CstTy = Cst->getType();
      if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
        // If it's a vector splat, then we can (quickly) reuse the scalar path.
        // NOTE: We assume all elements match and none are UNDEF.
        if (CstTy->isVectorTy()) {
          if (const Constant *Splat = Cst->getSplatValue()) {
            Cst = Splat;
            CstTy = Cst->getType();
          }
        }
        // TODO - do we need to handle different bitwidths?
        if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
          // Iterate across all vector elements finding common known bits.
          Known.One.setAllBits();
          Known.Zero.setAllBits();
          for (unsigned i = 0; i != NumElts; ++i) {
            if (!DemandedElts[i])
              continue;
            if (Constant *Elt = Cst->getAggregateElement(i)) {
              if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                const APInt &Value = CInt->getValue();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
              if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                APInt Value = CFP->getValueAPF().bitcastToAPInt();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
            }
            Known.One.clearAllBits();
            Known.Zero.clearAllBits();
            break;
          }
        } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
          if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
            const APInt &Value = CInt->getValue();
            Known.One = Value;
            Known.Zero = ~Value;
          } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
            APInt Value = CFP->getValueAPF().bitcastToAPInt();
            Known.One = Value;
            Known.Zero = ~Value;
          }
        }
      }
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      if (LD->getExtensionType() == ISD::NON_EXTLOAD)
        computeKnownBitsFromRangeMetadata(*Ranges, Known);
    }
    break;
  }
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::ZERO_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
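    // E.g. sign-extending an i8 whose sign bit is known zero to i16 makes
    // the top 9 bits known zero; were the sign bit known one, they would all
    // become known one.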
3183 Known = Known.sext(BitWidth);
3184 break;
3185 }
3186 case ISD::ANY_EXTEND_VECTOR_INREG: {
3187 EVT InVT = Op.getOperand(0).getValueType();
3188 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3189 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3190 Known = Known.anyext(BitWidth);
3191 break;
3192 }
3193 case ISD::ANY_EXTEND: {
3194 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3195 Known = Known.anyext(BitWidth);
3196 break;
3197 }
3198 case ISD::TRUNCATE: {
3199 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3200 Known = Known.trunc(BitWidth);
3201 break;
3202 }
3203 case ISD::AssertZext: {
3204 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3205 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3206 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3207 Known.Zero |= (~InMask);
3208 Known.One &= (~Known.Zero);
3209 break;
3210 }
3211 case ISD::AssertAlign: {
3212 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
3213 assert(LogOfAlign != 0);
3214 // If a node is guaranteed to be aligned, set the low zero bits
3215 // accordingly, as well as clearing the corresponding one bits.
3216 Known.Zero.setLowBits(LogOfAlign);
3217 Known.One.clearLowBits(LogOfAlign);
3218 break;
3219 }
3220 case ISD::FGETSIGN:
3221 // All bits are zero except the low bit.
3222 Known.Zero.setBitsFrom(1);
3223 break;
3224 case ISD::USUBO:
3225 case ISD::SSUBO:
3226 if (Op.getResNo() == 1) {
3227 // If we know the result of a setcc has the top bits zero, use this info.
3228 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3229 TargetLowering::ZeroOrOneBooleanContent &&
3230 BitWidth > 1)
3231 Known.Zero.setBitsFrom(1);
3232 break;
3233 }
3234 LLVM_FALLTHROUGH;
3235 case ISD::SUB:
3236 case ISD::SUBC: {
3237 assert(Op.getResNo() == 0 &&
3238 "We only compute knownbits for the difference here.");
3239
3240 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3241 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3242 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
3243 Known, Known2);
3244 break;
3245 }
3246 case ISD::UADDO:
3247 case ISD::SADDO:
3248 case ISD::ADDCARRY:
3249 if (Op.getResNo() == 1) {
3250 // If we know the result of a setcc has the top bits zero, use this info.
3251 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3252 TargetLowering::ZeroOrOneBooleanContent &&
3253 BitWidth > 1)
3254 Known.Zero.setBitsFrom(1);
3255 break;
3256 }
3257 LLVM_FALLTHROUGH;
3258 case ISD::ADD:
3259 case ISD::ADDC:
3260 case ISD::ADDE: {
3261 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3262
3263 // With ADDE and ADDCARRY, a carry bit may be added in.
3264 KnownBits Carry(1);
3265 if (Opcode == ISD::ADDE)
3266 // Can't track carry from glue; set carry to unknown.
3267 Carry.resetAll();
3268 else if (Opcode == ISD::ADDCARRY)
3269 // TODO: Compute known bits for the carry operand. Not sure if it is worth
3270 // the trouble (how often will we find a known carry bit).
This is
3271 // untested, but something like the following might work:
3272 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3273 // Carry = Carry.zextOrTrunc(1, false);
3274 Carry.resetAll();
3275 else
3276 Carry.setAllZero();
3277
3278 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3279 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3280 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3281 break;
3282 }
3283 case ISD::SREM: {
3284 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3285 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3286 Known = KnownBits::srem(Known, Known2);
3287 break;
3288 }
3289 case ISD::UREM: {
3290 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3291 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3292 Known = KnownBits::urem(Known, Known2);
3293 break;
3294 }
3295 case ISD::EXTRACT_ELEMENT: {
3296 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3297 const unsigned Index = Op.getConstantOperandVal(1);
3298 const unsigned EltBitWidth = Op.getValueSizeInBits();
3299
3300 // Remove the low part of the known bits mask.
3301 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3302 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3303
3304 // Remove the high part of the known bits mask.
3305 Known = Known.trunc(EltBitWidth);
3306 break;
3307 }
3308 case ISD::EXTRACT_VECTOR_ELT: {
3309 SDValue InVec = Op.getOperand(0);
3310 SDValue EltNo = Op.getOperand(1);
3311 EVT VecVT = InVec.getValueType();
3312 // computeKnownBits not yet implemented for scalable vectors.
3313 if (VecVT.isScalableVector())
3314 break;
3315 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3316 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3317
3318 // If BitWidth > EltBitWidth the value is any-extended, so we do not know
3319 // anything about the extended bits.
3320 if (BitWidth > EltBitWidth)
3321 Known = Known.trunc(EltBitWidth);
3322
3323 // If we know the element index, just demand that vector element, else for
3324 // an unknown element index, ignore DemandedElts and demand them all.
3325 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3326 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3327 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3328 DemandedSrcElts =
3329 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3330
3331 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
3332 if (BitWidth > EltBitWidth)
3333 Known = Known.anyext(BitWidth);
3334 break;
3335 }
3336 case ISD::INSERT_VECTOR_ELT: {
3337 // If we know the element index, split the demand between the
3338 // source vector and the inserted element, otherwise assume we need
3339 // the original demanded vector elements and the value.
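// e.g. with DemandedElts = 0b1011 and a constant insertion index of 2, the
// inserted value is not demanded at all and the known bits come from vector
// elements 0, 1 and 3 only.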
3340 SDValue InVec = Op.getOperand(0); 3341 SDValue InVal = Op.getOperand(1); 3342 SDValue EltNo = Op.getOperand(2); 3343 bool DemandedVal = true; 3344 APInt DemandedVecElts = DemandedElts; 3345 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3346 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3347 unsigned EltIdx = CEltNo->getZExtValue(); 3348 DemandedVal = !!DemandedElts[EltIdx]; 3349 DemandedVecElts.clearBit(EltIdx); 3350 } 3351 Known.One.setAllBits(); 3352 Known.Zero.setAllBits(); 3353 if (DemandedVal) { 3354 Known2 = computeKnownBits(InVal, Depth + 1); 3355 Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth)); 3356 } 3357 if (!!DemandedVecElts) { 3358 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1); 3359 Known = KnownBits::commonBits(Known, Known2); 3360 } 3361 break; 3362 } 3363 case ISD::BITREVERSE: { 3364 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3365 Known = Known2.reverseBits(); 3366 break; 3367 } 3368 case ISD::BSWAP: { 3369 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3370 Known = Known2.byteSwap(); 3371 break; 3372 } 3373 case ISD::ABS: { 3374 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3375 Known = Known2.abs(); 3376 break; 3377 } 3378 case ISD::UMIN: { 3379 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3380 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3381 Known = KnownBits::umin(Known, Known2); 3382 break; 3383 } 3384 case ISD::UMAX: { 3385 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3386 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3387 Known = KnownBits::umax(Known, Known2); 3388 break; 3389 } 3390 case ISD::SMIN: 3391 case ISD::SMAX: { 3392 // If we have a clamp pattern, we know that the number of sign bits will be 3393 // the minimum of the clamp min/max range. 3394 bool IsMax = (Opcode == ISD::SMAX); 3395 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3396 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3397 if (Op.getOperand(0).getOpcode() == (IsMax ? 
ISD::SMIN : ISD::SMAX))
3398 CstHigh =
3399 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3400 if (CstLow && CstHigh) {
3401 if (!IsMax)
3402 std::swap(CstLow, CstHigh);
3403
3404 const APInt &ValueLow = CstLow->getAPIntValue();
3405 const APInt &ValueHigh = CstHigh->getAPIntValue();
3406 if (ValueLow.sle(ValueHigh)) {
3407 unsigned LowSignBits = ValueLow.getNumSignBits();
3408 unsigned HighSignBits = ValueHigh.getNumSignBits();
3409 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3410 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3411 Known.One.setHighBits(MinSignBits);
3412 break;
3413 }
3414 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3415 Known.Zero.setHighBits(MinSignBits);
3416 break;
3417 }
3418 }
3419 }
3420
3421 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3422 if (Known.isUnknown()) break; // Early-out
3423 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3424 if (IsMax)
3425 Known = KnownBits::smax(Known, Known2);
3426 else
3427 Known = KnownBits::smin(Known, Known2);
3428 break;
3429 }
3430 case ISD::FrameIndex:
3431 case ISD::TargetFrameIndex:
3432 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
3433 Known, getMachineFunction());
3434 break;
3435
3436 default:
3437 if (Opcode < ISD::BUILTIN_OP_END)
3438 break;
3439 LLVM_FALLTHROUGH;
3440 case ISD::INTRINSIC_WO_CHAIN:
3441 case ISD::INTRINSIC_W_CHAIN:
3442 case ISD::INTRINSIC_VOID:
3443 // Allow the target to implement this method for its nodes.
3444 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3445 break;
3446 }
3447
3448 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3449 return Known;
3450 }
3451
3452 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3453 SDValue N1) const {
3454 // X + 0 never overflows.
3455 if (isNullConstant(N1))
3456 return OFK_Never;
3457
3458 KnownBits N1Known = computeKnownBits(N1);
3459 if (N1Known.Zero.getBoolValue()) {
3460 KnownBits N0Known = computeKnownBits(N0);
3461
3462 bool overflow;
3463 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3464 if (!overflow)
3465 return OFK_Never;
3466 }
3467
3468 // mulhi + 1 never overflows.
3469 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3470 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3471 return OFK_Never;
3472
3473 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3474 KnownBits N0Known = computeKnownBits(N0);
3475
3476 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3477 return OFK_Never;
3478 }
3479
3480 return OFK_Sometime;
3481 }
3482
3483 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3484 EVT OpVT = Val.getValueType();
3485 unsigned BitWidth = OpVT.getScalarSizeInBits();
3486
3487 // Is the constant a known power of 2?
3488 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3489 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3490
3491 // A left-shift of a constant one will have exactly one bit set because
3492 // shifting the bit off the end is undefined.
3493 if (Val.getOpcode() == ISD::SHL) {
3494 auto *C = isConstOrConstSplat(Val.getOperand(0));
3495 if (C && C->getAPIntValue() == 1)
3496 return true;
3497 }
3498
3499 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3500 // one bit set.
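// e.g. for i32, (srl 0x80000000, N) has exactly one bit set for any
// in-range N, since only zeros are shifted in from the top.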
3501 if (Val.getOpcode() == ISD::SRL) { 3502 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3503 if (C && C->getAPIntValue().isSignMask()) 3504 return true; 3505 } 3506 3507 // Are all operands of a build vector constant powers of two? 3508 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3509 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3510 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3511 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3512 return false; 3513 })) 3514 return true; 3515 3516 // More could be done here, though the above checks are enough 3517 // to handle some common cases. 3518 3519 // Fall back to computeKnownBits to catch other known cases. 3520 KnownBits Known = computeKnownBits(Val); 3521 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3522 } 3523 3524 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3525 EVT VT = Op.getValueType(); 3526 3527 // TODO: Assume we don't know anything for now. 3528 if (VT.isScalableVector()) 3529 return 1; 3530 3531 APInt DemandedElts = VT.isVector() 3532 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 3533 : APInt(1, 1); 3534 return ComputeNumSignBits(Op, DemandedElts, Depth); 3535 } 3536 3537 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3538 unsigned Depth) const { 3539 EVT VT = Op.getValueType(); 3540 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3541 unsigned VTBits = VT.getScalarSizeInBits(); 3542 unsigned NumElts = DemandedElts.getBitWidth(); 3543 unsigned Tmp, Tmp2; 3544 unsigned FirstAnswer = 1; 3545 3546 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3547 const APInt &Val = C->getAPIntValue(); 3548 return Val.getNumSignBits(); 3549 } 3550 3551 if (Depth >= MaxRecursionDepth) 3552 return 1; // Limit search depth. 3553 3554 if (!DemandedElts || VT.isScalableVector()) 3555 return 1; // No demanded elts, better to assume we don't know anything. 3556 3557 unsigned Opcode = Op.getOpcode(); 3558 switch (Opcode) { 3559 default: break; 3560 case ISD::AssertSext: 3561 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3562 return VTBits-Tmp+1; 3563 case ISD::AssertZext: 3564 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3565 return VTBits-Tmp; 3566 3567 case ISD::BUILD_VECTOR: 3568 Tmp = VTBits; 3569 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3570 if (!DemandedElts[i]) 3571 continue; 3572 3573 SDValue SrcOp = Op.getOperand(i); 3574 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1); 3575 3576 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3577 if (SrcOp.getValueSizeInBits() != VTBits) { 3578 assert(SrcOp.getValueSizeInBits() > VTBits && 3579 "Expected BUILD_VECTOR implicit truncation"); 3580 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3581 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3582 } 3583 Tmp = std::min(Tmp, Tmp2); 3584 } 3585 return Tmp; 3586 3587 case ISD::VECTOR_SHUFFLE: { 3588 // Collect the minimum number of sign bits that are shared by every vector 3589 // element referenced by the shuffle. 
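// e.g. if all elements of shuffle <4 x i32> A, B, <0, 1, 5, 6> are demanded,
// the result uses A[0], A[1], B[1] and B[2], so only those source elements
// feed the recursive queries below.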
3590 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3591 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3592 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3593 for (unsigned i = 0; i != NumElts; ++i) {
3594 int M = SVN->getMaskElt(i);
3595 if (!DemandedElts[i])
3596 continue;
3597 // For UNDEF elements, we don't know anything about the common state of
3598 // the shuffle result.
3599 if (M < 0)
3600 return 1;
3601 if ((unsigned)M < NumElts)
3602 DemandedLHS.setBit((unsigned)M % NumElts);
3603 else
3604 DemandedRHS.setBit((unsigned)M % NumElts);
3605 }
3606 Tmp = std::numeric_limits<unsigned>::max();
3607 if (!!DemandedLHS)
3608 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3609 if (!!DemandedRHS) {
3610 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3611 Tmp = std::min(Tmp, Tmp2);
3612 }
3613 // If we don't know anything, early out and try computeKnownBits fall-back.
3614 if (Tmp == 1)
3615 break;
3616 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3617 return Tmp;
3618 }
3619
3620 case ISD::BITCAST: {
3621 SDValue N0 = Op.getOperand(0);
3622 EVT SrcVT = N0.getValueType();
3623 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3624
3625 // Ignore bitcasts from unsupported types.
3626 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3627 break;
3628
3629 // Fast handling of 'identity' bitcasts.
3630 if (VTBits == SrcBits)
3631 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3632
3633 bool IsLE = getDataLayout().isLittleEndian();
3634
3635 // Bitcast 'large element' scalar/vector to 'small element' vector.
3636 if ((SrcBits % VTBits) == 0) {
3637 assert(VT.isVector() && "Expected bitcast to vector");
3638
3639 unsigned Scale = SrcBits / VTBits;
3640 APInt SrcDemandedElts(NumElts / Scale, 0);
3641 for (unsigned i = 0; i != NumElts; ++i)
3642 if (DemandedElts[i])
3643 SrcDemandedElts.setBit(i / Scale);
3644
3645 // Fast case - sign splat can be simply split across the small elements.
3646 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3647 if (Tmp == SrcBits)
3648 return VTBits;
3649
3650 // Slow case - determine how far the sign extends into each sub-element.
3651 Tmp2 = VTBits;
3652 for (unsigned i = 0; i != NumElts; ++i)
3653 if (DemandedElts[i]) {
3654 unsigned SubOffset = i % Scale;
3655 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3656 SubOffset = SubOffset * VTBits;
3657 if (Tmp <= SubOffset)
3658 return 1;
3659 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3660 }
3661 return Tmp2;
3662 }
3663 break;
3664 }
3665
3666 case ISD::SIGN_EXTEND:
3667 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3668 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3669 case ISD::SIGN_EXTEND_INREG:
3670 // Max of the input and what this extends.
3671 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3672 Tmp = VTBits-Tmp+1;
3673 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3674 return std::max(Tmp, Tmp2);
3675 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3676 SDValue Src = Op.getOperand(0);
3677 EVT SrcVT = Src.getValueType();
3678 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3679 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3680 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3681 }
3682 case ISD::SRA:
3683 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3684 // SRA X, C -> adds C sign bits.
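// e.g. if X has 8 known sign bits in i32, (sra X, 4) has at least 12,
// clamped to the bit width.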
3685 if (const APInt *ShAmt =
3686 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3687 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3688 return Tmp;
3689 case ISD::SHL:
3690 if (const APInt *ShAmt =
3691 getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
3692 // shl destroys sign bits; ensure it doesn't shift out all sign bits.
3693 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3694 if (ShAmt->ult(Tmp))
3695 return Tmp - ShAmt->getZExtValue();
3696 }
3697 break;
3698 case ISD::AND:
3699 case ISD::OR:
3700 case ISD::XOR: // NOT is handled here.
3701 // Logical binary ops preserve the number of sign bits at the worst.
3702 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3703 if (Tmp != 1) {
3704 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3705 FirstAnswer = std::min(Tmp, Tmp2);
3706 // We computed what we know about the sign bits as our first
3707 // answer. Now proceed to the generic code that uses
3708 // computeKnownBits, and pick whichever answer is better.
3709 }
3710 break;
3711
3712 case ISD::SELECT:
3713 case ISD::VSELECT:
3714 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3715 if (Tmp == 1) return 1; // Early out.
3716 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3717 return std::min(Tmp, Tmp2);
3718 case ISD::SELECT_CC:
3719 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3720 if (Tmp == 1) return 1; // Early out.
3721 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3722 return std::min(Tmp, Tmp2);
3723
3724 case ISD::SMIN:
3725 case ISD::SMAX: {
3726 // If we have a clamp pattern, we know that the number of sign bits will be
3727 // the minimum of the clamp min/max range.
3728 bool IsMax = (Opcode == ISD::SMAX);
3729 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3730 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3731 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3732 CstHigh =
3733 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3734 if (CstLow && CstHigh) {
3735 if (!IsMax)
3736 std::swap(CstLow, CstHigh);
3737 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3738 Tmp = CstLow->getAPIntValue().getNumSignBits();
3739 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3740 return std::min(Tmp, Tmp2);
3741 }
3742 }
3743
3744 // Fallback - just get the minimum number of sign bits of the operands.
3745 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3746 if (Tmp == 1)
3747 return 1; // Early out.
3748 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3749 return std::min(Tmp, Tmp2);
3750 }
3751 case ISD::UMIN:
3752 case ISD::UMAX:
3753 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3754 if (Tmp == 1)
3755 return 1; // Early out.
3756 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3757 return std::min(Tmp, Tmp2);
3758 case ISD::SADDO:
3759 case ISD::UADDO:
3760 case ISD::SSUBO:
3761 case ISD::USUBO:
3762 case ISD::SMULO:
3763 case ISD::UMULO:
3764 if (Op.getResNo() != 1)
3765 break;
3766 // The boolean result conforms to getBooleanContents. Fall through.
3767 // If setcc returns 0/-1, all bits are sign bits.
3768 // We know that we have an integer-based boolean since these operations
3769 // are only available for integers.
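// e.g. on a target whose booleans are ZeroOrNegativeOne, the i32 overflow
// result is either 0 or -1, so all 32 bits are sign bits.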
3770 if (TLI->getBooleanContents(VT.isVector(), false) ==
3771 TargetLowering::ZeroOrNegativeOneBooleanContent)
3772 return VTBits;
3773 break;
3774 case ISD::SETCC:
3775 case ISD::STRICT_FSETCC:
3776 case ISD::STRICT_FSETCCS: {
3777 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3778 // If setcc returns 0/-1, all bits are sign bits.
3779 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3780 TargetLowering::ZeroOrNegativeOneBooleanContent)
3781 return VTBits;
3782 break;
3783 }
3784 case ISD::ROTL:
3785 case ISD::ROTR:
3786 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3787
3788 // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
3789 if (Tmp == VTBits)
3790 return VTBits;
3791
3792 if (ConstantSDNode *C =
3793 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3794 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3795
3796 // Handle rotate right by N like a rotate left by VTBits-N.
3797 if (Opcode == ISD::ROTR)
3798 RotAmt = (VTBits - RotAmt) % VTBits;
3799
3800 // If we aren't rotating out all of the known-in sign bits, return the
3801 // number that are left. This handles rotl(sext(x), 1) for example.
3802 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3803 }
3804 break;
3805 case ISD::ADD:
3806 case ISD::ADDC:
3807 // Add can have at most one carry bit. Thus we know that the output
3808 // is, at worst, one more bit than the inputs.
3809 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3810 if (Tmp == 1) return 1; // Early out.
3811
3812 // Special case decrementing a value (ADD X, -1):
3813 if (ConstantSDNode *CRHS =
3814 isConstOrConstSplat(Op.getOperand(1), DemandedElts))
3815 if (CRHS->isAllOnesValue()) {
3816 KnownBits Known =
3817 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3818
3819 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3820 // sign bits set.
3821 if ((Known.Zero | 1).isAllOnesValue())
3822 return VTBits;
3823
3824 // If we are subtracting one from a positive number, there is no carry
3825 // out of the result.
3826 if (Known.isNonNegative())
3827 return Tmp;
3828 }
3829
3830 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3831 if (Tmp2 == 1) return 1; // Early out.
3832 return std::min(Tmp, Tmp2) - 1;
3833 case ISD::SUB:
3834 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3835 if (Tmp2 == 1) return 1; // Early out.
3836
3837 // Handle NEG.
3838 if (ConstantSDNode *CLHS =
3839 isConstOrConstSplat(Op.getOperand(0), DemandedElts))
3840 if (CLHS->isNullValue()) {
3841 KnownBits Known =
3842 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3843 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3844 // sign bits set.
3845 if ((Known.Zero | 1).isAllOnesValue())
3846 return VTBits;
3847
3848 // If the input is known to be positive (the sign bit is known clear),
3849 // the output of the NEG has the same number of sign bits as the input.
3850 if (Known.isNonNegative())
3851 return Tmp2;
3852
3853 // Otherwise, we treat this like a SUB.
3854 }
3855
3856 // Sub can have at most one carry bit. Thus we know that the output
3857 // is, at worst, one more bit than the inputs.
3858 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3859 if (Tmp == 1) return 1; // Early out.
3860 return std::min(Tmp, Tmp2) - 1;
3861 case ISD::MUL: {
3862 // The output of the Mul can be at most twice the valid bits in the inputs.
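// e.g. for i32 operands with 20 sign bits each, every input fits in
// 32 - 20 + 1 = 13 valid bits, the product fits in 26, and the result
// keeps 32 - 26 + 1 = 7 known sign bits.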
3863 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3864 if (SignBitsOp0 == 1)
3865 break;
3866 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3867 if (SignBitsOp1 == 1)
3868 break;
3869 unsigned OutValidBits =
3870 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
3871 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
3872 }
3873 case ISD::TRUNCATE: {
3874 // Check if the sign bits of source go down as far as the truncated value.
3875 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3876 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3877 if (NumSrcSignBits > (NumSrcBits - VTBits))
3878 return NumSrcSignBits - (NumSrcBits - VTBits);
3879 break;
3880 }
3881 case ISD::EXTRACT_ELEMENT: {
3882 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3883 const int BitWidth = Op.getValueSizeInBits();
3884 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3885
3886 // Get the reverse index (starting from 1): Op1 indexes elements from the
3887 // little end, while the sign sits at the big end.
3888 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3889
3890 // If the sign portion ends in our element, the subtraction gives the
3891 // correct result; otherwise it yields a negative or out-of-range result.
3892 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3893 }
3894 case ISD::INSERT_VECTOR_ELT: {
3895 // If we know the element index, split the demand between the
3896 // source vector and the inserted element, otherwise assume we need
3897 // the original demanded vector elements and the value.
3898 SDValue InVec = Op.getOperand(0);
3899 SDValue InVal = Op.getOperand(1);
3900 SDValue EltNo = Op.getOperand(2);
3901 bool DemandedVal = true;
3902 APInt DemandedVecElts = DemandedElts;
3903 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3904 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3905 unsigned EltIdx = CEltNo->getZExtValue();
3906 DemandedVal = !!DemandedElts[EltIdx];
3907 DemandedVecElts.clearBit(EltIdx);
3908 }
3909 Tmp = std::numeric_limits<unsigned>::max();
3910 if (DemandedVal) {
3911 // TODO - handle implicit truncation of inserted elements.
3912 if (InVal.getScalarValueSizeInBits() != VTBits)
3913 break;
3914 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3915 Tmp = std::min(Tmp, Tmp2);
3916 }
3917 if (!!DemandedVecElts) {
3918 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
3919 Tmp = std::min(Tmp, Tmp2);
3920 }
3921 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3922 return Tmp;
3923 }
3924 case ISD::EXTRACT_VECTOR_ELT: {
3925 SDValue InVec = Op.getOperand(0);
3926 SDValue EltNo = Op.getOperand(1);
3927 EVT VecVT = InVec.getValueType();
3928 const unsigned BitWidth = Op.getValueSizeInBits();
3929 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3930 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3931
3932 // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3933 // anything about sign bits. But if the sizes match we can derive knowledge
3934 // about sign bits from the vector operand.
3935 if (BitWidth != EltBitWidth)
3936 break;
3937
3938 // If we know the element index, just demand that vector element, else for
3939 // an unknown element index, ignore DemandedElts and demand them all.
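// e.g. (extract_vector_elt <4 x i32> V, 2) demands just 0b0100 of V, while
// a variable index conservatively demands all elements (0b1111).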
3940 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3941 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3942 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3943 DemandedSrcElts =
3944 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3945
3946 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3947 }
3948 case ISD::EXTRACT_SUBVECTOR: {
3949 // Offset the demanded elts by the subvector index.
3950 SDValue Src = Op.getOperand(0);
3951 // Bail until we can represent demanded elements for scalable vectors.
3952 if (Src.getValueType().isScalableVector())
3953 break;
3954 uint64_t Idx = Op.getConstantOperandVal(1);
3955 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3956 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3957 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3958 }
3959 case ISD::CONCAT_VECTORS: {
3960 // Determine the minimum number of sign bits across all demanded
3961 // elts of the input vectors. Early out if the result is already 1.
3962 Tmp = std::numeric_limits<unsigned>::max();
3963 EVT SubVectorVT = Op.getOperand(0).getValueType();
3964 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3965 unsigned NumSubVectors = Op.getNumOperands();
3966 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3967 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3968 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3969 if (!DemandedSub)
3970 continue;
3971 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3972 Tmp = std::min(Tmp, Tmp2);
3973 }
3974 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3975 return Tmp;
3976 }
3977 case ISD::INSERT_SUBVECTOR: {
3978 // Demand any elements from the subvector and the remainder from the src
3979 // it's inserted into.
3980 SDValue Src = Op.getOperand(0);
3981 SDValue Sub = Op.getOperand(1);
3982 uint64_t Idx = Op.getConstantOperandVal(2);
3983 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3984 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3985 APInt DemandedSrcElts = DemandedElts;
3986 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
3987
3988 Tmp = std::numeric_limits<unsigned>::max();
3989 if (!!DemandedSubElts) {
3990 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
3991 if (Tmp == 1)
3992 return 1; // early-out
3993 }
3994 if (!!DemandedSrcElts) {
3995 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3996 Tmp = std::min(Tmp, Tmp2);
3997 }
3998 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3999 return Tmp;
4000 }
4001 }
4002
4003 // If we are looking at the loaded value of the SDNode.
4004 if (Op.getResNo() == 0) {
4005 // Handle LOADX separately here. EXTLOAD case will fallthrough.
4006 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
4007 unsigned ExtType = LD->getExtensionType();
4008 switch (ExtType) {
4009 default: break;
4010 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
4011 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4012 return VTBits - Tmp + 1;
4013 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
4014 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4015 return VTBits - Tmp;
4016 case ISD::NON_EXTLOAD:
4017 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
4018 // We only need to handle vectors - computeKnownBits should handle
4019 // scalar cases.
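// e.g. a <4 x i32> constant-pool load of <1, 2, -3, 4> gives
// min(31, 30, 30, 29) = 29 sign bits across the demanded elements.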
4020 Type *CstTy = Cst->getType(); 4021 if (CstTy->isVectorTy() && 4022 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) { 4023 Tmp = VTBits; 4024 for (unsigned i = 0; i != NumElts; ++i) { 4025 if (!DemandedElts[i]) 4026 continue; 4027 if (Constant *Elt = Cst->getAggregateElement(i)) { 4028 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 4029 const APInt &Value = CInt->getValue(); 4030 Tmp = std::min(Tmp, Value.getNumSignBits()); 4031 continue; 4032 } 4033 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 4034 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 4035 Tmp = std::min(Tmp, Value.getNumSignBits()); 4036 continue; 4037 } 4038 } 4039 // Unknown type. Conservatively assume no bits match sign bit. 4040 return 1; 4041 } 4042 return Tmp; 4043 } 4044 } 4045 break; 4046 } 4047 } 4048 } 4049 4050 // Allow the target to implement this method for its nodes. 4051 if (Opcode >= ISD::BUILTIN_OP_END || 4052 Opcode == ISD::INTRINSIC_WO_CHAIN || 4053 Opcode == ISD::INTRINSIC_W_CHAIN || 4054 Opcode == ISD::INTRINSIC_VOID) { 4055 unsigned NumBits = 4056 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 4057 if (NumBits > 1) 4058 FirstAnswer = std::max(FirstAnswer, NumBits); 4059 } 4060 4061 // Finally, if we can prove that the top bits of the result are 0's or 1's, 4062 // use this information. 4063 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); 4064 4065 APInt Mask; 4066 if (Known.isNonNegative()) { // sign bit is 0 4067 Mask = Known.Zero; 4068 } else if (Known.isNegative()) { // sign bit is 1; 4069 Mask = Known.One; 4070 } else { 4071 // Nothing known. 4072 return FirstAnswer; 4073 } 4074 4075 // Okay, we know that the sign bit in Mask is set. Use CLO to determine 4076 // the number of identical bits in the top of the input value. 4077 Mask <<= Mask.getBitWidth()-VTBits; 4078 return std::max(FirstAnswer, Mask.countLeadingOnes()); 4079 } 4080 4081 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 4082 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 4083 !isa<ConstantSDNode>(Op.getOperand(1))) 4084 return false; 4085 4086 if (Op.getOpcode() == ISD::OR && 4087 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) 4088 return false; 4089 4090 return true; 4091 } 4092 4093 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { 4094 // If we're told that NaNs won't happen, assume they won't. 4095 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) 4096 return true; 4097 4098 if (Depth >= MaxRecursionDepth) 4099 return false; // Limit search depth. 4100 4101 // TODO: Handle vectors. 4102 // If the value is a constant, we can obviously see if it is a NaN or not. 
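// e.g. the constant 1.0 is trivially never NaN, and a quiet NaN constant is
// still acceptable when the caller only asks about signaling NaNs.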
4103 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4104 return !C->getValueAPF().isNaN() ||
4105 (SNaN && !C->getValueAPF().isSignaling());
4106 }
4107
4108 unsigned Opcode = Op.getOpcode();
4109 switch (Opcode) {
4110 case ISD::FADD:
4111 case ISD::FSUB:
4112 case ISD::FMUL:
4113 case ISD::FDIV:
4114 case ISD::FREM:
4115 case ISD::FSIN:
4116 case ISD::FCOS: {
4117 if (SNaN)
4118 return true;
4119 // TODO: Need isKnownNeverInfinity
4120 return false;
4121 }
4122 case ISD::FCANONICALIZE:
4123 case ISD::FEXP:
4124 case ISD::FEXP2:
4125 case ISD::FTRUNC:
4126 case ISD::FFLOOR:
4127 case ISD::FCEIL:
4128 case ISD::FROUND:
4129 case ISD::FROUNDEVEN:
4130 case ISD::FRINT:
4131 case ISD::FNEARBYINT: {
4132 if (SNaN)
4133 return true;
4134 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4135 }
4136 case ISD::FABS:
4137 case ISD::FNEG:
4138 case ISD::FCOPYSIGN: {
4139 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4140 }
4141 case ISD::SELECT:
4142 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4143 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4144 case ISD::FP_EXTEND:
4145 case ISD::FP_ROUND: {
4146 if (SNaN)
4147 return true;
4148 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4149 }
4150 case ISD::SINT_TO_FP:
4151 case ISD::UINT_TO_FP:
4152 return true;
4153 case ISD::FMA:
4154 case ISD::FMAD: {
4155 if (SNaN)
4156 return true;
4157 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4158 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4159 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4160 }
4161 case ISD::FSQRT: // Need to prove the operand is known non-negative.
4162 case ISD::FLOG:
4163 case ISD::FLOG2:
4164 case ISD::FLOG10:
4165 case ISD::FPOWI:
4166 case ISD::FPOW: {
4167 if (SNaN)
4168 return true;
4169 // TODO: Refine on operand
4170 return false;
4171 }
4172 case ISD::FMINNUM:
4173 case ISD::FMAXNUM: {
4174 // Only one needs to be known not-NaN, since it will be returned if the
4175 // other ends up being one.
4176 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4177 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4178 }
4179 case ISD::FMINNUM_IEEE:
4180 case ISD::FMAXNUM_IEEE: {
4181 if (SNaN)
4182 return true;
4183 // This can return a NaN if either operand is an sNaN, or if both operands
4184 // are NaN.
4185 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4186 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4187 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4188 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4189 }
4190 case ISD::FMINIMUM:
4191 case ISD::FMAXIMUM: {
4192 // TODO: Does this quiet or return the original NaN as-is?
4193 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4194 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4195 }
4196 case ISD::EXTRACT_VECTOR_ELT: {
4197 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4198 }
4199 default:
4200 if (Opcode >= ISD::BUILTIN_OP_END ||
4201 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4202 Opcode == ISD::INTRINSIC_W_CHAIN ||
4203 Opcode == ISD::INTRINSIC_VOID) {
4204 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4205 }
4206
4207 return false;
4208 }
4209 }
4210
4211 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4212 assert(Op.getValueType().isFloatingPoint() &&
4213 "Floating point type expected");
4214
4215 // If the value is a constant, we can obviously see if it is a zero or not.
4216 // TODO: Add BuildVector support.
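// e.g. the constant 1.5 proves never-zero, but -0.0 is still a zero and
// correctly reports false here.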
4217 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4218 return !C->isZero();
4219 return false;
4220 }
4221
4222 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4223 assert(!Op.getValueType().isFloatingPoint() &&
4224 "Floating point types unsupported - use isKnownNeverZeroFloat");
4225
4226 // If the value is a constant, we can obviously see if it is a zero or not.
4227 if (ISD::matchUnaryPredicate(
4228 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4229 return true;
4230
4231 // TODO: Recognize more cases here.
4232 switch (Op.getOpcode()) {
4233 default: break;
4234 case ISD::OR:
4235 if (isKnownNeverZero(Op.getOperand(1)) ||
4236 isKnownNeverZero(Op.getOperand(0)))
4237 return true;
4238 break;
4239 }
4240
4241 return false;
4242 }
4243
4244 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4245 // Check the obvious case.
4246 if (A == B) return true;
4247
4248 // Check for negative and positive zero.
4249 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4250 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4251 if (CA->isZero() && CB->isZero()) return true;
4252
4253 // Otherwise they may not be equal.
4254 return false;
4255 }
4256
4257 // FIXME: unify with llvm::haveNoCommonBitsSet.
4258 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4259 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4260 assert(A.getValueType() == B.getValueType() &&
4261 "Values must have the same type");
4262 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4263 }
4264
4265 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4266 ArrayRef<SDValue> Ops,
4267 SelectionDAG &DAG) {
4268 int NumOps = Ops.size();
4269 assert(NumOps != 0 && "Can't build an empty vector!");
4270 assert(!VT.isScalableVector() &&
4271 "BUILD_VECTOR cannot be used with scalable types");
4272 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4273 "Incorrect element count in BUILD_VECTOR!");
4274
4275 // BUILD_VECTOR of UNDEFs is UNDEF.
4276 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4277 return DAG.getUNDEF(VT);
4278
4279 // BUILD_VECTOR of sequential extracts from the same-typed vector is an identity.
4280 SDValue IdentitySrc;
4281 bool IsIdentity = true;
4282 for (int i = 0; i != NumOps; ++i) {
4283 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4284 Ops[i].getOperand(0).getValueType() != VT ||
4285 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4286 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4287 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4288 IsIdentity = false;
4289 break;
4290 }
4291 IdentitySrc = Ops[i].getOperand(0);
4292 }
4293 if (IsIdentity)
4294 return IdentitySrc;
4295
4296 return SDValue();
4297 }
4298
4299 /// Try to simplify vector concatenation to an input value, undef, or build
4300 /// vector.
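/// e.g. concatenating in-order extract_subvectors of the same source X folds
/// back to X, and a concat of build_vectors becomes one wider build_vector.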
4301 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4302 ArrayRef<SDValue> Ops,
4303 SelectionDAG &DAG) {
4304 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4305 assert(llvm::all_of(Ops,
4306 [Ops](SDValue Op) {
4307 return Ops[0].getValueType() == Op.getValueType();
4308 }) &&
4309 "Concatenation of vectors with inconsistent value types!");
4310 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
4311 VT.getVectorElementCount() &&
4312 "Incorrect element count in vector concatenation!");
4313
4314 if (Ops.size() == 1)
4315 return Ops[0];
4316
4317 // Concat of UNDEFs is UNDEF.
4318 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4319 return DAG.getUNDEF(VT);
4320
4321 // Scan the operands and look for extract operations from a single source
4322 // that correspond to insertion at the same location via this concatenation:
4323 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4324 SDValue IdentitySrc;
4325 bool IsIdentity = true;
4326 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4327 SDValue Op = Ops[i];
4328 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
4329 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4330 Op.getOperand(0).getValueType() != VT ||
4331 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4332 Op.getConstantOperandVal(1) != IdentityIndex) {
4333 IsIdentity = false;
4334 break;
4335 }
4336 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4337 "Unexpected identity source vector for concat of extracts");
4338 IdentitySrc = Op.getOperand(0);
4339 }
4340 if (IsIdentity) {
4341 assert(IdentitySrc && "Failed to set source vector of extracts");
4342 return IdentitySrc;
4343 }
4344
4345 // The code below this point is only designed to work for fixed width
4346 // vectors, so we bail out for now.
4347 if (VT.isScalableVector())
4348 return SDValue();
4349
4350 // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
4351 // simplified to one big BUILD_VECTOR.
4352 // FIXME: Add support for SCALAR_TO_VECTOR as well.
4353 EVT SVT = VT.getScalarType();
4354 SmallVector<SDValue, 16> Elts;
4355 for (SDValue Op : Ops) {
4356 EVT OpVT = Op.getValueType();
4357 if (Op.isUndef())
4358 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4359 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4360 Elts.append(Op->op_begin(), Op->op_end());
4361 else
4362 return SDValue();
4363 }
4364
4365 // BUILD_VECTOR requires all inputs to be of the same type; find the
4366 // maximum type and extend them all.
4367 for (SDValue Op : Elts)
4368 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4369
4370 if (SVT.bitsGT(VT.getScalarType())) {
4371 for (SDValue &Op : Elts) {
4372 if (Op.isUndef())
4373 Op = DAG.getUNDEF(SVT);
4374 else
4375 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4376 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4377 : DAG.getSExtOrTrunc(Op, DL, SVT);
4378 }
4379 }
4380
4381 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4382 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4383 return V;
4384 }
4385
4386 /// Gets or creates the specified node.
4387 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4388 FoldingSetNodeID ID;
4389 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4390 void *IP = nullptr;
4391 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4392 return SDValue(E, 0);
4393
4394 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4395 getVTList(VT));
4396 CSEMap.InsertNode(N, IP);
4397
4398 InsertNode(N);
4399 SDValue V = SDValue(N, 0);
4400 NewSDValueDbgMsg(V, "Creating new node: ", this);
4401 return V;
4402 }
4403
4404 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4405 SDValue Operand) {
4406 SDNodeFlags Flags;
4407 if (Inserter)
4408 Flags = Inserter->getFlags();
4409 return getNode(Opcode, DL, VT, Operand, Flags);
4410 }
4411
4412 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4413 SDValue Operand, const SDNodeFlags Flags) {
4414 // Constant fold unary operations with an integer constant operand. Even
4415 // opaque constants will be folded, because the folding of unary operations
4416 // doesn't create new constants with different values. Nevertheless, the
4417 // opaque flag is preserved during folding to prevent future folding with
4418 // other constants.
4419 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4420 const APInt &Val = C->getAPIntValue();
4421 switch (Opcode) {
4422 default: break;
4423 case ISD::SIGN_EXTEND:
4424 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4425 C->isTargetOpcode(), C->isOpaque());
4426 case ISD::TRUNCATE:
4427 if (C->isOpaque())
4428 break;
4429 LLVM_FALLTHROUGH;
4430 case ISD::ANY_EXTEND:
4431 case ISD::ZERO_EXTEND:
4432 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4433 C->isTargetOpcode(), C->isOpaque());
4434 case ISD::UINT_TO_FP:
4435 case ISD::SINT_TO_FP: {
4436 APFloat apf(EVTToAPFloatSemantics(VT),
4437 APInt::getNullValue(VT.getSizeInBits()));
4438 (void)apf.convertFromAPInt(Val,
4439 Opcode==ISD::SINT_TO_FP,
4440 APFloat::rmNearestTiesToEven);
4441 return getConstantFP(apf, DL, VT);
4442 }
4443 case ISD::BITCAST:
4444 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4445 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4446 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4447 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4448 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4449 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4450 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4451 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4452 break;
4453 case ISD::ABS:
4454 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4455 C->isOpaque());
4456 case ISD::BITREVERSE:
4457 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4458 C->isOpaque());
4459 case ISD::BSWAP:
4460 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4461 C->isOpaque());
4462 case ISD::CTPOP:
4463 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4464 C->isOpaque());
4465 case ISD::CTLZ:
4466 case ISD::CTLZ_ZERO_UNDEF:
4467 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4468 C->isOpaque());
4469 case ISD::CTTZ:
4470 case ISD::CTTZ_ZERO_UNDEF:
4471 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4472 C->isOpaque());
4473 case ISD::FP16_TO_FP: {
4474 bool Ignored;
4475 APFloat FPV(APFloat::IEEEhalf(),
4476 (Val.getBitWidth() == 16) ?
Val : Val.trunc(16)); 4477 4478 // This can return overflow, underflow, or inexact; we don't care. 4479 // FIXME need to be more flexible about rounding mode. 4480 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4481 APFloat::rmNearestTiesToEven, &Ignored); 4482 return getConstantFP(FPV, DL, VT); 4483 } 4484 } 4485 } 4486 4487 // Constant fold unary operations with a floating point constant operand. 4488 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4489 APFloat V = C->getValueAPF(); // make copy 4490 switch (Opcode) { 4491 case ISD::FNEG: 4492 V.changeSign(); 4493 return getConstantFP(V, DL, VT); 4494 case ISD::FABS: 4495 V.clearSign(); 4496 return getConstantFP(V, DL, VT); 4497 case ISD::FCEIL: { 4498 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4499 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4500 return getConstantFP(V, DL, VT); 4501 break; 4502 } 4503 case ISD::FTRUNC: { 4504 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4505 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4506 return getConstantFP(V, DL, VT); 4507 break; 4508 } 4509 case ISD::FFLOOR: { 4510 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4511 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4512 return getConstantFP(V, DL, VT); 4513 break; 4514 } 4515 case ISD::FP_EXTEND: { 4516 bool ignored; 4517 // This can return overflow, underflow, or inexact; we don't care. 4518 // FIXME need to be more flexible about rounding mode. 4519 (void)V.convert(EVTToAPFloatSemantics(VT), 4520 APFloat::rmNearestTiesToEven, &ignored); 4521 return getConstantFP(V, DL, VT); 4522 } 4523 case ISD::FP_TO_SINT: 4524 case ISD::FP_TO_UINT: { 4525 bool ignored; 4526 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4527 // FIXME need to be more flexible about rounding mode. 4528 APFloat::opStatus s = 4529 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4530 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4531 break; 4532 return getConstant(IntVal, DL, VT); 4533 } 4534 case ISD::BITCAST: 4535 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4536 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4537 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4538 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4539 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4540 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4541 break; 4542 case ISD::FP_TO_FP16: { 4543 bool Ignored; 4544 // This can return overflow, underflow, or inexact; we don't care. 4545 // FIXME need to be more flexible about rounding mode. 4546 (void)V.convert(APFloat::IEEEhalf(), 4547 APFloat::rmNearestTiesToEven, &Ignored); 4548 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4549 } 4550 } 4551 } 4552 4553 // Constant fold unary operations with a vector integer or float operand. 4554 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4555 if (BV->isConstant()) { 4556 switch (Opcode) { 4557 default: 4558 // FIXME: Entirely reasonable to perform folding of other unary 4559 // operations here as the need arises. 
4560 break;
4561 case ISD::FNEG:
4562 case ISD::FABS:
4563 case ISD::FCEIL:
4564 case ISD::FTRUNC:
4565 case ISD::FFLOOR:
4566 case ISD::FP_EXTEND:
4567 case ISD::FP_TO_SINT:
4568 case ISD::FP_TO_UINT:
4569 case ISD::TRUNCATE:
4570 case ISD::ANY_EXTEND:
4571 case ISD::ZERO_EXTEND:
4572 case ISD::SIGN_EXTEND:
4573 case ISD::UINT_TO_FP:
4574 case ISD::SINT_TO_FP:
4575 case ISD::ABS:
4576 case ISD::BITREVERSE:
4577 case ISD::BSWAP:
4578 case ISD::CTLZ:
4579 case ISD::CTLZ_ZERO_UNDEF:
4580 case ISD::CTTZ:
4581 case ISD::CTTZ_ZERO_UNDEF:
4582 case ISD::CTPOP: {
4583 SDValue Ops = { Operand };
4584 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4585 return Fold;
4586 }
4587 }
4588 }
4589 }
4590
4591 unsigned OpOpcode = Operand.getNode()->getOpcode();
4592 switch (Opcode) {
4593 case ISD::FREEZE:
4594 assert(VT == Operand.getValueType() && "Unexpected VT!");
4595 break;
4596 case ISD::TokenFactor:
4597 case ISD::MERGE_VALUES:
4598 case ISD::CONCAT_VECTORS:
4599 return Operand; // Factor, merge or concat of one node? No need.
4600 case ISD::BUILD_VECTOR: {
4601 // Attempt to simplify BUILD_VECTOR.
4602 SDValue Ops[] = {Operand};
4603 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4604 return V;
4605 break;
4606 }
4607 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4608 case ISD::FP_EXTEND:
4609 assert(VT.isFloatingPoint() &&
4610 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4611 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4612 assert((!VT.isVector() ||
4613 VT.getVectorElementCount() ==
4614 Operand.getValueType().getVectorElementCount()) &&
4615 "Vector element count mismatch!");
4616 assert(Operand.getValueType().bitsLT(VT) &&
4617 "Invalid fpext node, dst < src!");
4618 if (Operand.isUndef())
4619 return getUNDEF(VT);
4620 break;
4621 case ISD::FP_TO_SINT:
4622 case ISD::FP_TO_UINT:
4623 if (Operand.isUndef())
4624 return getUNDEF(VT);
4625 break;
4626 case ISD::SINT_TO_FP:
4627 case ISD::UINT_TO_FP:
4628 // [us]itofp(undef) = 0, because the result value is bounded.
4629 if (Operand.isUndef())
4630 return getConstantFP(0.0, DL, VT);
4631 break;
4632 case ISD::SIGN_EXTEND:
4633 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4634 "Invalid SIGN_EXTEND!");
4635 assert(VT.isVector() == Operand.getValueType().isVector() &&
4636 "SIGN_EXTEND result type should be vector iff the operand "
4637 "type is vector!");
4638 if (Operand.getValueType() == VT) return Operand; // noop extension
4639 assert((!VT.isVector() ||
4640 VT.getVectorElementCount() ==
4641 Operand.getValueType().getVectorElementCount()) &&
4642 "Vector element count mismatch!");
4643 assert(Operand.getValueType().bitsLT(VT) &&
4644 "Invalid sext node, dst < src!");
4645 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4646 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4647 else if (OpOpcode == ISD::UNDEF)
4648 // sext(undef) = 0, because the top bits will all be the same.
4649 return getConstant(0, DL, VT);
4650 break;
4651 case ISD::ZERO_EXTEND:
4652 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4653 "Invalid ZERO_EXTEND!");
4654 assert(VT.isVector() == Operand.getValueType().isVector() &&
4655 "ZERO_EXTEND result type should be vector iff the operand "
4656 "type is vector!");
4657 if (Operand.getValueType() == VT) return Operand; // noop extension
4658 assert((!VT.isVector() ||
4659 VT.getVectorElementCount() ==
4660 Operand.getValueType().getVectorElementCount()) &&
4661 "Vector element count mismatch!");
4662 assert(Operand.getValueType().bitsLT(VT) &&
4663 "Invalid zext node, dst < src!");
4664 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4665 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4666 else if (OpOpcode == ISD::UNDEF)
4667 // zext(undef) = 0, because the top bits will be zero.
4668 return getConstant(0, DL, VT);
4669 break;
4670 case ISD::ANY_EXTEND:
4671 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4672 "Invalid ANY_EXTEND!");
4673 assert(VT.isVector() == Operand.getValueType().isVector() &&
4674 "ANY_EXTEND result type should be vector iff the operand "
4675 "type is vector!");
4676 if (Operand.getValueType() == VT) return Operand; // noop extension
4677 assert((!VT.isVector() ||
4678 VT.getVectorElementCount() ==
4679 Operand.getValueType().getVectorElementCount()) &&
4680 "Vector element count mismatch!");
4681 assert(Operand.getValueType().bitsLT(VT) &&
4682 "Invalid anyext node, dst < src!");
4683
4684 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4685 OpOpcode == ISD::ANY_EXTEND)
4686 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4687 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4688 else if (OpOpcode == ISD::UNDEF)
4689 return getUNDEF(VT);
4690
4691 // (ext (trunc x)) -> x
4692 if (OpOpcode == ISD::TRUNCATE) {
4693 SDValue OpOp = Operand.getOperand(0);
4694 if (OpOp.getValueType() == VT) {
4695 transferDbgValues(Operand, OpOp);
4696 return OpOp;
4697 }
4698 }
4699 break;
4700 case ISD::TRUNCATE:
4701 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4702 "Invalid TRUNCATE!");
4703 assert(VT.isVector() == Operand.getValueType().isVector() &&
4704 "TRUNCATE result type should be vector iff the operand "
4705 "type is vector!");
4706 if (Operand.getValueType() == VT) return Operand; // noop truncate
4707 assert((!VT.isVector() ||
4708 VT.getVectorElementCount() ==
4709 Operand.getValueType().getVectorElementCount()) &&
4710 "Vector element count mismatch!");
4711 assert(Operand.getValueType().bitsGT(VT) &&
4712 "Invalid truncate node, src < dst!");
4713 if (OpOpcode == ISD::TRUNCATE)
4714 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4715 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4716 OpOpcode == ISD::ANY_EXTEND) {
4717 // If the source is smaller than the dest, we still need an extend.
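// e.g. (trunc (sext i8 X to i64) to i32) --> (sext i8 X to i32), while
// (trunc (zext i32 X to i64) to i32) --> X.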
4718 if (Operand.getOperand(0).getValueType().getScalarType() 4719 .bitsLT(VT.getScalarType())) 4720 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4721 if (Operand.getOperand(0).getValueType().bitsGT(VT)) 4722 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4723 return Operand.getOperand(0); 4724 } 4725 if (OpOpcode == ISD::UNDEF) 4726 return getUNDEF(VT); 4727 break; 4728 case ISD::ANY_EXTEND_VECTOR_INREG: 4729 case ISD::ZERO_EXTEND_VECTOR_INREG: 4730 case ISD::SIGN_EXTEND_VECTOR_INREG: 4731 assert(VT.isVector() && "This DAG node is restricted to vector types."); 4732 assert(Operand.getValueType().bitsLE(VT) && 4733 "The input must be the same size or smaller than the result."); 4734 assert(VT.getVectorNumElements() < 4735 Operand.getValueType().getVectorNumElements() && 4736 "The destination vector type must have fewer lanes than the input."); 4737 break; 4738 case ISD::ABS: 4739 assert(VT.isInteger() && VT == Operand.getValueType() && 4740 "Invalid ABS!"); 4741 if (OpOpcode == ISD::UNDEF) 4742 return getUNDEF(VT); 4743 break; 4744 case ISD::BSWAP: 4745 assert(VT.isInteger() && VT == Operand.getValueType() && 4746 "Invalid BSWAP!"); 4747 assert((VT.getScalarSizeInBits() % 16 == 0) && 4748 "BSWAP types must be a multiple of 16 bits!"); 4749 if (OpOpcode == ISD::UNDEF) 4750 return getUNDEF(VT); 4751 break; 4752 case ISD::BITREVERSE: 4753 assert(VT.isInteger() && VT == Operand.getValueType() && 4754 "Invalid BITREVERSE!"); 4755 if (OpOpcode == ISD::UNDEF) 4756 return getUNDEF(VT); 4757 break; 4758 case ISD::BITCAST: 4759 // Basic sanity checking. 4760 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 4761 "Cannot BITCAST between types of different sizes!"); 4762 if (VT == Operand.getValueType()) return Operand; // noop conversion. 4763 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4764 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4765 if (OpOpcode == ISD::UNDEF) 4766 return getUNDEF(VT); 4767 break; 4768 case ISD::SCALAR_TO_VECTOR: 4769 assert(VT.isVector() && !Operand.getValueType().isVector() && 4770 (VT.getVectorElementType() == Operand.getValueType() || 4771 (VT.getVectorElementType().isInteger() && 4772 Operand.getValueType().isInteger() && 4773 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4774 "Illegal SCALAR_TO_VECTOR node!"); 4775 if (OpOpcode == ISD::UNDEF) 4776 return getUNDEF(VT); 4777 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4778 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4779 isa<ConstantSDNode>(Operand.getOperand(1)) && 4780 Operand.getConstantOperandVal(1) == 0 && 4781 Operand.getOperand(0).getValueType() == VT) 4782 return Operand.getOperand(0); 4783 break; 4784 case ISD::FNEG: 4785 // Negation of an unknown bag of bits is still completely undefined. 
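// For instance, if the operand is undef its bits are already arbitrary, and flipping the sign bit of an arbitrary pattern is still arbitrary, so FNEG(undef) folds to undef below rather than to any particular constant.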
4786 if (OpOpcode == ISD::UNDEF) 4787 return getUNDEF(VT); 4788 4789 if (OpOpcode == ISD::FNEG) // --X -> X 4790 return Operand.getOperand(0); 4791 break; 4792 case ISD::FABS: 4793 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4794 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4795 break; 4796 case ISD::VSCALE: 4797 assert(VT == Operand.getValueType() && "Unexpected VT!"); 4798 break; 4799 case ISD::VECREDUCE_SMIN: 4800 case ISD::VECREDUCE_UMAX: 4801 if (Operand.getValueType().getScalarType() == MVT::i1) 4802 return getNode(ISD::VECREDUCE_OR, DL, VT, Operand); 4803 break; 4804 case ISD::VECREDUCE_SMAX: 4805 case ISD::VECREDUCE_UMIN: 4806 if (Operand.getValueType().getScalarType() == MVT::i1) 4807 return getNode(ISD::VECREDUCE_AND, DL, VT, Operand); 4808 break; 4809 } 4810 4811 SDNode *N; 4812 SDVTList VTs = getVTList(VT); 4813 SDValue Ops[] = {Operand}; 4814 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4815 FoldingSetNodeID ID; 4816 AddNodeIDNode(ID, Opcode, VTs, Ops); 4817 void *IP = nullptr; 4818 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4819 E->intersectFlagsWith(Flags); 4820 return SDValue(E, 0); 4821 } 4822 4823 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4824 N->setFlags(Flags); 4825 createOperands(N, Ops); 4826 CSEMap.InsertNode(N, IP); 4827 } else { 4828 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4829 createOperands(N, Ops); 4830 } 4831 4832 InsertNode(N); 4833 SDValue V = SDValue(N, 0); 4834 NewSDValueDbgMsg(V, "Creating new node: ", this); 4835 return V; 4836 } 4837 4838 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, 4839 const APInt &C2) { 4840 switch (Opcode) { 4841 case ISD::ADD: return C1 + C2; 4842 case ISD::SUB: return C1 - C2; 4843 case ISD::MUL: return C1 * C2; 4844 case ISD::AND: return C1 & C2; 4845 case ISD::OR: return C1 | C2; 4846 case ISD::XOR: return C1 ^ C2; 4847 case ISD::SHL: return C1 << C2; 4848 case ISD::SRL: return C1.lshr(C2); 4849 case ISD::SRA: return C1.ashr(C2); 4850 case ISD::ROTL: return C1.rotl(C2); 4851 case ISD::ROTR: return C1.rotr(C2); 4852 case ISD::SMIN: return C1.sle(C2) ? C1 : C2; 4853 case ISD::SMAX: return C1.sge(C2) ? C1 : C2; 4854 case ISD::UMIN: return C1.ule(C2) ? C1 : C2; 4855 case ISD::UMAX: return C1.uge(C2) ? 
C1 : C2; 4856 case ISD::SADDSAT: return C1.sadd_sat(C2); 4857 case ISD::UADDSAT: return C1.uadd_sat(C2); 4858 case ISD::SSUBSAT: return C1.ssub_sat(C2); 4859 case ISD::USUBSAT: return C1.usub_sat(C2); 4860 case ISD::UDIV: 4861 if (!C2.getBoolValue()) 4862 break; 4863 return C1.udiv(C2); 4864 case ISD::UREM: 4865 if (!C2.getBoolValue()) 4866 break; 4867 return C1.urem(C2); 4868 case ISD::SDIV: 4869 if (!C2.getBoolValue()) 4870 break; 4871 return C1.sdiv(C2); 4872 case ISD::SREM: 4873 if (!C2.getBoolValue()) 4874 break; 4875 return C1.srem(C2); 4876 } 4877 return llvm::None; 4878 } 4879 4880 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4881 const GlobalAddressSDNode *GA, 4882 const SDNode *N2) { 4883 if (GA->getOpcode() != ISD::GlobalAddress) 4884 return SDValue(); 4885 if (!TLI->isOffsetFoldingLegal(GA)) 4886 return SDValue(); 4887 auto *C2 = dyn_cast<ConstantSDNode>(N2); 4888 if (!C2) 4889 return SDValue(); 4890 int64_t Offset = C2->getSExtValue(); 4891 switch (Opcode) { 4892 case ISD::ADD: break; 4893 case ISD::SUB: Offset = -uint64_t(Offset); break; 4894 default: return SDValue(); 4895 } 4896 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, 4897 GA->getOffset() + uint64_t(Offset)); 4898 } 4899 4900 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4901 switch (Opcode) { 4902 case ISD::SDIV: 4903 case ISD::UDIV: 4904 case ISD::SREM: 4905 case ISD::UREM: { 4906 // If a divisor is zero/undef or any element of a divisor vector is 4907 // zero/undef, the whole op is undef. 4908 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4909 SDValue Divisor = Ops[1]; 4910 if (Divisor.isUndef() || isNullConstant(Divisor)) 4911 return true; 4912 4913 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4914 llvm::any_of(Divisor->op_values(), 4915 [](SDValue V) { return V.isUndef() || 4916 isNullConstant(V); }); 4917 // TODO: Handle signed overflow. 4918 } 4919 // TODO: Handle oversized shifts. 4920 default: 4921 return false; 4922 } 4923 } 4924 4925 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4926 EVT VT, ArrayRef<SDValue> Ops) { 4927 // If the opcode is a target-specific ISD node, there's nothing we can 4928 // do here and the operand rules may not line up with the below, so 4929 // bail early. 4930 if (Opcode >= ISD::BUILTIN_OP_END) 4931 return SDValue(); 4932 4933 // For now, the array Ops should only contain two values. 4934 // This enforcement will be removed once this function is merged with 4935 // FoldConstantVectorArithmetic 4936 if (Ops.size() != 2) 4937 return SDValue(); 4938 4939 if (isUndef(Opcode, Ops)) 4940 return getUNDEF(VT); 4941 4942 SDNode *N1 = Ops[0].getNode(); 4943 SDNode *N2 = Ops[1].getNode(); 4944 4945 // Handle the case of two scalars. 
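// Worked example: with Opcode == ISD::ADD and i32 operands 3 and 5, the path below folds to getConstant(8, DL, VT) via FoldValue; opaque constants are deliberately left unfolded.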
4946 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) { 4947 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) { 4948 if (C1->isOpaque() || C2->isOpaque()) 4949 return SDValue(); 4950 4951 Optional<APInt> FoldAttempt = 4952 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()); 4953 if (!FoldAttempt) 4954 return SDValue(); 4955 4956 SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT); 4957 assert((!Folded || !VT.isVector()) && 4958 "Can't fold vector ops with scalar operands"); 4959 return Folded; 4960 } 4961 } 4962 4963 // fold (add Sym, c) -> Sym+c 4964 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1)) 4965 return FoldSymbolOffset(Opcode, VT, GA, N2); 4966 if (TLI->isCommutativeBinOp(Opcode)) 4967 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2)) 4968 return FoldSymbolOffset(Opcode, VT, GA, N1); 4969 4970 // TODO: All the folds below are performed lane-by-lane and assume a fixed 4971 // vector width, however we should be able to do constant folds involving 4972 // splat vector nodes too. 4973 if (VT.isScalableVector()) 4974 return SDValue(); 4975 4976 // For fixed width vectors, extract each constant element and fold them 4977 // individually. Either input may be an undef value. 4978 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1); 4979 if (!BV1 && !N1->isUndef()) 4980 return SDValue(); 4981 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2); 4982 if (!BV2 && !N2->isUndef()) 4983 return SDValue(); 4984 // If both operands are undef, that's handled the same way as scalars. 4985 if (!BV1 && !BV2) 4986 return SDValue(); 4987 4988 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) && 4989 "Vector binop with different number of elements in operands?"); 4990 4991 EVT SVT = VT.getScalarType(); 4992 EVT LegalSVT = SVT; 4993 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4994 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4995 if (LegalSVT.bitsLT(SVT)) 4996 return SDValue(); 4997 } 4998 SmallVector<SDValue, 4> Outputs; 4999 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands(); 5000 for (unsigned I = 0; I != NumOps; ++I) { 5001 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT); 5002 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT); 5003 if (SVT.isInteger()) { 5004 if (V1->getValueType(0).bitsGT(SVT)) 5005 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1); 5006 if (V2->getValueType(0).bitsGT(SVT)) 5007 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2); 5008 } 5009 5010 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT) 5011 return SDValue(); 5012 5013 // Fold one vector element. 5014 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2); 5015 if (LegalSVT != SVT) 5016 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 5017 5018 // Scalar folding only succeeded if the result is a constant or UNDEF. 5019 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 5020 ScalarResult.getOpcode() != ISD::ConstantFP) 5021 return SDValue(); 5022 Outputs.push_back(ScalarResult); 5023 } 5024 5025 assert(VT.getVectorNumElements() == Outputs.size() && 5026 "Vector size mismatch!"); 5027 5028 // We may have a vector type but a scalar result. Create a splat. 5029 Outputs.resize(VT.getVectorNumElements(), Outputs.back()); 5030 5031 // Build a big vector out of the scalar elements we generated.
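// e.g. folding (add <2 x i32> <1, 2>, <3, 4>) reaches this point with Outputs = {4, 6} and rebuilds the constant vector <4, 6> below.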
5032 return getBuildVector(VT, SDLoc(), Outputs); 5033 } 5034 5035 // TODO: Merge with FoldConstantArithmetic 5036 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, 5037 const SDLoc &DL, EVT VT, 5038 ArrayRef<SDValue> Ops, 5039 const SDNodeFlags Flags) { 5040 // If the opcode is a target-specific ISD node, there's nothing we can 5041 // do here and the operand rules may not line up with the below, so 5042 // bail early. 5043 if (Opcode >= ISD::BUILTIN_OP_END) 5044 return SDValue(); 5045 5046 if (isUndef(Opcode, Ops)) 5047 return getUNDEF(VT); 5048 5049 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday? 5050 if (!VT.isVector()) 5051 return SDValue(); 5052 5053 // TODO: All the folds below are performed lane-by-lane and assume a fixed 5054 // vector width, however we should be able to do constant folds involving 5055 // splat vector nodes too. 5056 if (VT.isScalableVector()) 5057 return SDValue(); 5058 5059 // From this point onwards all vectors are assumed to be fixed width. 5060 unsigned NumElts = VT.getVectorNumElements(); 5061 5062 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) { 5063 return !Op.getValueType().isVector() || 5064 Op.getValueType().getVectorNumElements() == NumElts; 5065 }; 5066 5067 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { 5068 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); 5069 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || 5070 (BV && BV->isConstant()); 5071 }; 5072 5073 // All operands must be vector types with the same number of elements as 5074 // the result type and must be either UNDEF or a build vector of constant 5075 // or UNDEF scalars. 5076 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) || 5077 !llvm::all_of(Ops, IsScalarOrSameVectorSize)) 5078 return SDValue(); 5079 5080 // If we are comparing vectors, then the result needs to be an i1 boolean 5081 // that is then sign-extended back to the legal result type. 5082 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); 5083 5084 // Find legal integer scalar type for constant promotion and 5085 // ensure that its scalar size is at least as large as the source. 5086 EVT LegalSVT = VT.getScalarType(); 5087 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 5088 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 5089 if (LegalSVT.bitsLT(VT.getScalarType())) 5090 return SDValue(); 5091 } 5092 5093 // Constant fold each scalar lane separately. 5094 SmallVector<SDValue, 4> ScalarResults; 5095 for (unsigned i = 0; i != NumElts; i++) { 5096 SmallVector<SDValue, 4> ScalarOps; 5097 for (SDValue Op : Ops) { 5098 EVT InSVT = Op.getValueType().getScalarType(); 5099 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op); 5100 if (!InBV) { 5101 // We've checked that this is UNDEF or a constant of some kind. 5102 if (Op.isUndef()) 5103 ScalarOps.push_back(getUNDEF(InSVT)); 5104 else 5105 ScalarOps.push_back(Op); 5106 continue; 5107 } 5108 5109 SDValue ScalarOp = InBV->getOperand(i); 5110 EVT ScalarVT = ScalarOp.getValueType(); 5111 5112 // Build vector (integer) scalar operands may need implicit 5113 // truncation - do this before constant folding. 5114 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 5115 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 5116 5117 ScalarOps.push_back(ScalarOp); 5118 } 5119 5120 // Constant fold the scalar operands. 5121 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 5122 5123 // Legalize the (integer) scalar constant if necessary.
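// Sign-extension (not zero-extension) is used below so that an i1 'true' from a folded SETCC lane widens to the all-ones value a vector boolean is expected to carry in the legal result type.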
5124 if (LegalSVT != SVT) 5125 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 5126 5127 // Scalar folding only succeeded if the result is a constant or UNDEF. 5128 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 5129 ScalarResult.getOpcode() != ISD::ConstantFP) 5130 return SDValue(); 5131 ScalarResults.push_back(ScalarResult); 5132 } 5133 5134 SDValue V = getBuildVector(VT, DL, ScalarResults); 5135 NewSDValueDbgMsg(V, "New node fold constant vector: ", this); 5136 return V; 5137 } 5138 5139 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL, 5140 EVT VT, SDValue N1, SDValue N2) { 5141 // TODO: We don't do any constant folding for strict FP opcodes here, but we 5142 // should. That will require dealing with a potentially non-default 5143 // rounding mode, checking the "opStatus" return value from the APFloat 5144 // math calculations, and possibly other variations. 5145 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 5146 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 5147 if (N1CFP && N2CFP) { 5148 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF(); 5149 switch (Opcode) { 5150 case ISD::FADD: 5151 C1.add(C2, APFloat::rmNearestTiesToEven); 5152 return getConstantFP(C1, DL, VT); 5153 case ISD::FSUB: 5154 C1.subtract(C2, APFloat::rmNearestTiesToEven); 5155 return getConstantFP(C1, DL, VT); 5156 case ISD::FMUL: 5157 C1.multiply(C2, APFloat::rmNearestTiesToEven); 5158 return getConstantFP(C1, DL, VT); 5159 case ISD::FDIV: 5160 C1.divide(C2, APFloat::rmNearestTiesToEven); 5161 return getConstantFP(C1, DL, VT); 5162 case ISD::FREM: 5163 C1.mod(C2); 5164 return getConstantFP(C1, DL, VT); 5165 case ISD::FCOPYSIGN: 5166 C1.copySign(C2); 5167 return getConstantFP(C1, DL, VT); 5168 default: break; 5169 } 5170 } 5171 if (N1CFP && Opcode == ISD::FP_ROUND) { 5172 APFloat C1 = N1CFP->getValueAPF(); // make copy 5173 bool Unused; 5174 // This can return overflow, underflow, or inexact; we don't care. 5175 // FIXME need to be more flexible about rounding mode. 5176 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, 5177 &Unused); 5178 return getConstantFP(C1, DL, VT); 5179 } 5180 5181 switch (Opcode) { 5182 case ISD::FSUB: 5183 // -0.0 - undef --> undef (consistent with "fneg undef") 5184 if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef()) 5185 return getUNDEF(VT); 5186 LLVM_FALLTHROUGH; 5187 5188 case ISD::FADD: 5189 case ISD::FMUL: 5190 case ISD::FDIV: 5191 case ISD::FREM: 5192 // If both operands are undef, the result is undef. If 1 operand is undef, 5193 // the result is NaN. This should match the behavior of the IR optimizer. 5194 if (N1.isUndef() && N2.isUndef()) 5195 return getUNDEF(VT); 5196 if (N1.isUndef() || N2.isUndef()) 5197 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); 5198 } 5199 return SDValue(); 5200 } 5201 5202 SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) { 5203 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!"); 5204 5205 // There's no need to assert on a byte-aligned pointer. All pointers are at 5206 // least byte aligned. 
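// e.g. getAssertAlign(V, Align(1)) returns V unchanged: an alignment of one is a tautology, so no AssertAlign node is created for it.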
5207 if (A == Align(1)) 5208 return Val; 5209 5210 FoldingSetNodeID ID; 5211 AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val}); 5212 ID.AddInteger(A.value()); 5213 5214 void *IP = nullptr; 5215 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 5216 return SDValue(E, 0); 5217 5218 auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(), 5219 Val.getValueType(), A); 5220 createOperands(N, {Val}); 5221 5222 CSEMap.InsertNode(N, IP); 5223 InsertNode(N); 5224 5225 SDValue V(N, 0); 5226 NewSDValueDbgMsg(V, "Creating new node: ", this); 5227 return V; 5228 } 5229 5230 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5231 SDValue N1, SDValue N2) { 5232 SDNodeFlags Flags; 5233 if (Inserter) 5234 Flags = Inserter->getFlags(); 5235 return getNode(Opcode, DL, VT, N1, N2, Flags); 5236 } 5237 5238 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5239 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 5240 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5241 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 5242 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5243 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5244 5245 // Canonicalize constant to RHS if commutative. 5246 if (TLI->isCommutativeBinOp(Opcode)) { 5247 if (N1C && !N2C) { 5248 std::swap(N1C, N2C); 5249 std::swap(N1, N2); 5250 } else if (N1CFP && !N2CFP) { 5251 std::swap(N1CFP, N2CFP); 5252 std::swap(N1, N2); 5253 } 5254 } 5255 5256 switch (Opcode) { 5257 default: break; 5258 case ISD::TokenFactor: 5259 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 5260 N2.getValueType() == MVT::Other && "Invalid token factor!"); 5261 // Fold trivial token factors. 5262 if (N1.getOpcode() == ISD::EntryToken) return N2; 5263 if (N2.getOpcode() == ISD::EntryToken) return N1; 5264 if (N1 == N2) return N1; 5265 break; 5266 case ISD::BUILD_VECTOR: { 5267 // Attempt to simplify BUILD_VECTOR. 5268 SDValue Ops[] = {N1, N2}; 5269 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5270 return V; 5271 break; 5272 } 5273 case ISD::CONCAT_VECTORS: { 5274 SDValue Ops[] = {N1, N2}; 5275 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5276 return V; 5277 break; 5278 } 5279 case ISD::AND: 5280 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5281 assert(N1.getValueType() == N2.getValueType() && 5282 N1.getValueType() == VT && "Binary operator types must match!"); 5283 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 5284 // worth handling here. 5285 if (N2C && N2C->isNullValue()) 5286 return N2; 5287 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 5288 return N1; 5289 break; 5290 case ISD::OR: 5291 case ISD::XOR: 5292 case ISD::ADD: 5293 case ISD::SUB: 5294 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5295 assert(N1.getValueType() == N2.getValueType() && 5296 N1.getValueType() == VT && "Binary operator types must match!"); 5297 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 5298 // it's worth handling here. 
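// Here "^|+-" abbreviates the XOR, OR, ADD and SUB cases above; zero is a right-hand identity for all four, e.g. (xor x, 0) -> x and (sub x, 0) -> x.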
5299 if (N2C && N2C->isNullValue()) 5300 return N1; 5301 break; 5302 case ISD::MUL: 5303 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5304 assert(N1.getValueType() == N2.getValueType() && 5305 N1.getValueType() == VT && "Binary operator types must match!"); 5306 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { 5307 APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue(); 5308 APInt N2CImm = N2C->getAPIntValue(); 5309 return getVScale(DL, VT, MulImm * N2CImm); 5310 } 5311 break; 5312 case ISD::UDIV: 5313 case ISD::UREM: 5314 case ISD::MULHU: 5315 case ISD::MULHS: 5316 case ISD::SDIV: 5317 case ISD::SREM: 5318 case ISD::SADDSAT: 5319 case ISD::SSUBSAT: 5320 case ISD::UADDSAT: 5321 case ISD::USUBSAT: 5322 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5323 assert(N1.getValueType() == N2.getValueType() && 5324 N1.getValueType() == VT && "Binary operator types must match!"); 5325 break; 5326 case ISD::SMIN: 5327 case ISD::UMAX: 5328 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5329 assert(N1.getValueType() == N2.getValueType() && 5330 N1.getValueType() == VT && "Binary operator types must match!"); 5331 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) 5332 return getNode(ISD::OR, DL, VT, N1, N2); 5333 break; 5334 case ISD::SMAX: 5335 case ISD::UMIN: 5336 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5337 assert(N1.getValueType() == N2.getValueType() && 5338 N1.getValueType() == VT && "Binary operator types must match!"); 5339 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) 5340 return getNode(ISD::AND, DL, VT, N1, N2); 5341 break; 5342 case ISD::FADD: 5343 case ISD::FSUB: 5344 case ISD::FMUL: 5345 case ISD::FDIV: 5346 case ISD::FREM: 5347 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5348 assert(N1.getValueType() == N2.getValueType() && 5349 N1.getValueType() == VT && "Binary operator types must match!"); 5350 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags)) 5351 return V; 5352 break; 5353 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 5354 assert(N1.getValueType() == VT && 5355 N1.getValueType().isFloatingPoint() && 5356 N2.getValueType().isFloatingPoint() && 5357 "Invalid FCOPYSIGN!"); 5358 break; 5359 case ISD::SHL: 5360 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { 5361 APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue(); 5362 APInt ShiftImm = N2C->getAPIntValue(); 5363 return getVScale(DL, VT, MulImm << ShiftImm); 5364 } 5365 LLVM_FALLTHROUGH; 5366 case ISD::SRA: 5367 case ISD::SRL: 5368 if (SDValue V = simplifyShift(N1, N2)) 5369 return V; 5370 LLVM_FALLTHROUGH; 5371 case ISD::ROTL: 5372 case ISD::ROTR: 5373 assert(VT == N1.getValueType() && 5374 "Shift operators' return type must be the same as their first arg"); 5375 assert(VT.isInteger() && N2.getValueType().isInteger() && 5376 "Shifts only work on integers"); 5377 assert((!VT.isVector() || VT == N2.getValueType()) && 5378 "Vector shift amounts must have the same type as their first arg"); 5379 // Verify that the shift amount VT is big enough to hold valid shift 5380 // amounts. This catches things like trying to shift an i1024 value by an 5381 // i8, which is easy to fall into in generic code that uses 5382 // TLI.getShiftAmountTy().
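// Worked example: an i1024 value has shift amounts 0..1023, which need Log2_32_Ceil(1024) = 10 bits to encode, so an i8 shift-amount type would fail the assertion below.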
5383 assert(N2.getValueType().getScalarSizeInBits() >= 5384 Log2_32_Ceil(VT.getScalarSizeInBits()) && 5385 "Invalid use of small shift amount with oversized value!"); 5386 5387 // Always fold shifts of i1 values so the code generator doesn't need to 5388 // handle them. Since we know the size of the shift has to be less than the 5389 // size of the value, the shift/rotate count is guaranteed to be zero. 5390 if (VT == MVT::i1) 5391 return N1; 5392 if (N2C && N2C->isNullValue()) 5393 return N1; 5394 break; 5395 case ISD::FP_ROUND: 5396 assert(VT.isFloatingPoint() && 5397 N1.getValueType().isFloatingPoint() && 5398 VT.bitsLE(N1.getValueType()) && 5399 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && 5400 "Invalid FP_ROUND!"); 5401 if (N1.getValueType() == VT) return N1; // noop conversion. 5402 break; 5403 case ISD::AssertSext: 5404 case ISD::AssertZext: { 5405 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5406 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5407 assert(VT.isInteger() && EVT.isInteger() && 5408 "Cannot *_EXTEND_INREG FP types"); 5409 assert(!EVT.isVector() && 5410 "AssertSExt/AssertZExt type should be the vector element type " 5411 "rather than the vector type!"); 5412 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!"); 5413 if (VT.getScalarType() == EVT) return N1; // noop assertion. 5414 break; 5415 } 5416 case ISD::SIGN_EXTEND_INREG: { 5417 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5418 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5419 assert(VT.isInteger() && EVT.isInteger() && 5420 "Cannot *_EXTEND_INREG FP types"); 5421 assert(EVT.isVector() == VT.isVector() && 5422 "SIGN_EXTEND_INREG type should be vector iff the operand " 5423 "type is vector!"); 5424 assert((!EVT.isVector() || 5425 EVT.getVectorElementCount() == VT.getVectorElementCount()) && 5426 "Vector element counts must match in SIGN_EXTEND_INREG"); 5427 assert(EVT.bitsLE(VT) && "Not extending!"); 5428 if (EVT == VT) return N1; // Not actually extending 5429 5430 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 5431 unsigned FromBits = EVT.getScalarSizeInBits(); 5432 Val <<= Val.getBitWidth() - FromBits; 5433 Val.ashrInPlace(Val.getBitWidth() - FromBits); 5434 return getConstant(Val, DL, ConstantVT); 5435 }; 5436 5437 if (N1C) { 5438 const APInt &Val = N1C->getAPIntValue(); 5439 return SignExtendInReg(Val, VT); 5440 } 5441 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 5442 SmallVector<SDValue, 8> Ops; 5443 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 5444 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5445 SDValue Op = N1.getOperand(i); 5446 if (Op.isUndef()) { 5447 Ops.push_back(getUNDEF(OpVT)); 5448 continue; 5449 } 5450 ConstantSDNode *C = cast<ConstantSDNode>(Op); 5451 APInt Val = C->getAPIntValue(); 5452 Ops.push_back(SignExtendInReg(Val, OpVT)); 5453 } 5454 return getBuildVector(VT, DL, Ops); 5455 } 5456 break; 5457 } 5458 case ISD::EXTRACT_VECTOR_ELT: 5459 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() && 5460 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \ 5461 element type of the vector."); 5462 5463 // Extract from an undefined value or using an undefined index is undefined. 5464 if (N1.isUndef() || N2.isUndef()) 5465 return getUNDEF(VT); 5466 5467 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length 5468 // vectors. For scalable vectors we will provide appropriate support for 5469 // dealing with arbitrary indices. 
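// e.g. (extract_vector_elt v4i32:V, 7) folds to UNDEF here: indices 4..7 are out of range for a fixed-width v4i32, so any result is permitted.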
5470 if (N2C && N1.getValueType().isFixedLengthVector() && 5471 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) 5472 return getUNDEF(VT); 5473 5474 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 5475 // expanding copies of large vectors from registers. This only works for 5476 // fixed length vectors, since we need to know the exact number of 5477 // elements. 5478 if (N2C && N1.getOperand(0).getValueType().isFixedLengthVector() && 5479 N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0) { 5480 unsigned Factor = 5481 N1.getOperand(0).getValueType().getVectorNumElements(); 5482 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 5483 N1.getOperand(N2C->getZExtValue() / Factor), 5484 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL)); 5485 } 5486 5487 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while 5488 // lowering is expanding large vector constants. 5489 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR || 5490 N1.getOpcode() == ISD::SPLAT_VECTOR)) { 5491 assert((N1.getOpcode() != ISD::BUILD_VECTOR || 5492 N1.getValueType().isFixedLengthVector()) && 5493 "BUILD_VECTOR used for scalable vectors"); 5494 unsigned Index = 5495 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0; 5496 SDValue Elt = N1.getOperand(Index); 5497 5498 if (VT != Elt.getValueType()) 5499 // If the vector element type is not legal, the BUILD_VECTOR operands 5500 // are promoted and implicitly truncated, and the result implicitly 5501 // extended. Make that explicit here. 5502 Elt = getAnyExtOrTrunc(Elt, DL, VT); 5503 5504 return Elt; 5505 } 5506 5507 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 5508 // operations are lowered to scalars. 5509 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 5510 // If the indices are the same, return the inserted element else 5511 // if the indices are known different, extract the element from 5512 // the original vector. 5513 SDValue N1Op2 = N1.getOperand(2); 5514 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); 5515 5516 if (N1Op2C && N2C) { 5517 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 5518 if (VT == N1.getOperand(1).getValueType()) 5519 return N1.getOperand(1); 5520 else 5521 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 5522 } 5523 5524 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 5525 } 5526 } 5527 5528 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed 5529 // when vector types are scalarized and v1iX is legal. 5530 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx). 5531 // Here we are completely ignoring the extract element index (N2), 5532 // which is fine for fixed width vectors, since any index other than 0 5533 // is undefined anyway. However, this cannot be ignored for scalable 5534 // vectors - in theory we could support this, but we don't want to do this 5535 // without a profitability check. 
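// e.g. (extract_vector_elt (v1i32 extract_subvector(v8i32:V, 5)), 0) becomes (extract_vector_elt V, 5); only lane 0 of a v1i32 is defined, so discarding the outer index loses nothing for fixed-width vectors.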
5536 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5537 N1.getValueType().isFixedLengthVector() && 5538 N1.getValueType().getVectorNumElements() == 1) { 5539 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), 5540 N1.getOperand(1)); 5541 } 5542 break; 5543 case ISD::EXTRACT_ELEMENT: 5544 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 5545 assert(!N1.getValueType().isVector() && !VT.isVector() && 5546 (N1.getValueType().isInteger() == VT.isInteger()) && 5547 N1.getValueType() != VT && 5548 "Wrong types for EXTRACT_ELEMENT!"); 5549 5550 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 5551 // 64-bit integers into 32-bit parts. Instead of building the extract of 5552 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 5553 if (N1.getOpcode() == ISD::BUILD_PAIR) 5554 return N1.getOperand(N2C->getZExtValue()); 5555 5556 // EXTRACT_ELEMENT of a constant int is also very common. 5557 if (N1C) { 5558 unsigned ElementSize = VT.getSizeInBits(); 5559 unsigned Shift = ElementSize * N2C->getZExtValue(); 5560 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift); 5561 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT); 5562 } 5563 break; 5564 case ISD::EXTRACT_SUBVECTOR: 5565 EVT N1VT = N1.getValueType(); 5566 assert(VT.isVector() && N1VT.isVector() && 5567 "Extract subvector VTs must be vectors!"); 5568 assert(VT.getVectorElementType() == N1VT.getVectorElementType() && 5569 "Extract subvector VTs must have the same element type!"); 5570 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) && 5571 "Cannot extract a scalable vector from a fixed length vector!"); 5572 assert((VT.isScalableVector() != N1VT.isScalableVector() || 5573 VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) && 5574 "Extract subvector must be from larger vector to smaller vector!"); 5575 assert(N2C && "Extract subvector index must be a constant"); 5576 assert((VT.isScalableVector() != N1VT.isScalableVector() || 5577 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <= 5578 N1VT.getVectorMinNumElements()) && 5579 "Extract subvector overflow!"); 5580 assert(N2C->getAPIntValue().getBitWidth() == 5581 TLI->getVectorIdxTy(getDataLayout()) 5582 .getSizeInBits() 5583 .getFixedSize() && 5584 "Constant index for EXTRACT_SUBVECTOR has an invalid size"); 5585 5586 // Trivial extraction. 5587 if (VT == N1VT) 5588 return N1; 5589 5590 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. 5591 if (N1.isUndef()) 5592 return getUNDEF(VT); 5593 5594 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of 5595 // the concat have the same type as the extract. 5596 if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 && 5597 VT == N1.getOperand(0).getValueType()) { 5598 unsigned Factor = VT.getVectorMinNumElements(); 5599 return N1.getOperand(N2C->getZExtValue() / Factor); 5600 } 5601 5602 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created 5603 // during shuffle legalization. 5604 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 5605 VT == N1.getOperand(1).getValueType()) 5606 return N1.getOperand(1); 5607 break; 5608 } 5609 5610 // Perform trivial constant folding. 5611 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2})) 5612 return SV; 5613 5614 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2)) 5615 return V; 5616 5617 // Canonicalize an UNDEF to the RHS, even over a constant. 
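// Moving the UNDEF to N2 for commutative opcodes lets the single RHS-undef switch further down cover both operand orders; non-commutative opcodes are folded immediately in the switch below.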
5618 if (N1.isUndef()) { 5619 if (TLI->isCommutativeBinOp(Opcode)) { 5620 std::swap(N1, N2); 5621 } else { 5622 switch (Opcode) { 5623 case ISD::SIGN_EXTEND_INREG: 5624 case ISD::SUB: 5625 return getUNDEF(VT); // fold op(undef, arg2) -> undef 5626 case ISD::UDIV: 5627 case ISD::SDIV: 5628 case ISD::UREM: 5629 case ISD::SREM: 5630 case ISD::SSUBSAT: 5631 case ISD::USUBSAT: 5632 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 5633 } 5634 } 5635 } 5636 5637 // Fold a bunch of operators when the RHS is undef. 5638 if (N2.isUndef()) { 5639 switch (Opcode) { 5640 case ISD::XOR: 5641 if (N1.isUndef()) 5642 // Handle undef ^ undef -> 0 special case. This is a common 5643 // idiom (misuse). 5644 return getConstant(0, DL, VT); 5645 LLVM_FALLTHROUGH; 5646 case ISD::ADD: 5647 case ISD::SUB: 5648 case ISD::UDIV: 5649 case ISD::SDIV: 5650 case ISD::UREM: 5651 case ISD::SREM: 5652 return getUNDEF(VT); // fold op(arg1, undef) -> undef 5653 case ISD::MUL: 5654 case ISD::AND: 5655 case ISD::SSUBSAT: 5656 case ISD::USUBSAT: 5657 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 5658 case ISD::OR: 5659 case ISD::SADDSAT: 5660 case ISD::UADDSAT: 5661 return getAllOnesConstant(DL, VT); 5662 } 5663 } 5664 5665 // Memoize this node if possible. 5666 SDNode *N; 5667 SDVTList VTs = getVTList(VT); 5668 SDValue Ops[] = {N1, N2}; 5669 if (VT != MVT::Glue) { 5670 FoldingSetNodeID ID; 5671 AddNodeIDNode(ID, Opcode, VTs, Ops); 5672 void *IP = nullptr; 5673 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5674 E->intersectFlagsWith(Flags); 5675 return SDValue(E, 0); 5676 } 5677 5678 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5679 N->setFlags(Flags); 5680 createOperands(N, Ops); 5681 CSEMap.InsertNode(N, IP); 5682 } else { 5683 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5684 createOperands(N, Ops); 5685 } 5686 5687 InsertNode(N); 5688 SDValue V = SDValue(N, 0); 5689 NewSDValueDbgMsg(V, "Creating new node: ", this); 5690 return V; 5691 } 5692 5693 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5694 SDValue N1, SDValue N2, SDValue N3) { 5695 SDNodeFlags Flags; 5696 if (Inserter) 5697 Flags = Inserter->getFlags(); 5698 return getNode(Opcode, DL, VT, N1, N2, N3, Flags); 5699 } 5700 5701 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5702 SDValue N1, SDValue N2, SDValue N3, 5703 const SDNodeFlags Flags) { 5704 // Perform various simplifications. 5705 switch (Opcode) { 5706 case ISD::FMA: { 5707 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5708 assert(N1.getValueType() == VT && N2.getValueType() == VT && 5709 N3.getValueType() == VT && "FMA types must match!"); 5710 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5711 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5712 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 5713 if (N1CFP && N2CFP && N3CFP) { 5714 APFloat V1 = N1CFP->getValueAPF(); 5715 const APFloat &V2 = N2CFP->getValueAPF(); 5716 const APFloat &V3 = N3CFP->getValueAPF(); 5717 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 5718 return getConstantFP(V1, DL, VT); 5719 } 5720 break; 5721 } 5722 case ISD::BUILD_VECTOR: { 5723 // Attempt to simplify BUILD_VECTOR. 
5724 SDValue Ops[] = {N1, N2, N3}; 5725 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5726 return V; 5727 break; 5728 } 5729 case ISD::CONCAT_VECTORS: { 5730 SDValue Ops[] = {N1, N2, N3}; 5731 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5732 return V; 5733 break; 5734 } 5735 case ISD::SETCC: { 5736 assert(VT.isInteger() && "SETCC result type must be an integer!"); 5737 assert(N1.getValueType() == N2.getValueType() && 5738 "SETCC operands must have the same type!"); 5739 assert(VT.isVector() == N1.getValueType().isVector() && 5740 "SETCC type should be vector iff the operand type is vector!"); 5741 assert((!VT.isVector() || VT.getVectorElementCount() == 5742 N1.getValueType().getVectorElementCount()) && 5743 "SETCC vector element counts must match!"); 5744 // Use FoldSetCC to simplify SETCC's. 5745 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 5746 return V; 5747 // Vector constant folding. 5748 SDValue Ops[] = {N1, N2, N3}; 5749 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 5750 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 5751 return V; 5752 } 5753 break; 5754 } 5755 case ISD::SELECT: 5756 case ISD::VSELECT: 5757 if (SDValue V = simplifySelect(N1, N2, N3)) 5758 return V; 5759 break; 5760 case ISD::VECTOR_SHUFFLE: 5761 llvm_unreachable("should use getVectorShuffle constructor!"); 5762 case ISD::INSERT_VECTOR_ELT: { 5763 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 5764 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except 5765 // for scalable vectors where we will generate appropriate code to 5766 // deal with out-of-bounds cases correctly. 5767 if (N3C && N1.getValueType().isFixedLengthVector() && 5768 N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 5769 return getUNDEF(VT); 5770 5771 // Undefined index can be assumed out-of-bounds, so that's UNDEF too. 5772 if (N3.isUndef()) 5773 return getUNDEF(VT); 5774 5775 // If the inserted element is an UNDEF, just use the input vector. 5776 if (N2.isUndef()) 5777 return N1; 5778 5779 break; 5780 } 5781 case ISD::INSERT_SUBVECTOR: { 5782 // Inserting undef into undef is still undef. 5783 if (N1.isUndef() && N2.isUndef()) 5784 return getUNDEF(VT); 5785 5786 EVT N2VT = N2.getValueType(); 5787 assert(VT == N1.getValueType() && 5788 "Dest and insert subvector source types must match!"); 5789 assert(VT.isVector() && N2VT.isVector() && 5790 "Insert subvector VTs must be vectors!"); 5791 assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) && 5792 "Cannot insert a scalable vector into a fixed length vector!"); 5793 assert((VT.isScalableVector() != N2VT.isScalableVector() || 5794 VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) && 5795 "Insert subvector must be from smaller vector to larger vector!"); 5796 assert(isa<ConstantSDNode>(N3) && 5797 "Insert subvector index must be constant"); 5798 assert((VT.isScalableVector() != N2VT.isScalableVector() || 5799 (N2VT.getVectorMinNumElements() + 5800 cast<ConstantSDNode>(N3)->getZExtValue()) <= 5801 VT.getVectorMinNumElements()) && 5802 "Insert subvector overflow!"); 5803 5804 // Trivial insertion. 5805 if (VT == N2VT) 5806 return N2; 5807 5808 // If this is an insert of an extracted vector into an undef vector, we 5809 // can just use the input to the extract. 
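// i.e. (insert_subvector undef, (extract_subvector V, Idx), Idx) -> V, provided V has the result type, as the operand checks below verify.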
5810 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5811 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT) 5812 return N2.getOperand(0); 5813 break; 5814 } 5815 case ISD::BITCAST: 5816 // Fold bit_convert nodes from a type to themselves. 5817 if (N1.getValueType() == VT) 5818 return N1; 5819 break; 5820 } 5821 5822 // Memoize node if it doesn't produce a flag. 5823 SDNode *N; 5824 SDVTList VTs = getVTList(VT); 5825 SDValue Ops[] = {N1, N2, N3}; 5826 if (VT != MVT::Glue) { 5827 FoldingSetNodeID ID; 5828 AddNodeIDNode(ID, Opcode, VTs, Ops); 5829 void *IP = nullptr; 5830 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5831 E->intersectFlagsWith(Flags); 5832 return SDValue(E, 0); 5833 } 5834 5835 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5836 N->setFlags(Flags); 5837 createOperands(N, Ops); 5838 CSEMap.InsertNode(N, IP); 5839 } else { 5840 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5841 createOperands(N, Ops); 5842 } 5843 5844 InsertNode(N); 5845 SDValue V = SDValue(N, 0); 5846 NewSDValueDbgMsg(V, "Creating new node: ", this); 5847 return V; 5848 } 5849 5850 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5851 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 5852 SDValue Ops[] = { N1, N2, N3, N4 }; 5853 return getNode(Opcode, DL, VT, Ops); 5854 } 5855 5856 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5857 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 5858 SDValue N5) { 5859 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5860 return getNode(Opcode, DL, VT, Ops); 5861 } 5862 5863 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 5864 /// the incoming stack arguments to be loaded from the stack. 5865 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 5866 SmallVector<SDValue, 8> ArgChains; 5867 5868 // Include the original chain at the beginning of the list. When this is 5869 // used by target LowerCall hooks, this helps legalize find the 5870 // CALLSEQ_BEGIN node. 5871 ArgChains.push_back(Chain); 5872 5873 // Add a chain value for each stack argument. 5874 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 5875 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 5876 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 5877 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 5878 if (FI->getIndex() < 0) 5879 ArgChains.push_back(SDValue(L, 1)); 5880 5881 // Build a tokenfactor for all the chains. 5882 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 5883 } 5884 5885 /// getMemsetValue - Vectorized representation of the memset value 5886 /// operand. 
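/// For example, an i8 fill value of 0xAB splat to VT = i32 yields 0xABABABAB: via APInt::getSplat for constant fills, or via a multiply by the 0x01010101... pattern for a runtime fill value.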
5887 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 5888 const SDLoc &dl) { 5889 assert(!Value.isUndef()); 5890 5891 unsigned NumBits = VT.getScalarSizeInBits(); 5892 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 5893 assert(C->getAPIntValue().getBitWidth() == 8); 5894 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 5895 if (VT.isInteger()) { 5896 bool IsOpaque = VT.getSizeInBits() > 64 || 5897 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); 5898 return DAG.getConstant(Val, dl, VT, false, IsOpaque); 5899 } 5900 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 5901 VT); 5902 } 5903 5904 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 5905 EVT IntVT = VT.getScalarType(); 5906 if (!IntVT.isInteger()) 5907 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 5908 5909 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 5910 if (NumBits > 8) { 5911 // Use a multiplication with 0x010101... to extend the input to the 5912 // required length. 5913 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 5914 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 5915 DAG.getConstant(Magic, dl, IntVT)); 5916 } 5917 5918 if (VT != Value.getValueType() && !VT.isInteger()) 5919 Value = DAG.getBitcast(VT.getScalarType(), Value); 5920 if (VT != Value.getValueType()) 5921 Value = DAG.getSplatBuildVector(VT, dl, Value); 5922 5923 return Value; 5924 } 5925 5926 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 5927 /// used when a memcpy is turned into a memset when the source is a constant 5928 /// string ptr. 5929 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 5930 const TargetLowering &TLI, 5931 const ConstantDataArraySlice &Slice) { 5932 // Handle vector with all elements zero. 5933 if (Slice.Array == nullptr) { 5934 if (VT.isInteger()) 5935 return DAG.getConstant(0, dl, VT); 5936 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 5937 return DAG.getConstantFP(0.0, dl, VT); 5938 else if (VT.isVector()) { 5939 unsigned NumElts = VT.getVectorNumElements(); 5940 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; 5941 return DAG.getNode(ISD::BITCAST, dl, VT, 5942 DAG.getConstant(0, dl, 5943 EVT::getVectorVT(*DAG.getContext(), 5944 EltVT, NumElts))); 5945 } else 5946 llvm_unreachable("Expected type!"); 5947 } 5948 5949 assert(!VT.isVector() && "Can't handle vector type here!"); 5950 unsigned NumVTBits = VT.getSizeInBits(); 5951 unsigned NumVTBytes = NumVTBits / 8; 5952 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 5953 5954 APInt Val(NumVTBits, 0); 5955 if (DAG.getDataLayout().isLittleEndian()) { 5956 for (unsigned i = 0; i != NumBytes; ++i) 5957 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 5958 } else { 5959 for (unsigned i = 0; i != NumBytes; ++i) 5960 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 5961 } 5962 5963 // If the "cost" of materializing the integer immediate is less than the cost 5964 // of a load, then it is cost effective to turn the load into the immediate. 
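// e.g. on a little-endian target with VT = i32, the first four bytes of a constant string "abcd" pack into Val = 0x64636261 ('a' = 0x61 lands in the low byte), and that immediate replaces the load when the target deems it cheap.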
5965 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 5966 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 5967 return DAG.getConstant(Val, dl, VT); 5968 return SDValue(nullptr, 0); 5969 } 5970 5971 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset, 5972 const SDLoc &DL, 5973 const SDNodeFlags Flags) { 5974 EVT VT = Base.getValueType(); 5975 SDValue Index; 5976 5977 if (Offset.isScalable()) 5978 Index = getVScale(DL, Base.getValueType(), 5979 APInt(Base.getValueSizeInBits().getFixedSize(), 5980 Offset.getKnownMinSize())); 5981 else 5982 Index = getConstant(Offset.getFixedSize(), DL, VT); 5983 5984 return getMemBasePlusOffset(Base, Index, DL, Flags); 5985 } 5986 5987 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset, 5988 const SDLoc &DL, 5989 const SDNodeFlags Flags) { 5990 assert(Offset.getValueType().isInteger()); 5991 EVT BasePtrVT = Ptr.getValueType(); 5992 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags); 5993 } 5994 5995 /// Returns true if memcpy source is constant data. 5996 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 5997 uint64_t SrcDelta = 0; 5998 GlobalAddressSDNode *G = nullptr; 5999 if (Src.getOpcode() == ISD::GlobalAddress) 6000 G = cast<GlobalAddressSDNode>(Src); 6001 else if (Src.getOpcode() == ISD::ADD && 6002 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 6003 Src.getOperand(1).getOpcode() == ISD::Constant) { 6004 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 6005 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 6006 } 6007 if (!G) 6008 return false; 6009 6010 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 6011 SrcDelta + G->getOffset()); 6012 } 6013 6014 static bool shouldLowerMemFuncForSize(const MachineFunction &MF, 6015 SelectionDAG &DAG) { 6016 // On Darwin, -Os means optimize for size without hurting performance, so 6017 // only really optimize for size when -Oz (MinSize) is used. 6018 if (MF.getTarget().getTargetTriple().isOSDarwin()) 6019 return MF.getFunction().hasMinSize(); 6020 return DAG.shouldOptForSize(); 6021 } 6022 6023 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, 6024 SmallVector<SDValue, 32> &OutChains, unsigned From, 6025 unsigned To, SmallVector<SDValue, 16> &OutLoadChains, 6026 SmallVector<SDValue, 16> &OutStoreChains) { 6027 assert(OutLoadChains.size() && "Missing loads in memcpy inlining"); 6028 assert(OutStoreChains.size() && "Missing stores in memcpy inlining"); 6029 SmallVector<SDValue, 16> GluedLoadChains; 6030 for (unsigned i = From; i < To; ++i) { 6031 OutChains.push_back(OutLoadChains[i]); 6032 GluedLoadChains.push_back(OutLoadChains[i]); 6033 } 6034 6035 // Chain for all loads. 6036 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 6037 GluedLoadChains); 6038 6039 for (unsigned i = From; i < To; ++i) { 6040 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]); 6041 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(), 6042 ST->getBasePtr(), ST->getMemoryVT(), 6043 ST->getMemOperand()); 6044 OutChains.push_back(NewStore); 6045 } 6046 } 6047 6048 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 6049 SDValue Chain, SDValue Dst, SDValue Src, 6050 uint64_t Size, Align Alignment, 6051 bool isVol, bool AlwaysInline, 6052 MachinePointerInfo DstPtrInfo, 6053 MachinePointerInfo SrcPtrInfo) { 6054 // Turn a memcpy of undef to nop. 6055 // FIXME: We need to honor volatile even if Src is undef.
6056 if (Src.isUndef()) 6057 return Chain; 6058 6059 // Expand memcpy to a series of load and store ops if the size operand falls 6060 // below a certain threshold. 6061 // TODO: In the AlwaysInline case, if the size is big then generate a loop 6062 // rather than maybe a humongous number of loads and stores. 6063 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6064 const DataLayout &DL = DAG.getDataLayout(); 6065 LLVMContext &C = *DAG.getContext(); 6066 std::vector<EVT> MemOps; 6067 bool DstAlignCanChange = false; 6068 MachineFunction &MF = DAG.getMachineFunction(); 6069 MachineFrameInfo &MFI = MF.getFrameInfo(); 6070 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6071 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6072 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6073 DstAlignCanChange = true; 6074 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src); 6075 if (!SrcAlign || Alignment > *SrcAlign) 6076 SrcAlign = Alignment; 6077 assert(SrcAlign && "SrcAlign must be set"); 6078 ConstantDataArraySlice Slice; 6079 // If marked as volatile, perform a copy even when marked as constant. 6080 bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice); 6081 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 6082 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 6083 const MemOp Op = isZeroConstant 6084 ? MemOp::Set(Size, DstAlignCanChange, Alignment, 6085 /*IsZeroMemset*/ true, isVol) 6086 : MemOp::Copy(Size, DstAlignCanChange, Alignment, 6087 *SrcAlign, isVol, CopyFromConstant); 6088 if (!TLI.findOptimalMemOpLowering( 6089 MemOps, Limit, Op, DstPtrInfo.getAddrSpace(), 6090 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes())) 6091 return SDValue(); 6092 6093 if (DstAlignCanChange) { 6094 Type *Ty = MemOps[0].getTypeForEVT(C); 6095 Align NewAlign = DL.getABITypeAlign(Ty); 6096 6097 // Don't promote to an alignment that would require dynamic stack 6098 // realignment. 6099 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 6100 if (!TRI->needsStackRealignment(MF)) 6101 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) 6102 NewAlign = NewAlign / 2; 6103 6104 if (NewAlign > Alignment) { 6105 // Give the stack frame object a larger alignment if needed. 6106 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 6107 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6108 Alignment = NewAlign; 6109 } 6110 } 6111 6112 MachineMemOperand::Flags MMOFlags = 6113 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 6114 SmallVector<SDValue, 16> OutLoadChains; 6115 SmallVector<SDValue, 16> OutStoreChains; 6116 SmallVector<SDValue, 32> OutChains; 6117 unsigned NumMemOps = MemOps.size(); 6118 uint64_t SrcOff = 0, DstOff = 0; 6119 for (unsigned i = 0; i != NumMemOps; ++i) { 6120 EVT VT = MemOps[i]; 6121 unsigned VTSize = VT.getSizeInBits() / 8; 6122 SDValue Value, Store; 6123 6124 if (VTSize > Size) { 6125 // Issuing an unaligned load / store pair that overlaps with the previous 6126 // pair. Adjust the offset accordingly. 6127 assert(i == NumMemOps-1 && i != 0); 6128 SrcOff -= VTSize - Size; 6129 DstOff -= VTSize - Size; 6130 } 6131 6132 if (CopyFromConstant && 6133 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 6134 // It's unlikely a store of a vector immediate can be done in a single 6135 // instruction. It would require a load from a constantpool first. 6136 // We only handle zero vectors here. 
6137 // FIXME: Handle other cases where store of vector immediate is done in 6138 // a single instruction. 6139 ConstantDataArraySlice SubSlice; 6140 if (SrcOff < Slice.Length) { 6141 SubSlice = Slice; 6142 SubSlice.move(SrcOff); 6143 } else { 6144 // This is an out-of-bounds access and hence UB. Pretend we read zero. 6145 SubSlice.Array = nullptr; 6146 SubSlice.Offset = 0; 6147 SubSlice.Length = VTSize; 6148 } 6149 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); 6150 if (Value.getNode()) { 6151 Store = DAG.getStore( 6152 Chain, dl, Value, 6153 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl), 6154 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags); 6155 OutChains.push_back(Store); 6156 } 6157 } 6158 6159 if (!Store.getNode()) { 6160 // The type might not be legal for the target. This should only happen 6161 // if the type is smaller than a legal type, as on PPC, so the right 6162 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 6163 // to Load/Store if NVT==VT. 6164 // FIXME: does the case above also need this? 6165 EVT NVT = TLI.getTypeToTransformTo(C, VT); 6166 assert(NVT.bitsGE(VT)); 6167 6168 bool isDereferenceable = 6169 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 6170 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 6171 if (isDereferenceable) 6172 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 6173 6174 Value = DAG.getExtLoad( 6175 ISD::EXTLOAD, dl, NVT, Chain, 6176 DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl), 6177 SrcPtrInfo.getWithOffset(SrcOff), VT, 6178 commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags); 6179 OutLoadChains.push_back(Value.getValue(1)); 6180 6181 Store = DAG.getTruncStore( 6182 Chain, dl, Value, 6183 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl), 6184 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags); 6185 OutStoreChains.push_back(Store); 6186 } 6187 SrcOff += VTSize; 6188 DstOff += VTSize; 6189 Size -= VTSize; 6190 } 6191 6192 unsigned GluedLdStLimit = MaxLdStGlue == 0 ? 6193 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue; 6194 unsigned NumLdStInMemcpy = OutStoreChains.size(); 6195 6196 if (NumLdStInMemcpy) { 6197 // The memcpy may have been converted to a memset if it copies constants. 6198 // In that case we won't have loads and stores, just stores. In the 6199 // absence of loads, there is nothing to gang up. 6200 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) { 6201 // If the target does not care, just leave it as is. 6202 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) { 6203 OutChains.push_back(OutLoadChains[i]); 6204 OutChains.push_back(OutStoreChains[i]); 6205 } 6206 } else { 6207 // The number of ld/st pairs is within the limit set by the target. 6208 if (NumLdStInMemcpy <= GluedLdStLimit) { 6209 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 6210 NumLdStInMemcpy, OutLoadChains, 6211 OutStoreChains); 6212 } else { 6213 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit; 6214 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit; 6215 unsigned GlueIter = 0; 6216 6217 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) { 6218 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit; 6219 unsigned IndexTo = NumLdStInMemcpy - GlueIter; 6220 6221 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo, 6222 OutLoadChains, OutStoreChains); 6223 GlueIter += GluedLdStLimit; 6224 } 6225 6226 // Residual ld/st.
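// e.g. 7 load/store pairs with a glue limit of 3 were grouped above as [4,7) and [1,4); the call below glues the single remaining pair [0,1).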
        // Residual ld/st.
        if (RemainingLdStInMemcpy) {
          chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
                                       RemainingLdStInMemcpy, OutLoadChains,
                                       OutStoreChains);
        }
      }
    }
  }
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                        SDValue Chain, SDValue Dst, SDValue Src,
                                        uint64_t Size, Align Alignment,
                                        bool isVol, bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
  // Turn a memmove of undef to nop.
  // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memmove to a series of load and store ops if the size operand falls
  // below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
  if (!SrcAlign || Alignment > *SrcAlign)
    SrcAlign = Alignment;
  assert(SrcAlign && "SrcAlign must be set");
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
  if (!TLI.findOptimalMemOpLowering(
          MemOps, Limit,
          MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign,
                      /*IsVolatile*/ true),
          DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
          MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    Align NewAlign = DL.getABITypeAlign(Ty);
    if (NewAlign > Alignment) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Alignment = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
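  // Unlike the memcpy expansion above, overlap between Src and Dst is legal
  // for memmove, so the code below issues every load before any store: the
  // first loop produces all load values, their chains are merged into a
  // single TokenFactor, and only then does the second loop emit the stores.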
  uint64_t SrcOff = 0, DstOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value;

    bool isDereferenceable =
        SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
    MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
    if (isDereferenceable)
      SrcMMOFlags |= MachineMemOperand::MODereferenceable;

    Value =
        DAG.getLoad(VT, dl, Chain,
                    DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
                    SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
    SrcOff += VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  OutChains.clear();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Store;

    Store =
        DAG.getStore(Chain, dl, LoadValues[i],
                     DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
                     DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
    OutChains.push_back(Store);
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

/// Lower the call to 'memset' intrinsic function into a series of store
/// operations.
///
/// \param DAG Selection DAG where lowered code is placed.
/// \param dl Link to corresponding IR location.
/// \param Chain Control flow dependency.
/// \param Dst Pointer to destination memory location.
/// \param Src Value of byte to write into the memory.
/// \param Size Number of bytes to write.
/// \param Alignment Alignment of the destination in bytes.
/// \param isVol True if destination is volatile.
/// \param DstPtrInfo IR information on the memory pointer.
/// \returns New head in the control flow, if lowering was successful, empty
/// SDValue otherwise.
///
/// The function tries to replace 'llvm.memset' intrinsic with several store
/// operations and value calculation code. This is usually profitable for small
/// memory size.
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
                               SDValue Chain, SDValue Dst, SDValue Src,
                               uint64_t Size, Align Alignment, bool isVol,
                               MachinePointerInfo DstPtrInfo) {
  // Turn a memset of undef to nop.
  // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memset to a series of store ops if the size operand falls below a
  // certain threshold.
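  // Illustrative example (not from the source): memset(p, 0xAB, 8) on a
  // target where i32 stores are chosen becomes two stores of the splatted
  // pattern 0xABABABAB, at byte offsets 0 and 4.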
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  bool IsZeroVal =
      isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
  if (!TLI.findOptimalMemOpLowering(
          MemOps, TLI.getMaxStoresPerMemset(OptSize),
          MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
          DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty);
    if (NewAlign > Alignment) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Alignment = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  uint64_t DstOff = 0;
  unsigned NumMemOps = MemOps.size();

  // Find the largest store and generate the bit pattern for it.
  EVT LargestVT = MemOps[0];
  for (unsigned i = 1; i < NumMemOps; i++)
    if (MemOps[i].bitsGT(LargestVT))
      LargestVT = MemOps[i];
  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);

  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      DstOff -= VTSize - Size;
    }

    // If this store is smaller than the largest store, see whether we can get
    // the smaller value for free with a truncate.
    SDValue Value = MemSetValue;
    if (VT.bitsLT(LargestVT)) {
      if (!LargestVT.isVector() && !VT.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
      else
        Value = getMemsetValue(Src, VT, DAG, dl);
    }
    assert(Value.getValueType() == VT && "Value with wrong type.");
    SDValue Store = DAG.getStore(
        Chain, dl, Value,
        DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
        DstPtrInfo.getWithOffset(DstOff), Alignment,
        isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
    OutChains.push_back(Store);
    DstOff += VT.getSizeInBits() / 8;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
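// Worked example for the tail handling in getMemsetStores (illustrative):
// a 6-byte memset lowered as two i32 stores reaches the final store with
// Size == 2, so DstOff is backed up by VTSize - Size == 2 and the stores
// cover bytes [0,4) and [2,6), overlapping harmlessly in the middle.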
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
                                            unsigned AS) {
  // Lowering memcpy / memset / memmove intrinsics to calls is only valid if
  // all pointer operands can be losslessly bitcasted to pointers of address
  // space 0.
  if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
    report_fatal_error("cannot lower memory intrinsic in address space " +
                       Twine(AS));
  }
}
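// For instance (illustrative assumption): on a target where pointers in some
// non-zero address space cannot be no-op cast to address space 0 (say,
// because they have a different size), a memory intrinsic on such pointers
// has no valid libcall form, so the helper above reports a fatal error
// rather than silently miscompiling.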
SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, Align Alignment,
                                bool isVol, bool AlwaysInline, bool isTailCall,
                                MachinePointerInfo DstPtrInfo,
                                MachinePointerInfo SrcPtrInfo) {
  // Check to see if we should lower the memcpy to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memcpy with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result = getMemcpyLoadsAndStores(
        *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
        isVol, false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memcpy with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemcpy(
        *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
        DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // If we really need inline code and the target declined to provide it,
  // use a (potentially long) sequence of loads and stores.
  if (AlwaysInline) {
    assert(ConstantSize && "AlwaysInline requires a constant size!");
    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                   ConstantSize->getZExtValue(), Alignment,
                                   isVol, true, DstPtrInfo, SrcPtrInfo);
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());

  // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
  // memcpy is not guaranteed to be safe. libc memcpys aren't required to
  // respect volatile, so they may do things like read or write memory
  // beyond the given memory regions. But fixing this isn't easy, and most
  // people don't care.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = Type::getInt8PtrTy(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);

  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
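#if 0
// Hedged usage sketch (not part of this file): how a client might drive the
// three-tier strategy above when visiting an IR memcpy. Every name here
// (DstPtr, SrcPtr, SizeVal, DstV, SrcV) is hypothetical.
  SDValue NewRoot = DAG.getMemcpy(
      DAG.getRoot(), dl, DstPtr, SrcPtr, SizeVal, Align(1),
      /*isVol=*/false, /*AlwaysInline=*/false, /*isTailCall=*/false,
      MachinePointerInfo(DstV), MachinePointerInfo(SrcV));
  // The emitted stores (or the libcall) are now ordered after all prior work.
  DAG.setRoot(NewRoot);
#endif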
SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
                                      SDValue Dst, unsigned DstAlign,
                                      SDValue Src, unsigned SrcAlign,
                                      SDValue Size, Type *SizeTy,
                                      unsigned ElemSz, bool isTailCall,
                                      MachinePointerInfo DstPtrInfo,
                                      MachinePointerInfo SrcPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Node = Src;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                 SDValue Src, SDValue Size, Align Alignment,
                                 bool isVol, bool isTailCall,
                                 MachinePointerInfo DstPtrInfo,
                                 MachinePointerInfo SrcPtrInfo) {
  // Check to see if we should lower the memmove to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memmove with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result = getMemmoveLoadsAndStores(
        *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
        isVol, false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memmove with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result =
        TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size,
                                      Alignment, isVol, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());

  // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
  // not be safe. See memcpy above for more details.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = Type::getInt8PtrTy(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);

  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
                                       SDValue Dst, unsigned DstAlign,
                                       SDValue Src, unsigned SrcAlign,
                                       SDValue Size, Type *SizeTy,
                                       unsigned ElemSz, bool isTailCall,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Node = Src;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, Align Alignment,
                                bool isVol, bool isTailCall,
                                MachinePointerInfo DstPtrInfo) {
  // Check to see if we should lower the memset to stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memset with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
                                     ConstantSize->getZExtValue(), Alignment,
                                     isVol, DstPtrInfo);

    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memset with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemset(
        *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
  Args.push_back(Entry);
  Entry.Node = Src;
  Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
  Args.push_back(Entry);
  Entry.Node = Size;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Args.push_back(Entry);

  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
                                      SDValue Dst, unsigned DstAlign,
                                      SDValue Value, SDValue Size, Type *SizeTy,
                                      unsigned ElemSz, bool isTailCall,
                                      MachinePointerInfo DstPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Ty = Type::getInt8Ty(*getContext());
  Entry.Node = Value;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
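// Note (summary, assumption about RTLIB naming): the three getAtomicMem*
// helpers above all resolve their callee through
// RTLIB::get*_ELEMENT_UNORDERED_ATOMIC(ElemSz), which only recognizes
// element sizes of 1, 2, 4, 8 and 16 bytes; any other ElemSz comes back as
// UNKNOWN_LIBCALL and triggers the "Unsupported element size" fatal error.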
SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO) {
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  AddNodeIDNode(ID, Opcode, VTList, Ops);
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                    VTList, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
                                       EVT MemVT, SDVTList VTs, SDValue Chain,
                                       SDValue Ptr, SDValue Cmp, SDValue Swp,
                                       MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDValue Chain, SDValue Ptr, SDValue Val,
                                MachineMemOperand *MMO) {
  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
          Opcode == ISD::ATOMIC_LOAD_SUB ||
          Opcode == ISD::ATOMIC_LOAD_AND ||
          Opcode == ISD::ATOMIC_LOAD_CLR ||
          Opcode == ISD::ATOMIC_LOAD_OR ||
          Opcode == ISD::ATOMIC_LOAD_XOR ||
          Opcode == ISD::ATOMIC_LOAD_NAND ||
          Opcode == ISD::ATOMIC_LOAD_MIN ||
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
          Opcode == ISD::ATOMIC_LOAD_FADD ||
          Opcode == ISD::ATOMIC_LOAD_FSUB ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");

  EVT VT = Val.getValueType();

  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
                                               getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Val};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                EVT VT, SDValue Chain, SDValue Ptr,
                                MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
  if (Ops.size() == 1)
    return Ops[0];

  SmallVector<EVT, 4> VTs;
  VTs.reserve(Ops.size());
  for (unsigned i = 0; i < Ops.size(); ++i)
    VTs.push_back(Ops[i].getValueType());
  return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
}
SDValue SelectionDAG::getMemIntrinsicNode(
    unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
    EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
    MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
  if (!Size && MemVT.isScalableVector())
    Size = MemoryLocation::UnknownSize;
  else if (!Size)
    Size = MemVT.getStoreSize();

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);

  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}

SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
                                          SDVTList VTList,
                                          ArrayRef<SDValue> Ops, EVT MemVT,
                                          MachineMemOperand *MMO) {
  assert((Opcode == ISD::INTRINSIC_VOID ||
          Opcode == ISD::INTRINSIC_W_CHAIN ||
          Opcode == ISD::PREFETCH ||
          ((int)Opcode <= std::numeric_limits<int>::max() &&
           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
         "Opcode is not a memory-accessing opcode!");

  // Memoize the node unless it returns a flag.
  MemIntrinsicSDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
        Opcode, dl.getIROrder(), VTList, MemVT, MMO));
    ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      return SDValue(E, 0);
    }

    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                      VTList, MemVT, MMO);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                      VTList, MemVT, MMO);
    createOperands(N, Ops);
  }
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
                                      SDValue Chain, int FrameIndex,
                                      int64_t Size, int64_t Offset) {
  const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
  const auto VTs = getVTList(MVT::Other);
  SDValue Ops[2] = {
      Chain,
      getFrameIndex(FrameIndex,
                    getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
                    true)};

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, VTs, Ops);
  ID.AddInteger(FrameIndex);
  ID.AddInteger(Size);
  ID.AddInteger(Offset);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
      Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain,
                                         uint64_t Guid, uint64_t Index,
                                         uint32_t Attr) {
  const unsigned Opcode = ISD::PSEUDO_PROBE;
  const auto VTs = getVTList(MVT::Other);
  SDValue Ops[] = {Chain};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, VTs, Ops);
  ID.AddInteger(Guid);
  ID.AddInteger(Index);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<PseudoProbeSDNode>(
      Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
                                           SelectionDAG &DAG, SDValue Ptr,
                                           int64_t Offset = 0) {
  // If this is FI+Offset, we can model it.
  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
    return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                             FI->getIndex(), Offset);

  // If this is (FI+Offset1)+Offset2, we can model it.
  if (Ptr.getOpcode() != ISD::ADD ||
      !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
      !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
    return Info;

  int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
  return MachinePointerInfo::getFixedStack(
      DAG.getMachineFunction(), FI,
      Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
}
/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
                                           SelectionDAG &DAG, SDValue Ptr,
                                           SDValue OffsetOp) {
  // If the 'Offset' value isn't a constant, we can't handle this.
  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
    return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
  if (OffsetOp.isUndef())
    return InferPointerInfo(Info, DAG, Ptr);
  return Info;
}
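// Worked example (illustrative): with Ptr = (add (FrameIndex 3), 8) and a
// constant OffsetOp of 4, the two overloads above combine to return
// MachinePointerInfo::getFixedStack(MF, /*FI=*/3, /*Offset=*/12).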
SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset,
                              MachinePointerInfo PtrInfo, EVT MemVT,
                              Align Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");

  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);
  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
  // clients.
  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
                                                   Alignment, AAInfo, Ranges);
  return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}

SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset, EVT MemVT,
                              MachineMemOperand *MMO) {
  if (VT == MemVT) {
    ExtType = ISD::NON_EXTLOAD;
  } else if (ExtType == ISD::NON_EXTLOAD) {
    assert(VT == MemVT && "Non-extending load from different memory type!");
  } else {
    // Extending load.
    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
           "Should only be an extending load, not truncating!");
    assert(VT.isInteger() == MemVT.isInteger() &&
           "Cannot convert from FP to Int or Int -> FP!");
    assert(VT.isVector() == MemVT.isVector() &&
           "Cannot use an ext load to convert to or from a vector!");
    assert((!VT.isVector() ||
            VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
           "Cannot use an ext load to change the number of vector elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");

  SDVTList VTs = Indexed ?
      getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                  ExtType, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
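// A note on the pattern above, which repeats for every memory node kind in
// this file: the FoldingSetNodeID encodes the opcode, value types, operands
// and the node's raw subclass data, so an identical request returns the
// existing node (after refineAlignment merges in the new memory operand's
// alignment) instead of creating a duplicate.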
SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachinePointerInfo PtrInfo,
                              MaybeAlign Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 MaybeAlign Alignment,
                                 MachineMemOperand::Flags MMOFlags,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
                 MemVT, Alignment, MMOFlags, AAInfo);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
                                     SDValue Base, SDValue Offset,
                                     ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
  // Don't propagate the invariant or dereferenceable flags.
  auto MMOFlags =
      LD->getMemOperand()->getFlags() &
      ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachinePointerInfo PtrInfo,
                               Align Alignment,
                               MachineMemOperand::Flags MMOFlags,
                               const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  uint64_t Size =
      MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return getStore(Chain, dl, Val, Ptr, MMO);
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachineMemOperand *MMO) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, false, VT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
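// Illustrative example: a truncating store of an i32 value into an i16 slot,
// getTruncStore(Chain, dl, Val32, Ptr, PtrInfo, MVT::i16, ...), writes only
// the low 16 bits of Val32 to memory; getTruncStore below falls back to a
// plain store when the value type and memory type already match.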
SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, MachinePointerInfo PtrInfo,
                                    EVT SVT, Align Alignment,
                                    MachineMemOperand::Flags MMOFlags,
                                    const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
      Alignment, AAInfo);
  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, EVT SVT,
                                    MachineMemOperand *MMO) {
  EVT VT = Val.getValueType();

  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (VT == SVT)
    return getStore(Chain, dl, Val, Ptr, MMO);

  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be a truncating store, not extending!");
  assert(VT.isInteger() == SVT.isInteger() &&
         "Can't do FP-INT conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, true, SVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   ST->isTruncatingStore(), ST->getMemoryVT(),
                                   ST->getMemOperand());
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
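// Illustrative note on the indexed conversion above: on a target with
// post-increment addressing, a store followed by "Ptr += 4" can be folded
// into one ISD::POST_INC store whose first result is the updated pointer,
// which is exactly the node getIndexedStore rebuilds from an unindexed store.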
SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                                    SDValue Base, SDValue Offset, SDValue Mask,
                                    SDValue PassThru, EVT MemVT,
                                    MachineMemOperand *MMO,
                                    ISD::MemIndexedMode AM,
                                    ISD::LoadExtType ExtTy, bool isExpanding) {
  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) &&
         "Unindexed masked load with an offset!");
  SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
                         : getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                        AM, ExtTy, isExpanding, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
                                           SDValue Base, SDValue Offset,
                                           ISD::MemIndexedMode AM) {
  MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() &&
         "Masked load is already an indexed load!");
  return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
                       Offset, LD->getMask(), LD->getPassThru(),
                       LD->getMemoryVT(), LD->getMemOperand(), AM,
                       LD->getExtensionType(), LD->isExpandingLoad());
}
SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
                                     SDValue Val, SDValue Base, SDValue Offset,
                                     SDValue Mask, EVT MemVT,
                                     MachineMemOperand *MMO,
                                     ISD::MemIndexedMode AM, bool IsTruncating,
                                     bool IsCompressing) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) &&
         "Unindexed masked store with an offset!");
  SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
                         : getVTList(MVT::Other);
  SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
      dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N =
      newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   IsTruncating, IsCompressing, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
                                            SDValue Base, SDValue Offset,
                                            ISD::MemIndexedMode AM) {
  MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() &&
         "Masked store is already an indexed store!");
  return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
                        ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
                        AM, ST->isTruncatingStore(), ST->isCompressingStore());
}

SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
                                      ArrayRef<SDValue> Ops,
                                      MachineMemOperand *MMO,
                                      ISD::MemIndexType IndexType,
                                      ISD::LoadExtType ExtTy) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType, ExtTy));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
  auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                          VTs, VT, MMO, IndexType, ExtTy);
  createOperands(N, Ops);

  assert(N->getPassThru().getValueType() == N->getValueType(0) &&
         "Incompatible type of the PassThru value in MaskedGatherSDNode");
  assert(N->getMask().getValueType().getVectorElementCount() ==
             N->getValueType(0).getVectorElementCount() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
             N->getValueType(0).getVectorElementCount().isScalable() &&
         "Scalable flags of index and data do not match");
  assert(ElementCount::isKnownGE(
             N->getIndex().getValueType().getVectorElementCount(),
             N->getValueType(0).getVectorElementCount()) &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
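// Conceptually (summary, not from the source), a masked gather performs one
// independent load per vector lane: Result[i] = *(Base + Index[i] * Scale)
// for every lane whose Mask bit is set, while disabled lanes yield the
// corresponding PassThru element. The scatter below is the store-side mirror.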
SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
                                       ArrayRef<SDValue> Ops,
                                       MachineMemOperand *MMO,
                                       ISD::MemIndexType IndexType,
                                       bool IsTrunc) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType, IsTrunc));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, VT, MMO, IndexType, IsTrunc);
  createOperands(N, Ops);

  assert(N->getMask().getValueType().getVectorElementCount() ==
             N->getValue().getValueType().getVectorElementCount() &&
         "Vector width mismatch between mask and data");
  assert(
      N->getIndex().getValueType().getVectorElementCount().isScalable() ==
          N->getValue().getValueType().getVectorElementCount().isScalable() &&
      "Scalable flags of index and data do not match");
  assert(ElementCount::isKnownGE(
             N->getIndex().getValueType().getVectorElementCount(),
             N->getValue().getValueType().getVectorElementCount()) &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
  // select undef, T, F --> T (if T is a constant), otherwise F
  // select ?, undef, F --> F
  // select ?, T, undef --> T
  if (Cond.isUndef())
    return isConstantValueOfAnyType(T) ? T : F;
  if (T.isUndef())
    return F;
  if (F.isUndef())
    return T;

  // select true, T, F --> T
  // select false, T, F --> F
  if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
    return CondC->isNullValue() ? F : T;

  // TODO: This should simplify VSELECT with constant condition using something
  // like this (but check boolean contents to be complete?):
  // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
  //   return T;
  // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
  //   return F;

  // select ?, T, T --> T
  if (T == F)
    return T;

  return SDValue();
}
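// Illustrative examples for the shift simplifier below: for an i32 value X,
// (srl X, 0) folds to X and (shl X, 32) folds to undef, since shifting by at
// least the bit width is undefined; a vector shift only folds to undef when
// every element's shift amount is out of range (or undef).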
SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
  // shift undef, Y --> 0 (can always assume that the undef value is 0)
  if (X.isUndef())
    return getConstant(0, SDLoc(X.getNode()), X.getValueType());
  // shift X, undef --> undef (because it may shift by the bitwidth)
  if (Y.isUndef())
    return getUNDEF(X.getValueType());

  // shift 0, Y --> 0
  // shift X, 0 --> X
  if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
    return X;

  // shift X, C >= bitwidth(X) --> undef
  // All vector elements must be too big (or undef) to avoid partial undefs.
  auto isShiftTooBig = [X](ConstantSDNode *Val) {
    return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
  };
  if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
    return getUNDEF(X.getValueType());

  return SDValue();
}

SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
                                      SDNodeFlags Flags) {
  // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
  // (an undef operand can be chosen to be NaN/Inf), then the result of this
  // operation is poison. That result can be relaxed to undef.
  ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true);
  ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
  bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
                (YC && YC->getValueAPF().isNaN());
  bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
                (YC && YC->getValueAPF().isInfinity());

  if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
    return getUNDEF(X.getValueType());

  if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
    return getUNDEF(X.getValueType());

  if (!YC)
    return SDValue();

  // X + -0.0 --> X
  if (Opcode == ISD::FADD)
    if (YC->getValueAPF().isNegZero())
      return X;

  // X - +0.0 --> X
  if (Opcode == ISD::FSUB)
    if (YC->getValueAPF().isPosZero())
      return X;

  // X * 1.0 --> X
  // X / 1.0 --> X
  if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
    if (YC->getValueAPF().isExactlyValue(1.0))
      return X;

  // X * 0.0 --> 0.0
  if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
    if (YC->getValueAPF().isZero())
      return getConstantFP(0.0, SDLoc(Y), Y.getValueType());

  return SDValue();
}

SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
                               SDValue Ptr, SDValue SV, unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops) {
  SDNodeFlags Flags;
  if (Inserter)
    Flags = Inserter->getFlags();
  return getNode(Opcode, DL, VT, Ops, Flags);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  unsigned NumOps = Ops.size();
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
  default: break;
  }

  switch (Opcode) {
  default: break;
  case ISD::BUILD_VECTOR:
    // Attempt to simplify BUILD_VECTOR.
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::CONCAT_VECTORS:
    if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::SELECT_CC:
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  case ISD::BR_CC:
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }
  // Memoize nodes.
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;

    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  N->setFlags(Flags);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops) {
  SDNodeFlags Flags;
  if (Inserter)
    Flags = Inserter->getFlags();
  return getNode(Opcode, DL, VTList, Ops, Flags);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops);

  switch (Opcode) {
  case ISD::STRICT_FP_EXTEND:
    assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
           "Invalid STRICT_FP_EXTEND!");
    assert(VTList.VTs[0].isFloatingPoint() &&
           Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
    assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
           "STRICT_FP_EXTEND result type should be vector iff the operand "
           "type is vector!");
    assert((!VTList.VTs[0].isVector() ||
            VTList.VTs[0].getVectorNumElements() ==
                Ops[1].getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
           "Invalid fpext node, dst <= src!");
    break;
  case ISD::STRICT_FP_ROUND:
    assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
    assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
           "STRICT_FP_ROUND result type should be vector iff the operand "
           "type is vector!");
    assert((!VTList.VTs[0].isVector() ||
            VTList.VTs[0].getVectorNumElements() ==
                Ops[1].getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(VTList.VTs[0].isFloatingPoint() &&
           Ops[1].getValueType().isFloatingPoint() &&
           VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
           isa<ConstantSDNode>(Ops[2]) &&
           (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
            cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
           "Invalid STRICT_FP_ROUND!");
    break;
#if 0
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
#endif
  }

  // Memoize the node unless it returns a flag.
  SDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
  }

  N->setFlags(Flags);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
7705 unsigned NumBits = VT.getScalarSizeInBits()*2; 7706 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 7707 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7708 } 7709 break; 7710 #endif 7711 } 7712 7713 // Memoize the node unless it returns a flag. 7714 SDNode *N; 7715 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 7716 FoldingSetNodeID ID; 7717 AddNodeIDNode(ID, Opcode, VTList, Ops); 7718 void *IP = nullptr; 7719 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7720 return SDValue(E, 0); 7721 7722 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7723 createOperands(N, Ops); 7724 CSEMap.InsertNode(N, IP); 7725 } else { 7726 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7727 createOperands(N, Ops); 7728 } 7729 7730 N->setFlags(Flags); 7731 InsertNode(N); 7732 SDValue V(N, 0); 7733 NewSDValueDbgMsg(V, "Creating new node: ", this); 7734 return V; 7735 } 7736 7737 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7738 SDVTList VTList) { 7739 return getNode(Opcode, DL, VTList, None); 7740 } 7741 7742 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7743 SDValue N1) { 7744 SDValue Ops[] = { N1 }; 7745 return getNode(Opcode, DL, VTList, Ops); 7746 } 7747 7748 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7749 SDValue N1, SDValue N2) { 7750 SDValue Ops[] = { N1, N2 }; 7751 return getNode(Opcode, DL, VTList, Ops); 7752 } 7753 7754 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7755 SDValue N1, SDValue N2, SDValue N3) { 7756 SDValue Ops[] = { N1, N2, N3 }; 7757 return getNode(Opcode, DL, VTList, Ops); 7758 } 7759 7760 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7761 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 7762 SDValue Ops[] = { N1, N2, N3, N4 }; 7763 return getNode(Opcode, DL, VTList, Ops); 7764 } 7765 7766 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7767 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 7768 SDValue N5) { 7769 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 7770 return getNode(Opcode, DL, VTList, Ops); 7771 } 7772 7773 SDVTList SelectionDAG::getVTList(EVT VT) { 7774 return makeVTList(SDNode::getValueTypeList(VT), 1); 7775 } 7776 7777 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 7778 FoldingSetNodeID ID; 7779 ID.AddInteger(2U); 7780 ID.AddInteger(VT1.getRawBits()); 7781 ID.AddInteger(VT2.getRawBits()); 7782 7783 void *IP = nullptr; 7784 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7785 if (!Result) { 7786 EVT *Array = Allocator.Allocate<EVT>(2); 7787 Array[0] = VT1; 7788 Array[1] = VT2; 7789 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 7790 VTListMap.InsertNode(Result, IP); 7791 } 7792 return Result->getSDVTList(); 7793 } 7794 7795 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 7796 FoldingSetNodeID ID; 7797 ID.AddInteger(3U); 7798 ID.AddInteger(VT1.getRawBits()); 7799 ID.AddInteger(VT2.getRawBits()); 7800 ID.AddInteger(VT3.getRawBits()); 7801 7802 void *IP = nullptr; 7803 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7804 if (!Result) { 7805 EVT *Array = Allocator.Allocate<EVT>(3); 7806 Array[0] = VT1; 7807 Array[1] = VT2; 7808 Array[2] = VT3; 7809 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 7810 VTListMap.InsertNode(Result, IP); 7811 } 7812 return Result->getSDVTList(); 7813 } 7814 7815 SDVTList 
SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 7816 FoldingSetNodeID ID; 7817 ID.AddInteger(4U); 7818 ID.AddInteger(VT1.getRawBits()); 7819 ID.AddInteger(VT2.getRawBits()); 7820 ID.AddInteger(VT3.getRawBits()); 7821 ID.AddInteger(VT4.getRawBits()); 7822 7823 void *IP = nullptr; 7824 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7825 if (!Result) { 7826 EVT *Array = Allocator.Allocate<EVT>(4); 7827 Array[0] = VT1; 7828 Array[1] = VT2; 7829 Array[2] = VT3; 7830 Array[3] = VT4; 7831 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 7832 VTListMap.InsertNode(Result, IP); 7833 } 7834 return Result->getSDVTList(); 7835 } 7836 7837 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 7838 unsigned NumVTs = VTs.size(); 7839 FoldingSetNodeID ID; 7840 ID.AddInteger(NumVTs); 7841 for (unsigned index = 0; index < NumVTs; index++) { 7842 ID.AddInteger(VTs[index].getRawBits()); 7843 } 7844 7845 void *IP = nullptr; 7846 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7847 if (!Result) { 7848 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 7849 llvm::copy(VTs, Array); 7850 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 7851 VTListMap.InsertNode(Result, IP); 7852 } 7853 return Result->getSDVTList(); 7854 } 7855 7856 7857 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 7858 /// specified operands. If the resultant node already exists in the DAG, 7859 /// this does not modify the specified node, instead it returns the node that 7860 /// already exists. If the resultant node does not exist in the DAG, the 7861 /// input node is returned. As a degenerate case, if you specify the same 7862 /// input operands as the node already has, the input node is returned. 7863 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 7864 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 7865 7866 // Check to see if there is no change. 7867 if (Op == N->getOperand(0)) return N; 7868 7869 // See if the modified node already exists. 7870 void *InsertPos = nullptr; 7871 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 7872 return Existing; 7873 7874 // Nope it doesn't. Remove the node from its current place in the maps. 7875 if (InsertPos) 7876 if (!RemoveNodeFromCSEMaps(N)) 7877 InsertPos = nullptr; 7878 7879 // Now we update the operands. 7880 N->OperandList[0].set(Op); 7881 7882 updateDivergence(N); 7883 // If this gets put into a CSE map, add it. 7884 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7885 return N; 7886 } 7887 7888 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 7889 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 7890 7891 // Check to see if there is no change. 7892 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 7893 return N; // No operands changed, just return the input node. 7894 7895 // See if the modified node already exists. 7896 void *InsertPos = nullptr; 7897 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 7898 return Existing; 7899 7900 // Nope it doesn't. Remove the node from its current place in the maps. 7901 if (InsertPos) 7902 if (!RemoveNodeFromCSEMaps(N)) 7903 InsertPos = nullptr; 7904 7905 // Now we update the operands. 7906 if (N->OperandList[0] != Op1) 7907 N->OperandList[0].set(Op1); 7908 if (N->OperandList[1] != Op2) 7909 N->OperandList[1].set(Op2); 7910 7911 updateDivergence(N); 7912 // If this gets put into a CSE map, add it. 
7913 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7914 return N; 7915 } 7916 7917 SDNode *SelectionDAG:: 7918 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 7919 SDValue Ops[] = { Op1, Op2, Op3 }; 7920 return UpdateNodeOperands(N, Ops); 7921 } 7922 7923 SDNode *SelectionDAG:: 7924 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7925 SDValue Op3, SDValue Op4) { 7926 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 7927 return UpdateNodeOperands(N, Ops); 7928 } 7929 7930 SDNode *SelectionDAG:: 7931 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7932 SDValue Op3, SDValue Op4, SDValue Op5) { 7933 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 7934 return UpdateNodeOperands(N, Ops); 7935 } 7936 7937 SDNode *SelectionDAG:: 7938 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 7939 unsigned NumOps = Ops.size(); 7940 assert(N->getNumOperands() == NumOps && 7941 "Update with wrong number of operands"); 7942 7943 // If no operands changed just return the input node. 7944 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 7945 return N; 7946 7947 // See if the modified node already exists. 7948 void *InsertPos = nullptr; 7949 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 7950 return Existing; 7951 7952 // Nope it doesn't. Remove the node from its current place in the maps. 7953 if (InsertPos) 7954 if (!RemoveNodeFromCSEMaps(N)) 7955 InsertPos = nullptr; 7956 7957 // Now we update the operands. 7958 for (unsigned i = 0; i != NumOps; ++i) 7959 if (N->OperandList[i] != Ops[i]) 7960 N->OperandList[i].set(Ops[i]); 7961 7962 updateDivergence(N); 7963 // If this gets put into a CSE map, add it. 7964 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7965 return N; 7966 } 7967 7968 /// DropOperands - Release the operands and set this node to have 7969 /// zero operands. 7970 void SDNode::DropOperands() { 7971 // Unlike the code in MorphNodeTo that does this, we don't need to 7972 // watch for dead nodes here. 7973 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 7974 SDUse &Use = *I++; 7975 Use.set(SDValue()); 7976 } 7977 } 7978 7979 void SelectionDAG::setNodeMemRefs(MachineSDNode *N, 7980 ArrayRef<MachineMemOperand *> NewMemRefs) { 7981 if (NewMemRefs.empty()) { 7982 N->clearMemRefs(); 7983 return; 7984 } 7985 7986 // Check if we can avoid allocating by storing a single reference directly. 7987 if (NewMemRefs.size() == 1) { 7988 N->MemRefs = NewMemRefs[0]; 7989 N->NumMemRefs = 1; 7990 return; 7991 } 7992 7993 MachineMemOperand **MemRefsBuffer = 7994 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size()); 7995 llvm::copy(NewMemRefs, MemRefsBuffer); 7996 N->MemRefs = MemRefsBuffer; 7997 N->NumMemRefs = static_cast<int>(NewMemRefs.size()); 7998 } 7999 8000 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 8001 /// machine opcode. 
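/// As an illustrative note (derived from the code below, not a separate
/// contract): the machine opcode is passed to MorphNodeTo in complemented
/// form (~MachineOpc), which is how target opcodes are distinguished from
/// ISD opcodes inside the node.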
8002 ///
8003 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8004                                    EVT VT) {
8005   SDVTList VTs = getVTList(VT);
8006   return SelectNodeTo(N, MachineOpc, VTs, None);
8007 }
8008
8009 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8010                                    EVT VT, SDValue Op1) {
8011   SDVTList VTs = getVTList(VT);
8012   SDValue Ops[] = { Op1 };
8013   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8014 }
8015
8016 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8017                                    EVT VT, SDValue Op1,
8018                                    SDValue Op2) {
8019   SDVTList VTs = getVTList(VT);
8020   SDValue Ops[] = { Op1, Op2 };
8021   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8022 }
8023
8024 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8025                                    EVT VT, SDValue Op1,
8026                                    SDValue Op2, SDValue Op3) {
8027   SDVTList VTs = getVTList(VT);
8028   SDValue Ops[] = { Op1, Op2, Op3 };
8029   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8030 }
8031
8032 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8033                                    EVT VT, ArrayRef<SDValue> Ops) {
8034   SDVTList VTs = getVTList(VT);
8035   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8036 }
8037
8038 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8039                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
8040   SDVTList VTs = getVTList(VT1, VT2);
8041   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8042 }
8043
8044 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8045                                    EVT VT1, EVT VT2) {
8046   SDVTList VTs = getVTList(VT1, VT2);
8047   return SelectNodeTo(N, MachineOpc, VTs, None);
8048 }
8049
8050 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8051                                    EVT VT1, EVT VT2, EVT VT3,
8052                                    ArrayRef<SDValue> Ops) {
8053   SDVTList VTs = getVTList(VT1, VT2, VT3);
8054   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8055 }
8056
8057 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8058                                    EVT VT1, EVT VT2,
8059                                    SDValue Op1, SDValue Op2) {
8060   SDVTList VTs = getVTList(VT1, VT2);
8061   SDValue Ops[] = { Op1, Op2 };
8062   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8063 }
8064
8065 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8066                                    SDVTList VTs, ArrayRef<SDValue> Ops) {
8067   SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
8068   // Reset the NodeID to -1.
8069   New->setNodeId(-1);
8070   if (New != N) {
8071     ReplaceAllUsesWith(N, New);
8072     RemoveDeadNode(N);
8073   }
8074   return New;
8075 }
8076
8077 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
8078 /// the line number information on the merged node, since it is not possible
8079 /// to preserve the information that the operation is associated with multiple
8080 /// lines. This will make the debugger work better at -O0, where there is a
8081 /// higher probability of having other instructions associated with that line.
8082 ///
8083 /// For IROrder, we keep the smaller of the two.
8084 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
8085   DebugLoc NLoc = N->getDebugLoc();
8086   if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
8087     N->setDebugLoc(DebugLoc());
8088   }
8089   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
8090   N->setIROrder(Order);
8091   return N;
8092 }
8093
8094 /// MorphNodeTo - This *mutates* the specified node to have the specified
8095 /// return type, opcode, and operands.
8096 ///
8097 /// Note that MorphNodeTo returns the resultant node. If there is already a
8098 /// node of the specified opcode and operands, it returns that node instead of
8099 /// the current one. Note that the SDLoc need not be the same.
8100 ///
8101 /// Using MorphNodeTo is faster than creating a new node and swapping it in
8102 /// with ReplaceAllUsesWith both because it often avoids allocating a new
8103 /// node, and because it doesn't require CSE recalculation for any of
8104 /// the node's users.
8105 ///
8106 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
8107 /// As a consequence, it isn't appropriate to use from within the DAG combiner
8108 /// or the legalizer, which maintain worklists that would need to be updated
8109 /// when deleting things.
8110 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
8111                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
8112   // If an identical node already exists, use it.
8113   void *IP = nullptr;
8114   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
8115     FoldingSetNodeID ID;
8116     AddNodeIDNode(ID, Opc, VTs, Ops);
8117     if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
8118       return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
8119   }
8120
8121   if (!RemoveNodeFromCSEMaps(N))
8122     IP = nullptr;
8123
8124   // Start the morphing.
8125   N->NodeType = Opc;
8126   N->ValueList = VTs.VTs;
8127   N->NumValues = VTs.NumVTs;
8128
8129   // Clear the operands list, updating used nodes to remove this from their
8130   // use list. Keep track of any operands that become dead as a result.
8131   SmallPtrSet<SDNode*, 16> DeadNodeSet;
8132   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
8133     SDUse &Use = *I++;
8134     SDNode *Used = Use.getNode();
8135     Use.set(SDValue());
8136     if (Used->use_empty())
8137       DeadNodeSet.insert(Used);
8138   }
8139
8140   // For a MachineSDNode, clear the memory references information.
8141   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
8142     MN->clearMemRefs();
8143
8144   // Swap for an appropriately sized array from the recycler.
8145   removeOperands(N);
8146   createOperands(N, Ops);
8147
8148   // Delete any nodes that are still dead after adding the uses for the
8149   // new operands.
8150   if (!DeadNodeSet.empty()) {
8151     SmallVector<SDNode *, 16> DeadNodes;
8152     for (SDNode *N : DeadNodeSet)
8153       if (N->use_empty())
8154         DeadNodes.push_back(N);
8155     RemoveDeadNodes(DeadNodes);
8156   }
8157
8158   if (IP)
8159     CSEMap.InsertNode(N, IP);   // Memoize the new node.
8160   return N;
8161 }
8162
8163 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
8164   unsigned OrigOpc = Node->getOpcode();
8165   unsigned NewOpc;
8166   switch (OrigOpc) {
8167   default:
8168     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
8169 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
8170   case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
8171 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
8172   case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
8173 #include "llvm/IR/ConstrainedOps.def"
8174   }
8175
8176   assert(Node->getNumValues() == 2 && "Unexpected number of results!");
8177
8178   // We're taking this node out of the chain, so we need to re-link things.
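  // Illustrative sketch (not part of the original source): for a node such as
  //   t10: f32,ch = strict_fadd t0, t1, t2
  // the chain result t10:1 is rerouted to the input chain t0 below, and the
  // node is then morphed into the non-strict form
  //   t10: f32 = fadd t1, t2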
8179 SDValue InputChain = Node->getOperand(0); 8180 SDValue OutputChain = SDValue(Node, 1); 8181 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 8182 8183 SmallVector<SDValue, 3> Ops; 8184 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) 8185 Ops.push_back(Node->getOperand(i)); 8186 8187 SDVTList VTs = getVTList(Node->getValueType(0)); 8188 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops); 8189 8190 // MorphNodeTo can operate in two ways: if an existing node with the 8191 // specified operands exists, it can just return it. Otherwise, it 8192 // updates the node in place to have the requested operands. 8193 if (Res == Node) { 8194 // If we updated the node in place, reset the node ID. To the isel, 8195 // this should be just like a newly allocated machine node. 8196 Res->setNodeId(-1); 8197 } else { 8198 ReplaceAllUsesWith(Node, Res); 8199 RemoveDeadNode(Node); 8200 } 8201 8202 return Res; 8203 } 8204 8205 /// getMachineNode - These are used for target selectors to create a new node 8206 /// with specified return type(s), MachineInstr opcode, and operands. 8207 /// 8208 /// Note that getMachineNode returns the resultant node. If there is already a 8209 /// node of the specified opcode and operands, it returns that node instead of 8210 /// the current one. 8211 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8212 EVT VT) { 8213 SDVTList VTs = getVTList(VT); 8214 return getMachineNode(Opcode, dl, VTs, None); 8215 } 8216 8217 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8218 EVT VT, SDValue Op1) { 8219 SDVTList VTs = getVTList(VT); 8220 SDValue Ops[] = { Op1 }; 8221 return getMachineNode(Opcode, dl, VTs, Ops); 8222 } 8223 8224 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8225 EVT VT, SDValue Op1, SDValue Op2) { 8226 SDVTList VTs = getVTList(VT); 8227 SDValue Ops[] = { Op1, Op2 }; 8228 return getMachineNode(Opcode, dl, VTs, Ops); 8229 } 8230 8231 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8232 EVT VT, SDValue Op1, SDValue Op2, 8233 SDValue Op3) { 8234 SDVTList VTs = getVTList(VT); 8235 SDValue Ops[] = { Op1, Op2, Op3 }; 8236 return getMachineNode(Opcode, dl, VTs, Ops); 8237 } 8238 8239 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8240 EVT VT, ArrayRef<SDValue> Ops) { 8241 SDVTList VTs = getVTList(VT); 8242 return getMachineNode(Opcode, dl, VTs, Ops); 8243 } 8244 8245 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8246 EVT VT1, EVT VT2, SDValue Op1, 8247 SDValue Op2) { 8248 SDVTList VTs = getVTList(VT1, VT2); 8249 SDValue Ops[] = { Op1, Op2 }; 8250 return getMachineNode(Opcode, dl, VTs, Ops); 8251 } 8252 8253 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8254 EVT VT1, EVT VT2, SDValue Op1, 8255 SDValue Op2, SDValue Op3) { 8256 SDVTList VTs = getVTList(VT1, VT2); 8257 SDValue Ops[] = { Op1, Op2, Op3 }; 8258 return getMachineNode(Opcode, dl, VTs, Ops); 8259 } 8260 8261 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8262 EVT VT1, EVT VT2, 8263 ArrayRef<SDValue> Ops) { 8264 SDVTList VTs = getVTList(VT1, VT2); 8265 return getMachineNode(Opcode, dl, VTs, Ops); 8266 } 8267 8268 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8269 EVT VT1, EVT VT2, EVT VT3, 8270 SDValue Op1, SDValue Op2) { 8271 SDVTList VTs = getVTList(VT1, VT2, VT3); 8272 SDValue Ops[] = { Op1, Op2 }; 8273 return 
getMachineNode(Opcode, dl, VTs, Ops); 8274 } 8275 8276 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8277 EVT VT1, EVT VT2, EVT VT3, 8278 SDValue Op1, SDValue Op2, 8279 SDValue Op3) { 8280 SDVTList VTs = getVTList(VT1, VT2, VT3); 8281 SDValue Ops[] = { Op1, Op2, Op3 }; 8282 return getMachineNode(Opcode, dl, VTs, Ops); 8283 } 8284 8285 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8286 EVT VT1, EVT VT2, EVT VT3, 8287 ArrayRef<SDValue> Ops) { 8288 SDVTList VTs = getVTList(VT1, VT2, VT3); 8289 return getMachineNode(Opcode, dl, VTs, Ops); 8290 } 8291 8292 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8293 ArrayRef<EVT> ResultTys, 8294 ArrayRef<SDValue> Ops) { 8295 SDVTList VTs = getVTList(ResultTys); 8296 return getMachineNode(Opcode, dl, VTs, Ops); 8297 } 8298 8299 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 8300 SDVTList VTs, 8301 ArrayRef<SDValue> Ops) { 8302 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 8303 MachineSDNode *N; 8304 void *IP = nullptr; 8305 8306 if (DoCSE) { 8307 FoldingSetNodeID ID; 8308 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 8309 IP = nullptr; 8310 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 8311 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 8312 } 8313 } 8314 8315 // Allocate a new MachineSDNode. 8316 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 8317 createOperands(N, Ops); 8318 8319 if (DoCSE) 8320 CSEMap.InsertNode(N, IP); 8321 8322 InsertNode(N); 8323 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this); 8324 return N; 8325 } 8326 8327 /// getTargetExtractSubreg - A convenience function for creating 8328 /// TargetOpcode::EXTRACT_SUBREG nodes. 8329 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8330 SDValue Operand) { 8331 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8332 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 8333 VT, Operand, SRIdxVal); 8334 return SDValue(Subreg, 0); 8335 } 8336 8337 /// getTargetInsertSubreg - A convenience function for creating 8338 /// TargetOpcode::INSERT_SUBREG nodes. 8339 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8340 SDValue Operand, SDValue Subreg) { 8341 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8342 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 8343 VT, Operand, Subreg, SRIdxVal); 8344 return SDValue(Result, 0); 8345 } 8346 8347 /// getNodeIfExists - Get the specified node if it's already available, or 8348 /// else return NULL. 8349 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 8350 ArrayRef<SDValue> Ops) { 8351 SDNodeFlags Flags; 8352 if (Inserter) 8353 Flags = Inserter->getFlags(); 8354 return getNodeIfExists(Opcode, VTList, Ops, Flags); 8355 } 8356 8357 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 8358 ArrayRef<SDValue> Ops, 8359 const SDNodeFlags Flags) { 8360 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 8361 FoldingSetNodeID ID; 8362 AddNodeIDNode(ID, Opcode, VTList, Ops); 8363 void *IP = nullptr; 8364 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 8365 E->intersectFlagsWith(Flags); 8366 return E; 8367 } 8368 } 8369 return nullptr; 8370 } 8371 8372 /// doesNodeExist - Check if a node exists without modifying its flags. 
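/// Unlike getNodeIfExists above, this never calls intersectFlagsWith on a
/// match, so no existing node is modified by the query.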
8373 bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList, 8374 ArrayRef<SDValue> Ops) { 8375 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 8376 FoldingSetNodeID ID; 8377 AddNodeIDNode(ID, Opcode, VTList, Ops); 8378 void *IP = nullptr; 8379 if (FindNodeOrInsertPos(ID, SDLoc(), IP)) 8380 return true; 8381 } 8382 return false; 8383 } 8384 8385 /// getDbgValue - Creates a SDDbgValue node. 8386 /// 8387 /// SDNode 8388 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 8389 SDNode *N, unsigned R, bool IsIndirect, 8390 const DebugLoc &DL, unsigned O) { 8391 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8392 "Expected inlined-at fields to agree"); 8393 return new (DbgInfo->getAlloc()) 8394 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 8395 } 8396 8397 /// Constant 8398 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 8399 DIExpression *Expr, 8400 const Value *C, 8401 const DebugLoc &DL, unsigned O) { 8402 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8403 "Expected inlined-at fields to agree"); 8404 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 8405 } 8406 8407 /// FrameIndex 8408 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 8409 DIExpression *Expr, unsigned FI, 8410 bool IsIndirect, 8411 const DebugLoc &DL, 8412 unsigned O) { 8413 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8414 "Expected inlined-at fields to agree"); 8415 return new (DbgInfo->getAlloc()) 8416 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX); 8417 } 8418 8419 /// VReg 8420 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, 8421 DIExpression *Expr, 8422 unsigned VReg, bool IsIndirect, 8423 const DebugLoc &DL, unsigned O) { 8424 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8425 "Expected inlined-at fields to agree"); 8426 return new (DbgInfo->getAlloc()) 8427 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG); 8428 } 8429 8430 void SelectionDAG::transferDbgValues(SDValue From, SDValue To, 8431 unsigned OffsetInBits, unsigned SizeInBits, 8432 bool InvalidateDbg) { 8433 SDNode *FromNode = From.getNode(); 8434 SDNode *ToNode = To.getNode(); 8435 assert(FromNode && ToNode && "Can't modify dbg values"); 8436 8437 // PR35338 8438 // TODO: assert(From != To && "Redundant dbg value transfer"); 8439 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); 8440 if (From == To || FromNode == ToNode) 8441 return; 8442 8443 if (!FromNode->getHasDebugValue()) 8444 return; 8445 8446 SmallVector<SDDbgValue *, 2> ClonedDVs; 8447 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { 8448 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated()) 8449 continue; 8450 8451 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); 8452 8453 // Just transfer the dbg value attached to From. 8454 if (Dbg->getResNo() != From.getResNo()) 8455 continue; 8456 8457 DIVariable *Var = Dbg->getVariable(); 8458 auto *Expr = Dbg->getExpression(); 8459 // If a fragment is requested, update the expression. 8460 if (SizeInBits) { 8461 // When splitting a larger (e.g., sign-extended) value whose 8462 // lower bits are described with an SDDbgValue, do not attempt 8463 // to transfer the SDDbgValue to the upper bits. 
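      // For example (illustrative): if the existing expression already
      // describes a 32-bit fragment of the variable, a request for bits
      // [32, 64) has OffsetInBits + SizeInBits > FI->SizeInBits and is
      // skipped below.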
8464 if (auto FI = Expr->getFragmentInfo()) 8465 if (OffsetInBits + SizeInBits > FI->SizeInBits) 8466 continue; 8467 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, 8468 SizeInBits); 8469 if (!Fragment) 8470 continue; 8471 Expr = *Fragment; 8472 } 8473 // Clone the SDDbgValue and move it to To. 8474 SDDbgValue *Clone = getDbgValue( 8475 Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(), 8476 std::max(ToNode->getIROrder(), Dbg->getOrder())); 8477 ClonedDVs.push_back(Clone); 8478 8479 if (InvalidateDbg) { 8480 // Invalidate value and indicate the SDDbgValue should not be emitted. 8481 Dbg->setIsInvalidated(); 8482 Dbg->setIsEmitted(); 8483 } 8484 } 8485 8486 for (SDDbgValue *Dbg : ClonedDVs) 8487 AddDbgValue(Dbg, ToNode, false); 8488 } 8489 8490 void SelectionDAG::salvageDebugInfo(SDNode &N) { 8491 if (!N.getHasDebugValue()) 8492 return; 8493 8494 SmallVector<SDDbgValue *, 2> ClonedDVs; 8495 for (auto DV : GetDbgValues(&N)) { 8496 if (DV->isInvalidated()) 8497 continue; 8498 switch (N.getOpcode()) { 8499 default: 8500 break; 8501 case ISD::ADD: 8502 SDValue N0 = N.getOperand(0); 8503 SDValue N1 = N.getOperand(1); 8504 if (!isConstantIntBuildVectorOrConstantInt(N0) && 8505 isConstantIntBuildVectorOrConstantInt(N1)) { 8506 uint64_t Offset = N.getConstantOperandVal(1); 8507 // Rewrite an ADD constant node into a DIExpression. Since we are 8508 // performing arithmetic to compute the variable's *value* in the 8509 // DIExpression, we need to mark the expression with a 8510 // DW_OP_stack_value. 8511 auto *DIExpr = DV->getExpression(); 8512 DIExpr = 8513 DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset); 8514 SDDbgValue *Clone = 8515 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(), 8516 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder()); 8517 ClonedDVs.push_back(Clone); 8518 DV->setIsInvalidated(); 8519 DV->setIsEmitted(); 8520 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; 8521 N0.getNode()->dumprFull(this); 8522 dbgs() << " into " << *DIExpr << '\n'); 8523 } 8524 } 8525 } 8526 8527 for (SDDbgValue *Dbg : ClonedDVs) 8528 AddDbgValue(Dbg, Dbg->getSDNode(), false); 8529 } 8530 8531 /// Creates a SDDbgLabel node. 8532 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label, 8533 const DebugLoc &DL, unsigned O) { 8534 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && 8535 "Expected inlined-at fields to agree"); 8536 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); 8537 } 8538 8539 namespace { 8540 8541 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 8542 /// pointed to by a use iterator is deleted, increment the use iterator 8543 /// so that it doesn't dangle. 8544 /// 8545 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 8546 SDNode::use_iterator &UI; 8547 SDNode::use_iterator &UE; 8548 8549 void NodeDeleted(SDNode *N, SDNode *E) override { 8550 // Increment the iterator as needed. 8551 while (UI != UE && N == *UI) 8552 ++UI; 8553 } 8554 8555 public: 8556 RAUWUpdateListener(SelectionDAG &d, 8557 SDNode::use_iterator &ui, 8558 SDNode::use_iterator &ue) 8559 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 8560 }; 8561 8562 } // end anonymous namespace 8563 8564 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8565 /// This can cause recursive merging of nodes in the DAG. 8566 /// 8567 /// This version assumes From has a single result value. 
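/// For example (illustrative): after ReplaceAllUsesWith(t1, t9), a user such
/// as t5 = add t1, t2 is rewritten in place to t5 = add t9, t2; if an
/// identical add of t9 already exists, the two nodes are merged by CSE.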
8568 ///
8569 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
8570   SDNode *From = FromN.getNode();
8571   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
8572          "Cannot replace with this method!");
8573   assert(From != To.getNode() && "Cannot replace uses of a value with itself");
8574
8575   // Preserve Debug Values
8576   transferDbgValues(FromN, To);
8577
8578   // Iterate over all the existing uses of From. New uses will be added
8579   // to the beginning of the use list, which we avoid visiting.
8580   // This specifically avoids visiting uses of From that arise while the
8581   // replacement is happening, because any such uses would be the result
8582   // of CSE: If an existing node looks like From after one of its operands
8583   // is replaced by To, we don't want to replace all of its uses with To
8584   // too. See PR3018 for more info.
8585   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8586   RAUWUpdateListener Listener(*this, UI, UE);
8587   while (UI != UE) {
8588     SDNode *User = *UI;
8589
8590     // This node is about to morph, remove its old self from the CSE maps.
8591     RemoveNodeFromCSEMaps(User);
8592
8593     // A user can appear in a use list multiple times, and when this
8594     // happens the uses are usually next to each other in the list.
8595     // To help reduce the number of CSE recomputations, process all
8596     // the uses of this user that we can find this way.
8597     do {
8598       SDUse &Use = UI.getUse();
8599       ++UI;
8600       Use.set(To);
8601       if (To->isDivergent() != From->isDivergent())
8602         updateDivergence(User);
8603     } while (UI != UE && *UI == User);
8604     // Now that we have modified User, add it back to the CSE maps. If it
8605     // already exists there, recursively merge the results together.
8606     AddModifiedNodeToCSEMaps(User);
8607   }
8608
8609   // If we just RAUW'd the root, take note.
8610   if (FromN == getRoot())
8611     setRoot(To);
8612 }
8613
8614 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8615 /// This can cause recursive merging of nodes in the DAG.
8616 ///
8617 /// This version assumes that for each value of From, there is a
8618 /// corresponding value in To in the same position with the same type.
8619 ///
8620 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
8621 #ifndef NDEBUG
8622   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8623     assert((!From->hasAnyUseOfValue(i) ||
8624             From->getValueType(i) == To->getValueType(i)) &&
8625            "Cannot use this version of ReplaceAllUsesWith!");
8626 #endif
8627
8628   // Handle the trivial case.
8629   if (From == To)
8630     return;
8631
8632   // Preserve Debug Info. Only do this if there's a use.
8633   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8634     if (From->hasAnyUseOfValue(i)) {
8635       assert((i < To->getNumValues()) && "Invalid To location");
8636       transferDbgValues(SDValue(From, i), SDValue(To, i));
8637     }
8638
8639   // Iterate over just the existing users of From. See the comments in
8640   // the ReplaceAllUsesWith above.
8641   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8642   RAUWUpdateListener Listener(*this, UI, UE);
8643   while (UI != UE) {
8644     SDNode *User = *UI;
8645
8646     // This node is about to morph, remove its old self from the CSE maps.
8647     RemoveNodeFromCSEMaps(User);
8648
8649     // A user can appear in a use list multiple times, and when this
8650     // happens the uses are usually next to each other in the list.
8651     // To help reduce the number of CSE recomputations, process all
8652     // the uses of this user that we can find this way.
8653     do {
8654       SDUse &Use = UI.getUse();
8655       ++UI;
8656       Use.setNode(To);
8657       if (To->isDivergent() != From->isDivergent())
8658         updateDivergence(User);
8659     } while (UI != UE && *UI == User);
8660
8661     // Now that we have modified User, add it back to the CSE maps. If it
8662     // already exists there, recursively merge the results together.
8663     AddModifiedNodeToCSEMaps(User);
8664   }
8665
8666   // If we just RAUW'd the root, take note.
8667   if (From == getRoot().getNode())
8668     setRoot(SDValue(To, getRoot().getResNo()));
8669 }
8670
8671 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8672 /// This can cause recursive merging of nodes in the DAG.
8673 ///
8674 /// This version can replace From with any result values. To must match the
8675 /// number and types of values returned by From.
8676 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
8677   if (From->getNumValues() == 1)   // Handle the simple case efficiently.
8678     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
8679
8680   // Preserve Debug Info.
8681   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8682     transferDbgValues(SDValue(From, i), To[i]);
8683
8684   // Iterate over just the existing users of From. See the comments in
8685   // the ReplaceAllUsesWith above.
8686   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8687   RAUWUpdateListener Listener(*this, UI, UE);
8688   while (UI != UE) {
8689     SDNode *User = *UI;
8690
8691     // This node is about to morph, remove its old self from the CSE maps.
8692     RemoveNodeFromCSEMaps(User);
8693
8694     // A user can appear in a use list multiple times, and when this happens the
8695     // uses are usually next to each other in the list. To help reduce the
8696     // number of CSE and divergence recomputations, process all the uses of this
8697     // user that we can find this way.
8698     bool To_IsDivergent = false;
8699     do {
8700       SDUse &Use = UI.getUse();
8701       const SDValue &ToOp = To[Use.getResNo()];
8702       ++UI;
8703       Use.set(ToOp);
8704       To_IsDivergent |= ToOp->isDivergent();
8705     } while (UI != UE && *UI == User);
8706
8707     if (To_IsDivergent != From->isDivergent())
8708       updateDivergence(User);
8709
8710     // Now that we have modified User, add it back to the CSE maps. If it
8711     // already exists there, recursively merge the results together.
8712     AddModifiedNodeToCSEMaps(User);
8713   }
8714
8715   // If we just RAUW'd the root, take note.
8716   if (From == getRoot().getNode())
8717     setRoot(SDValue(To[getRoot().getResNo()]));
8718 }
8719
8720 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
8721 /// uses of other values produced by From.getNode() alone. This can cause
8722 /// recursive merging of nodes in the DAG, just as for ReplaceAllUsesWith.
8723 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
8724   // Handle the really simple, really trivial case efficiently.
8725   if (From == To) return;
8726
8727   // Handle the simple, trivial case efficiently.
8728   if (From.getNode()->getNumValues() == 1) {
8729     ReplaceAllUsesWith(From, To);
8730     return;
8731   }
8732
8733   // Preserve Debug Info.
8734   transferDbgValues(From, To);
8735
8736   // Iterate over just the existing users of From. See the comments in
8737   // the ReplaceAllUsesWith above.
8738 SDNode::use_iterator UI = From.getNode()->use_begin(), 8739 UE = From.getNode()->use_end(); 8740 RAUWUpdateListener Listener(*this, UI, UE); 8741 while (UI != UE) { 8742 SDNode *User = *UI; 8743 bool UserRemovedFromCSEMaps = false; 8744 8745 // A user can appear in a use list multiple times, and when this 8746 // happens the uses are usually next to each other in the list. 8747 // To help reduce the number of CSE recomputations, process all 8748 // the uses of this user that we can find this way. 8749 do { 8750 SDUse &Use = UI.getUse(); 8751 8752 // Skip uses of different values from the same node. 8753 if (Use.getResNo() != From.getResNo()) { 8754 ++UI; 8755 continue; 8756 } 8757 8758 // If this node hasn't been modified yet, it's still in the CSE maps, 8759 // so remove its old self from the CSE maps. 8760 if (!UserRemovedFromCSEMaps) { 8761 RemoveNodeFromCSEMaps(User); 8762 UserRemovedFromCSEMaps = true; 8763 } 8764 8765 ++UI; 8766 Use.set(To); 8767 if (To->isDivergent() != From->isDivergent()) 8768 updateDivergence(User); 8769 } while (UI != UE && *UI == User); 8770 // We are iterating over all uses of the From node, so if a use 8771 // doesn't use the specific value, no changes are made. 8772 if (!UserRemovedFromCSEMaps) 8773 continue; 8774 8775 // Now that we have modified User, add it back to the CSE maps. If it 8776 // already exists there, recursively merge the results together. 8777 AddModifiedNodeToCSEMaps(User); 8778 } 8779 8780 // If we just RAUW'd the root, take note. 8781 if (From == getRoot()) 8782 setRoot(To); 8783 } 8784 8785 namespace { 8786 8787 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 8788 /// to record information about a use. 8789 struct UseMemo { 8790 SDNode *User; 8791 unsigned Index; 8792 SDUse *Use; 8793 }; 8794 8795 /// operator< - Sort Memos by User. 
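/// Keeping all UseMemos of a given User adjacent lets the replacement loop
/// below remove each User from the CSE maps once, rewrite all of its
/// affected uses, and re-add it once.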
8796 bool operator<(const UseMemo &L, const UseMemo &R) {
8797   return (intptr_t)L.User < (intptr_t)R.User;
8798 }
8799
8800 } // end anonymous namespace
8801
8802 bool SelectionDAG::calculateDivergence(SDNode *N) {
8803   if (TLI->isSDNodeAlwaysUniform(N)) {
8804     assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, DA) &&
8805            "Conflicting divergence information!");
8806     return false;
8807   }
8808   if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA))
8809     return true;
8810   for (auto &Op : N->ops()) {
8811     if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
8812       return true;
8813   }
8814   return false;
8815 }
8816
8817 void SelectionDAG::updateDivergence(SDNode *N) {
8818   SmallVector<SDNode *, 16> Worklist(1, N);
8819   do {
8820     N = Worklist.pop_back_val();
8821     bool IsDivergent = calculateDivergence(N);
8822     if (N->SDNodeBits.IsDivergent != IsDivergent) {
8823       N->SDNodeBits.IsDivergent = IsDivergent;
8824       llvm::append_range(Worklist, N->uses());
8825     }
8826   } while (!Worklist.empty());
8827 }
8828
8829 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
8830   DenseMap<SDNode *, unsigned> Degree;
8831   Order.reserve(AllNodes.size());
8832   for (auto &N : allnodes()) {
8833     unsigned NOps = N.getNumOperands();
8834     Degree[&N] = NOps;
8835     if (0 == NOps)
8836       Order.push_back(&N);
8837   }
8838   for (size_t I = 0; I != Order.size(); ++I) {
8839     SDNode *N = Order[I];
8840     for (auto U : N->uses()) {
8841       unsigned &UnsortedOps = Degree[U];
8842       if (0 == --UnsortedOps)
8843         Order.push_back(U);
8844     }
8845   }
8846 }
8847
8848 #ifndef NDEBUG
8849 void SelectionDAG::VerifyDAGDiverence() {
8850   std::vector<SDNode *> TopoOrder;
8851   CreateTopologicalOrder(TopoOrder);
8852   for (auto *N : TopoOrder) {
8853     assert(calculateDivergence(N) == N->isDivergent() &&
8854            "Divergence bit inconsistency detected");
8855   }
8856 }
8857 #endif
8858
8859 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
8860 /// uses of other values produced by From.getNode() alone. The same value
8861 /// may appear in both the From and To list. This can cause recursive
8862 /// merging of nodes in the DAG, just as for ReplaceAllUsesWith.
8863 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
8864                                               const SDValue *To,
8865                                               unsigned Num) {
8866   // Handle the simple, trivial case efficiently.
8867   if (Num == 1)
8868     return ReplaceAllUsesOfValueWith(*From, *To);
8869
8870   transferDbgValues(*From, *To);
8871
8872   // Read up all the uses and make records of them. This helps
8873   // processing new uses that are introduced during the
8874   // replacement process.
8875   SmallVector<UseMemo, 4> Uses;
8876   for (unsigned i = 0; i != Num; ++i) {
8877     unsigned FromResNo = From[i].getResNo();
8878     SDNode *FromNode = From[i].getNode();
8879     for (SDNode::use_iterator UI = FromNode->use_begin(),
8880          E = FromNode->use_end(); UI != E; ++UI) {
8881       SDUse &Use = UI.getUse();
8882       if (Use.getResNo() == FromResNo) {
8883         UseMemo Memo = { *UI, i, &Use };
8884         Uses.push_back(Memo);
8885       }
8886     }
8887   }
8888
8889   // Sort the uses, so that all the uses from a given User are together.
8890   llvm::sort(Uses);
8891
8892   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8893        UseIndex != UseIndexEnd; ) {
8894     // We know that this user uses some value of From. If it is the right
8895     // value, update it.
8896     SDNode *User = Uses[UseIndex].User;
8897
8898     // This node is about to morph, remove its old self from the CSE maps.
8899     RemoveNodeFromCSEMaps(User);
8900
8901     // The Uses array is sorted, so all the uses for a given User
8902     // are next to each other in the list.
8903     // To help reduce the number of CSE recomputations, process all
8904     // the uses of this user that we can find this way.
8905     do {
8906       unsigned i = Uses[UseIndex].Index;
8907       SDUse &Use = *Uses[UseIndex].Use;
8908       ++UseIndex;
8909
8910       Use.set(To[i]);
8911     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8912
8913     // Now that we have modified User, add it back to the CSE maps. If it
8914     // already exists there, recursively merge the results together.
8915     AddModifiedNodeToCSEMaps(User);
8916   }
8917 }
8918
8919 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
8920 /// based on their topological order. It returns the maximum id; as a side
8921 /// effect, the AllNodes list is re-sorted into topological order.
8922 unsigned SelectionDAG::AssignTopologicalOrder() {
8923   unsigned DAGSize = 0;
8924
8925   // SortedPos tracks the progress of the algorithm. Nodes before it are
8926   // sorted, nodes after it are unsorted. When the algorithm completes
8927   // it is at the end of the list.
8928   allnodes_iterator SortedPos = allnodes_begin();
8929
8930   // Visit all the nodes. Move nodes with no operands to the front of
8931   // the list immediately. Annotate nodes that do have operands with their
8932   // operand count. Before we do this, the Node Id fields of the nodes
8933   // may contain arbitrary values. After, the Node Id fields for nodes
8934   // before SortedPos will contain the topological sort index, and the
8935   // Node Id fields for nodes at SortedPos and after will contain the
8936   // count of outstanding operands.
8937   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
8938     SDNode *N = &*I++;
8939     checkForCycles(N, this);
8940     unsigned Degree = N->getNumOperands();
8941     if (Degree == 0) {
8942       // A node with no operands: move it to the sorted position immediately.
8943       N->setNodeId(DAGSize++);
8944       allnodes_iterator Q(N);
8945       if (Q != SortedPos)
8946         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
8947       assert(SortedPos != AllNodes.end() && "Overran node list");
8948       ++SortedPos;
8949     } else {
8950       // Temporarily use the Node Id as scratch space for the degree count.
8951       N->setNodeId(Degree);
8952     }
8953   }
8954
8955   // Visit all the nodes. As we iterate, move nodes into sorted order,
8956   // such that by the time the end is reached all nodes will be sorted.
8957   for (SDNode &Node : allnodes()) {
8958     SDNode *N = &Node;
8959     checkForCycles(N, this);
8960     // N is in sorted position, so each of its users has one fewer operand
8961     // that needs to be sorted.
8962     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
8963          UI != UE; ++UI) {
8964       SDNode *P = *UI;
8965       unsigned Degree = P->getNodeId();
8966       assert(Degree != 0 && "Invalid node degree");
8967       --Degree;
8968       if (Degree == 0) {
8969         // All of P's operands are sorted, so P may be sorted now.
8970         P->setNodeId(DAGSize++);
8971         if (P->getIterator() != SortedPos)
8972           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
8973         assert(SortedPos != AllNodes.end() && "Overran node list");
8974         ++SortedPos;
8975       } else {
8976         // Update P's outstanding operand count.
8977         P->setNodeId(Degree);
8978       }
8979     }
8980     if (Node.getIterator() == SortedPos) {
8981 #ifndef NDEBUG
8982       allnodes_iterator I(N);
8983       SDNode *S = &*++I;
8984       dbgs() << "Overran sorted position:\n";
8985       S->dumprFull(this); dbgs() << "\n";
8986       dbgs() << "Checking if this is due to cycles\n";
8987       checkForCycles(this, true);
8988 #endif
8989       llvm_unreachable(nullptr);
8990     }
8991   }
8992
8993   assert(SortedPos == AllNodes.end() &&
8994          "Topological sort incomplete!");
8995   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
8996          "First node in topological sort is not the entry token!");
8997   assert(AllNodes.front().getNodeId() == 0 &&
8998          "First node in topological sort has non-zero id!");
8999   assert(AllNodes.front().getNumOperands() == 0 &&
9000          "First node in topological sort has operands!");
9001   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
9002          "Last node in topological sort has unexpected id!");
9003   assert(AllNodes.back().use_empty() &&
9004          "Last node in topological sort has users!");
9005   assert(DAGSize == allnodes_size() && "Node count mismatch!");
9006   return DAGSize;
9007 }
9008
9009 /// AddDbgValue - Add a dbg_value SDDbgValue. If SD is non-null, that means
9010 /// the value is produced by SD.
9011 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
9012   if (SD) {
9013     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
9014     SD->setHasDebugValue(true);
9015   }
9016   DbgInfo->add(DB, SD, isParameter);
9017 }
9018
9019 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
9020   DbgInfo->add(DB);
9021 }
9022
9023 SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
9024                                                    SDValue NewMemOpChain) {
9025   assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
9026   assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
9027   // The new memory operation must have the same position as the old load in
9028   // terms of memory dependency. Create a TokenFactor for the old load and new
9029   // memory operation and update uses of the old load's output chain to use that
9030   // TokenFactor.
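  // Illustrative sketch (not part of the original source): given the old
  // load's chain result
  //   t2: ch = <old chain>
  // and a new memory operation whose chain result is t7: ch, we create
  //   t9: ch = TokenFactor t2, t7
  // and reroute the other users of t2 to t9, so both operations keep their
  // ordering constraints.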
9031 if (OldChain == NewMemOpChain || OldChain.use_empty()) 9032 return NewMemOpChain; 9033 9034 SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other, 9035 OldChain, NewMemOpChain); 9036 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 9037 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain); 9038 return TokenFactor; 9039 } 9040 9041 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, 9042 SDValue NewMemOp) { 9043 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node"); 9044 SDValue OldChain = SDValue(OldLoad, 1); 9045 SDValue NewMemOpChain = NewMemOp.getValue(1); 9046 return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain); 9047 } 9048 9049 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op, 9050 Function **OutFunction) { 9051 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol"); 9052 9053 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 9054 auto *Module = MF->getFunction().getParent(); 9055 auto *Function = Module->getFunction(Symbol); 9056 9057 if (OutFunction != nullptr) 9058 *OutFunction = Function; 9059 9060 if (Function != nullptr) { 9061 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace()); 9062 return getGlobalAddress(Function, SDLoc(Op), PtrTy); 9063 } 9064 9065 std::string ErrorStr; 9066 raw_string_ostream ErrorFormatter(ErrorStr); 9067 9068 ErrorFormatter << "Undefined external symbol "; 9069 ErrorFormatter << '"' << Symbol << '"'; 9070 ErrorFormatter.flush(); 9071 9072 report_fatal_error(ErrorStr); 9073 } 9074 9075 //===----------------------------------------------------------------------===// 9076 // SDNode Class 9077 //===----------------------------------------------------------------------===// 9078 9079 bool llvm::isNullConstant(SDValue V) { 9080 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 9081 return Const != nullptr && Const->isNullValue(); 9082 } 9083 9084 bool llvm::isNullFPConstant(SDValue V) { 9085 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 9086 return Const != nullptr && Const->isZero() && !Const->isNegative(); 9087 } 9088 9089 bool llvm::isAllOnesConstant(SDValue V) { 9090 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 9091 return Const != nullptr && Const->isAllOnesValue(); 9092 } 9093 9094 bool llvm::isOneConstant(SDValue V) { 9095 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 9096 return Const != nullptr && Const->isOne(); 9097 } 9098 9099 SDValue llvm::peekThroughBitcasts(SDValue V) { 9100 while (V.getOpcode() == ISD::BITCAST) 9101 V = V.getOperand(0); 9102 return V; 9103 } 9104 9105 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { 9106 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) 9107 V = V.getOperand(0); 9108 return V; 9109 } 9110 9111 SDValue llvm::peekThroughExtractSubvectors(SDValue V) { 9112 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) 9113 V = V.getOperand(0); 9114 return V; 9115 } 9116 9117 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) { 9118 if (V.getOpcode() != ISD::XOR) 9119 return false; 9120 V = peekThroughBitcasts(V.getOperand(1)); 9121 unsigned NumBits = V.getScalarValueSizeInBits(); 9122 ConstantSDNode *C = 9123 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true); 9124 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits); 9125 } 9126 9127 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs, 9128 bool AllowTruncation) { 9129 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 9130 
return CN; 9131 9132 // SplatVectors can truncate their operands. Ignore that case here unless 9133 // AllowTruncation is set. 9134 if (N->getOpcode() == ISD::SPLAT_VECTOR) { 9135 EVT VecEltVT = N->getValueType(0).getVectorElementType(); 9136 if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 9137 EVT CVT = CN->getValueType(0); 9138 assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension"); 9139 if (AllowTruncation || CVT == VecEltVT) 9140 return CN; 9141 } 9142 } 9143 9144 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9145 BitVector UndefElements; 9146 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 9147 9148 // BuildVectors can truncate their operands. Ignore that case here unless 9149 // AllowTruncation is set. 9150 if (CN && (UndefElements.none() || AllowUndefs)) { 9151 EVT CVT = CN->getValueType(0); 9152 EVT NSVT = N.getValueType().getScalarType(); 9153 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 9154 if (AllowTruncation || (CVT == NSVT)) 9155 return CN; 9156 } 9157 } 9158 9159 return nullptr; 9160 } 9161 9162 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts, 9163 bool AllowUndefs, 9164 bool AllowTruncation) { 9165 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 9166 return CN; 9167 9168 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9169 BitVector UndefElements; 9170 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements); 9171 9172 // BuildVectors can truncate their operands. Ignore that case here unless 9173 // AllowTruncation is set. 9174 if (CN && (UndefElements.none() || AllowUndefs)) { 9175 EVT CVT = CN->getValueType(0); 9176 EVT NSVT = N.getValueType().getScalarType(); 9177 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 9178 if (AllowTruncation || (CVT == NSVT)) 9179 return CN; 9180 } 9181 } 9182 9183 return nullptr; 9184 } 9185 9186 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) { 9187 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 9188 return CN; 9189 9190 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9191 BitVector UndefElements; 9192 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 9193 if (CN && (UndefElements.none() || AllowUndefs)) 9194 return CN; 9195 } 9196 9197 if (N.getOpcode() == ISD::SPLAT_VECTOR) 9198 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0))) 9199 return CN; 9200 9201 return nullptr; 9202 } 9203 9204 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, 9205 const APInt &DemandedElts, 9206 bool AllowUndefs) { 9207 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 9208 return CN; 9209 9210 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9211 BitVector UndefElements; 9212 ConstantFPSDNode *CN = 9213 BV->getConstantFPSplatNode(DemandedElts, &UndefElements); 9214 if (CN && (UndefElements.none() || AllowUndefs)) 9215 return CN; 9216 } 9217 9218 return nullptr; 9219 } 9220 9221 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) { 9222 // TODO: may want to use peekThroughBitcast() here. 9223 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs); 9224 return C && C->isNullValue(); 9225 } 9226 9227 bool llvm::isOneOrOneSplat(SDValue N) { 9228 // TODO: may want to use peekThroughBitcast() here. 
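  // Matches a scalar constant one or a splat of one, e.g. (illustrative)
  // BUILD_VECTOR <i32 1, i32 1, i32 1, i32 1>, but only when the constant
  // is as wide as the element type (implicitly truncated splat constants
  // are rejected by the width check below).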
9229 unsigned BitWidth = N.getScalarValueSizeInBits(); 9230 ConstantSDNode *C = isConstOrConstSplat(N); 9231 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth; 9232 } 9233 9234 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) { 9235 N = peekThroughBitcasts(N); 9236 unsigned BitWidth = N.getScalarValueSizeInBits(); 9237 ConstantSDNode *C = isConstOrConstSplat(N); 9238 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth; 9239 } 9240 9241 HandleSDNode::~HandleSDNode() { 9242 DropOperands(); 9243 } 9244 9245 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 9246 const DebugLoc &DL, 9247 const GlobalValue *GA, EVT VT, 9248 int64_t o, unsigned TF) 9249 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 9250 TheGlobal = GA; 9251 } 9252 9253 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 9254 EVT VT, unsigned SrcAS, 9255 unsigned DestAS) 9256 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 9257 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 9258 9259 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 9260 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 9261 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 9262 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 9263 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 9264 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 9265 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 9266 9267 // We check here that the size of the memory operand fits within the size of 9268 // the MMO. This is because the MMO might indicate only a possible address 9269 // range instead of specifying the affected memory addresses precisely. 9270 // TODO: Make MachineMemOperands aware of scalable vectors. 9271 assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() && 9272 "Size mismatch!"); 9273 } 9274 9275 /// Profile - Gather unique data for the node. 9276 /// 9277 void SDNode::Profile(FoldingSetNodeID &ID) const { 9278 AddNodeIDNode(ID, this); 9279 } 9280 9281 namespace { 9282 9283 struct EVTArray { 9284 std::vector<EVT> VTs; 9285 9286 EVTArray() { 9287 VTs.reserve(MVT::LAST_VALUETYPE); 9288 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 9289 VTs.push_back(MVT((MVT::SimpleValueType)i)); 9290 } 9291 }; 9292 9293 } // end anonymous namespace 9294 9295 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 9296 static ManagedStatic<EVTArray> SimpleVTArray; 9297 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 9298 9299 /// getValueTypeList - Return a pointer to the specified value type. 9300 /// 9301 const EVT *SDNode::getValueTypeList(EVT VT) { 9302 if (VT.isExtended()) { 9303 sys::SmartScopedLock<true> Lock(*VTMutex); 9304 return &(*EVTs->insert(VT).first); 9305 } else { 9306 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 9307 "Value type out of range!"); 9308 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 9309 } 9310 } 9311 9312 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 9313 /// indicated value. This method ignores uses of other values defined by this 9314 /// operation. 
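/// For example (illustrative): for t1,t2 = udivrem a, b with two uses of t1
/// and one use of t2, hasNUsesOfValue(2, 0) and hasNUsesOfValue(1, 1) both
/// return true.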
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there is any use of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only user of N.
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// Return true if the only users of N are contained in Nodes.
bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (llvm::is_contained(Nodes, User))
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
  return is_contained(N->op_values(), *this);
}

bool SDNode::isOperandOf(const SDNode *N) const {
  return any_of(N->op_values(),
                [this](SDValue Op) { return this == Op.getNode(); });
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. To remain efficient, it only
/// looks a few nodes in; it does not do an exhaustive search.
///
/// Note that we only need to examine chains when we're searching for
/// side-effects; SelectionDAG requires that all side-effects are represented
/// by chains, even if another operand would force a specific ordering. This
/// constraint is necessary to allow transformations like splitting loads.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply; we just want to be able to see through
  // TokenFactors etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel.
  if (getOpcode() == ISD::TokenFactor) {
    // First, try a shallow search.
    if (is_contained((*this)->ops(), Dest)) {
      // We found the chain we want as an operand of this TokenFactor.
      // Essentially, we reach the chain without side-effects if we could
      // serialize the TokenFactor into a simple chain of operations with
      // Dest as the last operation.
      // This is automatically true if the chain has one use: there are no
      // other ordering constraints. If the chain has more than one use, we
      // give up: some other use of Dest might force a side-effect between
      // Dest and the current node.
      if (Dest.hasOneUse())
        return true;
    }
    // Next, try a deep search: check whether every operand of the TokenFactor
    // reaches Dest.
    return llvm::all_of((*this)->ops(), [=](SDValue Op) {
      return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
    });
  }

  // Loads don't have side effects; look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (Ld->isUnordered())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth - 1);
  }
  return false;
}

bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}

void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
  this->Flags.intersectWith(Flags);
}

SDValue
SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
                                  ArrayRef<ISD::NodeType> CandidateBinOps,
                                  bool AllowPartials) {
  // The pattern must end in an extract from index 0.
  if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isNullConstant(Extract->getOperand(1)))
    return SDValue();

  // Match against one of the candidate binary ops.
  SDValue Op = Extract->getOperand(0);
  if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
        return Op.getOpcode() == unsigned(BinOp);
      }))
    return SDValue();

  // Floating-point reductions may require relaxed constraints on the final
  // step of the reduction because they may reorder intermediate operations.
  unsigned CandidateBinOp = Op.getOpcode();
  if (Op.getValueType().isFloatingPoint()) {
    SDNodeFlags Flags = Op->getFlags();
    switch (CandidateBinOp) {
    case ISD::FADD:
      if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
        return SDValue();
      break;
    default:
      llvm_unreachable("Unhandled FP opcode for binop reduction");
    }
  }

  // Invoked when matching fails or stops early: check whether we already did
  // enough stages that a partial reduction from a subvector is possible.
  auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
    if (!AllowPartials || !Op)
      return SDValue();
    EVT OpVT = Op.getValueType();
    EVT OpSVT = OpVT.getScalarType();
    EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
    if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
      return SDValue();
    BinOp = (ISD::NodeType)CandidateBinOp;
    return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
                   getVectorIdxConstant(0, SDLoc(Op)));
  };

  // At each stage, we're looking for something that looks like:
  //   %s = shufflevector <8 x i32> %op, <8 x i32> undef,
  //                      <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
  //                                 i32 undef, i32 undef, i32 undef, i32 undef>
  //   %a = binop <8 x i32> %op, %s
  // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
  // we expect something like:
  //   <4,5,6,7,u,u,u,u>
  //   <2,3,u,u,u,u,u,u>
  //   <1,u,u,u,u,u,u,u>
  // While a partial reduction match would be:
  //   <2,3,u,u,u,u,u,u>
  //   <1,u,u,u,u,u,u,u>
  unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
  SDValue PrevOp;
  for (unsigned i = 0; i < Stages; ++i) {
    unsigned MaskEnd = (1 << i);

    if (Op.getOpcode() != CandidateBinOp)
      return PartialReduction(PrevOp, MaskEnd);

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
    if (Shuffle) {
      Op = Op1;
    } else {
      Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
      Op = Op0;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the binop.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return PartialReduction(PrevOp, MaskEnd);

    // Verify that the shuffle has the mask expected at this stage of the
    // pyramid.
    for (int Index = 0; Index < (int)MaskEnd; ++Index)
      if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
        return PartialReduction(PrevOp, MaskEnd);

    PrevOp = Op;
  }

  // Handle subvector reductions, which tend to appear after the shuffle
  // reduction stages.
  while (Op.getOpcode() == CandidateBinOp) {
    unsigned NumElts = Op.getValueType().getVectorNumElements();
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
        Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
        Op0.getOperand(0) != Op1.getOperand(0))
      break;
    SDValue Src = Op0.getOperand(0);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    if (NumSrcElts != (2 * NumElts))
      break;
    if (!(Op0.getConstantOperandAPInt(1) == 0 &&
          Op1.getConstantOperandAPInt(1) == NumElts) &&
        !(Op1.getConstantOperandAPInt(1) == 0 &&
          Op0.getConstantOperandAPInt(1) == NumElts))
      break;
    Op = Src;
  }

  BinOp = (ISD::NodeType)CandidateBinOp;
  return Op;
}

SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT,
                              Operand, getVectorIdxConstant(i, dl));
      } else {
        // A scalar operand; just use it as is.
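        // (For example, the VT operand of a SIGN_EXTEND_INREG node is scalar;
        // the switch below rewrites it to the corresponding element type.)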
9587 Operands[j] = Operand; 9588 } 9589 } 9590 9591 switch (N->getOpcode()) { 9592 default: { 9593 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 9594 N->getFlags())); 9595 break; 9596 } 9597 case ISD::VSELECT: 9598 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 9599 break; 9600 case ISD::SHL: 9601 case ISD::SRA: 9602 case ISD::SRL: 9603 case ISD::ROTL: 9604 case ISD::ROTR: 9605 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 9606 getShiftAmountOperand(Operands[0].getValueType(), 9607 Operands[1]))); 9608 break; 9609 case ISD::SIGN_EXTEND_INREG: { 9610 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 9611 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 9612 Operands[0], 9613 getValueType(ExtVT))); 9614 } 9615 } 9616 } 9617 9618 for (; i < ResNE; ++i) 9619 Scalars.push_back(getUNDEF(EltVT)); 9620 9621 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 9622 return getBuildVector(VecVT, dl, Scalars); 9623 } 9624 9625 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( 9626 SDNode *N, unsigned ResNE) { 9627 unsigned Opcode = N->getOpcode(); 9628 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO || 9629 Opcode == ISD::USUBO || Opcode == ISD::SSUBO || 9630 Opcode == ISD::UMULO || Opcode == ISD::SMULO) && 9631 "Expected an overflow opcode"); 9632 9633 EVT ResVT = N->getValueType(0); 9634 EVT OvVT = N->getValueType(1); 9635 EVT ResEltVT = ResVT.getVectorElementType(); 9636 EVT OvEltVT = OvVT.getVectorElementType(); 9637 SDLoc dl(N); 9638 9639 // If ResNE is 0, fully unroll the vector op. 9640 unsigned NE = ResVT.getVectorNumElements(); 9641 if (ResNE == 0) 9642 ResNE = NE; 9643 else if (NE > ResNE) 9644 NE = ResNE; 9645 9646 SmallVector<SDValue, 8> LHSScalars; 9647 SmallVector<SDValue, 8> RHSScalars; 9648 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); 9649 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); 9650 9651 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); 9652 SDVTList VTs = getVTList(ResEltVT, SVT); 9653 SmallVector<SDValue, 8> ResScalars; 9654 SmallVector<SDValue, 8> OvScalars; 9655 for (unsigned i = 0; i < NE; ++i) { 9656 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); 9657 SDValue Ov = 9658 getSelect(dl, OvEltVT, Res.getValue(1), 9659 getBoolConstant(true, dl, OvEltVT, ResVT), 9660 getConstant(0, dl, OvEltVT)); 9661 9662 ResScalars.push_back(Res); 9663 OvScalars.push_back(Ov); 9664 } 9665 9666 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); 9667 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); 9668 9669 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE); 9670 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE); 9671 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars), 9672 getBuildVector(NewOvVT, dl, OvScalars)); 9673 } 9674 9675 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 9676 LoadSDNode *Base, 9677 unsigned Bytes, 9678 int Dist) const { 9679 if (LD->isVolatile() || Base->isVolatile()) 9680 return false; 9681 // TODO: probably too restrictive for atomics, revisit 9682 if (!LD->isSimple()) 9683 return false; 9684 if (LD->isIndexed() || Base->isIndexed()) 9685 return false; 9686 if (LD->getChain() != Base->getChain()) 9687 return false; 9688 EVT VT = LD->getValueType(0); 9689 if (VT.getSizeInBits() / 8 != Bytes) 9690 return false; 9691 9692 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); 9693 auto LocDecomp = BaseIndexOffset::match(LD, *this); 
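  // The loads are consecutive iff both addresses decompose to the same
  // base/index and their offsets differ by exactly Dist * Bytes, e.g.
  // Dist == 1 for the load immediately following Base.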
  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlign - Infer alignment of a load / store address. Return None if
/// it cannot be inferred.
MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV = nullptr;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    KnownBits Known(PtrWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    if (AlignBits)
      return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = INT_MIN;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != INT_MIN) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
  }

  return None;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
/// type, dependent on an enveloping VT that has been split into two identical
/// pieces. Sets the HiIsEmpty flag when the hi type has zero storage size.
std::pair<EVT, EVT>
SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
                                       bool *HiIsEmpty) const {
  EVT EltTp = VT.getVectorElementType();
  // Examples:
  //   custom VL=8  with enveloping VL=8/8 yields 8/0 (hi empty)
  //   custom VL=9  with enveloping VL=8/8 yields 8/1
  //   custom VL=10 with enveloping VL=8/8 yields 8/2
  //   etc.
  ElementCount VTNumElts = VT.getVectorElementCount();
  ElementCount EnvNumElts = EnvVT.getVectorElementCount();
  assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
         "Mixing fixed width and scalable vectors when enveloping a type");
  EVT LoVT, HiVT;
  if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
    LoVT = EnvVT;
    HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
    *HiIsEmpty = false;
  } else {
    // Flag that the hi type has zero storage size, but return the split
    // envelope type (this would be easier if vector types with zero elements
    // were allowed).
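    // E.g. custom VL=8 with enveloping VL=8/8 lands here: LoVT keeps all 8
    // elements, HiVT is the 8-element envelope half, and *HiIsEmpty is set.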
9774 LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts); 9775 HiVT = EnvVT; 9776 *HiIsEmpty = true; 9777 } 9778 return std::make_pair(LoVT, HiVT); 9779 } 9780 9781 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the 9782 /// low/high part. 9783 std::pair<SDValue, SDValue> 9784 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, 9785 const EVT &HiVT) { 9786 assert(LoVT.isScalableVector() == HiVT.isScalableVector() && 9787 LoVT.isScalableVector() == N.getValueType().isScalableVector() && 9788 "Splitting vector with an invalid mixture of fixed and scalable " 9789 "vector types"); 9790 assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <= 9791 N.getValueType().getVectorMinNumElements() && 9792 "More vector elements requested than available!"); 9793 SDValue Lo, Hi; 9794 Lo = 9795 getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL)); 9796 // For scalable vectors it is safe to use LoVT.getVectorMinNumElements() 9797 // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales 9798 // IDX with the runtime scaling factor of the result vector type. For 9799 // fixed-width result vectors, that runtime scaling factor is 1. 9800 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, 9801 getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL)); 9802 return std::make_pair(Lo, Hi); 9803 } 9804 9805 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR. 9806 SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) { 9807 EVT VT = N.getValueType(); 9808 EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(), 9809 NextPowerOf2(VT.getVectorNumElements())); 9810 return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N, 9811 getVectorIdxConstant(0, DL)); 9812 } 9813 9814 void SelectionDAG::ExtractVectorElements(SDValue Op, 9815 SmallVectorImpl<SDValue> &Args, 9816 unsigned Start, unsigned Count, 9817 EVT EltVT) { 9818 EVT VT = Op.getValueType(); 9819 if (Count == 0) 9820 Count = VT.getVectorNumElements(); 9821 if (EltVT == EVT()) 9822 EltVT = VT.getVectorElementType(); 9823 SDLoc SL(Op); 9824 for (unsigned i = Start, e = Start + Count; i != e; ++i) { 9825 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op, 9826 getVectorIdxConstant(i, SL))); 9827 } 9828 } 9829 9830 // getAddressSpace - Return the address space this GlobalAddress belongs to. 9831 unsigned GlobalAddressSDNode::getAddressSpace() const { 9832 return getGlobal()->getType()->getAddressSpace(); 9833 } 9834 9835 Type *ConstantPoolSDNode::getType() const { 9836 if (isMachineConstantPoolEntry()) 9837 return Val.MachineCPVal->getType(); 9838 return Val.ConstVal->getType(); 9839 } 9840 9841 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef, 9842 unsigned &SplatBitSize, 9843 bool &HasAnyUndefs, 9844 unsigned MinSplatBits, 9845 bool IsBigEndian) const { 9846 EVT VT = getValueType(0); 9847 assert(VT.isVector() && "Expected a vector type"); 9848 unsigned VecWidth = VT.getSizeInBits(); 9849 if (MinSplatBits > VecWidth) 9850 return false; 9851 9852 // FIXME: The widths are based on this node's type, but build vectors can 9853 // truncate their operands. 9854 SplatValue = APInt(VecWidth, 0); 9855 SplatUndef = APInt(VecWidth, 0); 9856 9857 // Get the bits. Bits with undefined values (when the corresponding element 9858 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared 9859 // in SplatValue. 
If any of the values are not constant, give up and return 9860 // false. 9861 unsigned int NumOps = getNumOperands(); 9862 assert(NumOps > 0 && "isConstantSplat has 0-size build vector"); 9863 unsigned EltWidth = VT.getScalarSizeInBits(); 9864 9865 for (unsigned j = 0; j < NumOps; ++j) { 9866 unsigned i = IsBigEndian ? NumOps - 1 - j : j; 9867 SDValue OpVal = getOperand(i); 9868 unsigned BitPos = j * EltWidth; 9869 9870 if (OpVal.isUndef()) 9871 SplatUndef.setBits(BitPos, BitPos + EltWidth); 9872 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal)) 9873 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos); 9874 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal)) 9875 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos); 9876 else 9877 return false; 9878 } 9879 9880 // The build_vector is all constants or undefs. Find the smallest element 9881 // size that splats the vector. 9882 HasAnyUndefs = (SplatUndef != 0); 9883 9884 // FIXME: This does not work for vectors with elements less than 8 bits. 9885 while (VecWidth > 8) { 9886 unsigned HalfSize = VecWidth / 2; 9887 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize); 9888 APInt LowValue = SplatValue.trunc(HalfSize); 9889 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize); 9890 APInt LowUndef = SplatUndef.trunc(HalfSize); 9891 9892 // If the two halves do not match (ignoring undef bits), stop here. 9893 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) || 9894 MinSplatBits > HalfSize) 9895 break; 9896 9897 SplatValue = HighValue | LowValue; 9898 SplatUndef = HighUndef & LowUndef; 9899 9900 VecWidth = HalfSize; 9901 } 9902 9903 SplatBitSize = VecWidth; 9904 return true; 9905 } 9906 9907 SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts, 9908 BitVector *UndefElements) const { 9909 unsigned NumOps = getNumOperands(); 9910 if (UndefElements) { 9911 UndefElements->clear(); 9912 UndefElements->resize(NumOps); 9913 } 9914 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size"); 9915 if (!DemandedElts) 9916 return SDValue(); 9917 SDValue Splatted; 9918 for (unsigned i = 0; i != NumOps; ++i) { 9919 if (!DemandedElts[i]) 9920 continue; 9921 SDValue Op = getOperand(i); 9922 if (Op.isUndef()) { 9923 if (UndefElements) 9924 (*UndefElements)[i] = true; 9925 } else if (!Splatted) { 9926 Splatted = Op; 9927 } else if (Splatted != Op) { 9928 return SDValue(); 9929 } 9930 } 9931 9932 if (!Splatted) { 9933 unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros(); 9934 assert(getOperand(FirstDemandedIdx).isUndef() && 9935 "Can only have a splat without a constant for all undefs."); 9936 return getOperand(FirstDemandedIdx); 9937 } 9938 9939 return Splatted; 9940 } 9941 9942 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { 9943 APInt DemandedElts = APInt::getAllOnesValue(getNumOperands()); 9944 return getSplatValue(DemandedElts, UndefElements); 9945 } 9946 9947 bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts, 9948 SmallVectorImpl<SDValue> &Sequence, 9949 BitVector *UndefElements) const { 9950 unsigned NumOps = getNumOperands(); 9951 Sequence.clear(); 9952 if (UndefElements) { 9953 UndefElements->clear(); 9954 UndefElements->resize(NumOps); 9955 } 9956 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size"); 9957 if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps)) 9958 return false; 9959 9960 // Set the undefs even if we don't find a sequence (like getSplatValue). 
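  // E.g. <A,B,A,B,A,B,u,B> yields Sequence == {A,B} (found at sequence
  // length 2) while still recording element 6 as undef.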
9961 if (UndefElements) 9962 for (unsigned I = 0; I != NumOps; ++I) 9963 if (DemandedElts[I] && getOperand(I).isUndef()) 9964 (*UndefElements)[I] = true; 9965 9966 // Iteratively widen the sequence length looking for repetitions. 9967 for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) { 9968 Sequence.append(SeqLen, SDValue()); 9969 for (unsigned I = 0; I != NumOps; ++I) { 9970 if (!DemandedElts[I]) 9971 continue; 9972 SDValue &SeqOp = Sequence[I % SeqLen]; 9973 SDValue Op = getOperand(I); 9974 if (Op.isUndef()) { 9975 if (!SeqOp) 9976 SeqOp = Op; 9977 continue; 9978 } 9979 if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) { 9980 Sequence.clear(); 9981 break; 9982 } 9983 SeqOp = Op; 9984 } 9985 if (!Sequence.empty()) 9986 return true; 9987 } 9988 9989 assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern"); 9990 return false; 9991 } 9992 9993 bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence, 9994 BitVector *UndefElements) const { 9995 APInt DemandedElts = APInt::getAllOnesValue(getNumOperands()); 9996 return getRepeatedSequence(DemandedElts, Sequence, UndefElements); 9997 } 9998 9999 ConstantSDNode * 10000 BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts, 10001 BitVector *UndefElements) const { 10002 return dyn_cast_or_null<ConstantSDNode>( 10003 getSplatValue(DemandedElts, UndefElements)); 10004 } 10005 10006 ConstantSDNode * 10007 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const { 10008 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements)); 10009 } 10010 10011 ConstantFPSDNode * 10012 BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts, 10013 BitVector *UndefElements) const { 10014 return dyn_cast_or_null<ConstantFPSDNode>( 10015 getSplatValue(DemandedElts, UndefElements)); 10016 } 10017 10018 ConstantFPSDNode * 10019 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const { 10020 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements)); 10021 } 10022 10023 int32_t 10024 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, 10025 uint32_t BitWidth) const { 10026 if (ConstantFPSDNode *CN = 10027 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) { 10028 bool IsExact; 10029 APSInt IntVal(BitWidth); 10030 const APFloat &APF = CN->getValueAPF(); 10031 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) != 10032 APFloat::opOK || 10033 !IsExact) 10034 return -1; 10035 10036 return IntVal.exactLogBase2(); 10037 } 10038 return -1; 10039 } 10040 10041 bool BuildVectorSDNode::isConstant() const { 10042 for (const SDValue &Op : op_values()) { 10043 unsigned Opc = Op.getOpcode(); 10044 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP) 10045 return false; 10046 } 10047 return true; 10048 } 10049 10050 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { 10051 // Find the first non-undef value in the shuffle mask. 10052 unsigned i, e; 10053 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i) 10054 /* search */; 10055 10056 // If all elements are undefined, this shuffle can be considered a splat 10057 // (although it should eventually get simplified away completely). 10058 if (i == e) 10059 return true; 10060 10061 // Make sure all remaining elements are either undef or the same as the first 10062 // non-undef value. 
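  // E.g. <u,u,3,3> is a splat of element 3, while <u,u,2,3> is not.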
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
      isa<ConstantSDNode>(N.getOperand(0)))
    return N.getNode();
  return nullptr;
}

// Returns the SDNode if it is a constant float BuildVector
// or constant float.
SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip Chain. It does not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
10112 IsDivergent |= Ops[I].getNode()->isDivergent(); 10113 } 10114 Node->NumOperands = Vals.size(); 10115 Node->OperandList = Ops; 10116 if (!TLI->isSDNodeAlwaysUniform(Node)) { 10117 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA); 10118 Node->SDNodeBits.IsDivergent = IsDivergent; 10119 } 10120 checkForCycles(Node); 10121 } 10122 10123 SDValue SelectionDAG::getTokenFactor(const SDLoc &DL, 10124 SmallVectorImpl<SDValue> &Vals) { 10125 size_t Limit = SDNode::getMaxNumOperands(); 10126 while (Vals.size() > Limit) { 10127 unsigned SliceIdx = Vals.size() - Limit; 10128 auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit); 10129 SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs); 10130 Vals.erase(Vals.begin() + SliceIdx, Vals.end()); 10131 Vals.emplace_back(NewTF); 10132 } 10133 return getNode(ISD::TokenFactor, DL, MVT::Other, Vals); 10134 } 10135 10136 SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL, 10137 EVT VT, SDNodeFlags Flags) { 10138 switch (Opcode) { 10139 default: 10140 return SDValue(); 10141 case ISD::ADD: 10142 case ISD::OR: 10143 case ISD::XOR: 10144 case ISD::UMAX: 10145 return getConstant(0, DL, VT); 10146 case ISD::MUL: 10147 return getConstant(1, DL, VT); 10148 case ISD::AND: 10149 case ISD::UMIN: 10150 return getAllOnesConstant(DL, VT); 10151 case ISD::SMAX: 10152 return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT); 10153 case ISD::SMIN: 10154 return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT); 10155 case ISD::FADD: 10156 return getConstantFP(-0.0, DL, VT); 10157 case ISD::FMUL: 10158 return getConstantFP(1.0, DL, VT); 10159 case ISD::FMINNUM: 10160 case ISD::FMAXNUM: { 10161 // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF. 10162 const fltSemantics &Semantics = EVTToAPFloatSemantics(VT); 10163 APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) : 10164 !Flags.hasNoInfs() ? APFloat::getInf(Semantics) : 10165 APFloat::getLargest(Semantics); 10166 if (Opcode == ISD::FMAXNUM) 10167 NeutralAF.changeSign(); 10168 10169 return getConstantFP(NeutralAF, DL, VT); 10170 } 10171 } 10172 } 10173 10174 #ifndef NDEBUG 10175 static void checkForCyclesHelper(const SDNode *N, 10176 SmallPtrSetImpl<const SDNode*> &Visited, 10177 SmallPtrSetImpl<const SDNode*> &Checked, 10178 const llvm::SelectionDAG *DAG) { 10179 // If this node has already been checked, don't check it again. 10180 if (Checked.count(N)) 10181 return; 10182 10183 // If a node has already been visited on this depth-first walk, reject it as 10184 // a cycle. 
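  // Note that Visited tracks only the current DFS path (entries are erased
  // on the way back up), while Checked memoizes nodes already proven to be
  // cycle-free.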
  if (!Visited.insert(N).second) {
    dbgs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}