//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
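  // For example, after promotion a v4i8 all-ones vector may be a BUILD_VECTOR
  // of i16 operands holding 0x00FF; only the low 8 bits of each operand matter
  // here, so such a vector is still treated as all-ones.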
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (!Op.isUndef())
      return false;

  return true;
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match) {
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match) {
  if (LHS.getValueType() != RHS.getValueType())
    return false;

  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHS.getOperand(i));
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHS.getOperand(i));
    if (!LHSCst || !RHSCst)
      return false;
    if (LHSCst->getValueType(0) != SVT ||
        LHSCst->getValueType(0) != RHSCst->getValueType(0))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if the result is an unsigned comparison. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
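/// For example, isSignedOp(ISD::SETLT) == 1, isSignedOp(ISD::SETULT) == 2, and
/// isSignedOp(ISD::SETEQ) == 0.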
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This could
    // happen if replacing a node causes a node previously added to the node to
    // be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis *Divergence) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType().getScalarType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
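/// For example, for an i32 value this produces (xor Val, 0xFFFFFFFF); for a
/// vector type the all-ones constant is splatted across every element.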
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = MF->getFunction().optForSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                                 TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
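/// For example, with two 4-element inputs the mask <0,5,2,7> becomes
/// <4,1,6,3>; undef (-1) entries are left unchanged.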
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  if (TLI->hasVectorBlend()) {
    // If shuffling a splat, try to blend the splat instead. We do this here so
    // that even when this arises during lowering we don't have to re-handle it.
    auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      if (!Splat)
        return;

      for (int i = 0; i < NElts; ++i) {
        if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
          continue;

        // If this input comes from undef, mark it as such.
        if (UndefElements[MaskVec[i] - Offset]) {
          MaskVec[i] = -1;
          continue;
        }

        // If we can blend a non-undef lane, use that instead.
        if (!UndefElements[i])
          MaskVec[i] = i + Offset;
      }
    };
    if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
      BlendSplat(N1BV, 0);
    if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
      BlendSplat(N2BV, NElts);
  }

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
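  // For example, shuffling <x,x,x,x> by any in-range mask still yields
  // <x,x,x,x>, so the existing build vector can be returned directly.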
1628 if (N2Undef) {
1629 SDValue V = N1;
1630 
1631 // Look through any bitcasts. We check that these don't change the number
1632 // (and size) of elements and just change their types.
1633 while (V.getOpcode() == ISD::BITCAST)
1634 V = V->getOperand(0);
1635 
1636 // A splat should always show up as a build vector node.
1637 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1638 BitVector UndefElements;
1639 SDValue Splat = BV->getSplatValue(&UndefElements);
1640 // If this is a splat of an undef, shuffling it is also undef.
1641 if (Splat && Splat.isUndef())
1642 return getUNDEF(VT);
1643 
1644 bool SameNumElts =
1645 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1646 
1647 // We only have a splat which can skip shuffles if there is a splatted
1648 // value and no undef lanes rearranged by the shuffle.
1649 if (Splat && UndefElements.none()) {
1650 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1651 // element counts match or the splatted value is a zero constant.
1652 if (SameNumElts)
1653 return N1;
1654 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1655 if (C->isNullValue())
1656 return N1;
1657 }
1658 
1659 // If the shuffle itself creates a splat, build the vector directly.
1660 if (AllSame && SameNumElts) {
1661 EVT BuildVT = BV->getValueType(0);
1662 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1663 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1664 
1665 // We may have jumped through bitcasts, so the type of the
1666 // BUILD_VECTOR may not match the type of the shuffle.
1667 if (BuildVT != VT)
1668 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1669 return NewBV;
1670 }
1671 }
1672 }
1673 
1674 FoldingSetNodeID ID;
1675 SDValue Ops[2] = { N1, N2 };
1676 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1677 for (int i = 0; i != NElts; ++i)
1678 ID.AddInteger(MaskVec[i]);
1679 
1680 void* IP = nullptr;
1681 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1682 return SDValue(E, 0);
1683 
1684 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1685 // SDNode doesn't have access to it. This memory will be "leaked" when
1686 // the node is deallocated, but recovered when the NodeAllocator is released.
1687 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1688 llvm::copy(MaskVec, MaskAlloc); 1689 1690 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1691 dl.getDebugLoc(), MaskAlloc); 1692 createOperands(N, Ops); 1693 1694 CSEMap.InsertNode(N, IP); 1695 InsertNode(N); 1696 SDValue V = SDValue(N, 0); 1697 NewSDValueDbgMsg(V, "Creating new node: ", this); 1698 return V; 1699 } 1700 1701 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1702 EVT VT = SV.getValueType(0); 1703 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1704 ShuffleVectorSDNode::commuteMask(MaskVec); 1705 1706 SDValue Op0 = SV.getOperand(0); 1707 SDValue Op1 = SV.getOperand(1); 1708 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1709 } 1710 1711 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1712 FoldingSetNodeID ID; 1713 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1714 ID.AddInteger(RegNo); 1715 void *IP = nullptr; 1716 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1717 return SDValue(E, 0); 1718 1719 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1720 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 1721 CSEMap.InsertNode(N, IP); 1722 InsertNode(N); 1723 return SDValue(N, 0); 1724 } 1725 1726 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1727 FoldingSetNodeID ID; 1728 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1729 ID.AddPointer(RegMask); 1730 void *IP = nullptr; 1731 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1732 return SDValue(E, 0); 1733 1734 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1735 CSEMap.InsertNode(N, IP); 1736 InsertNode(N); 1737 return SDValue(N, 0); 1738 } 1739 1740 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1741 MCSymbol *Label) { 1742 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1743 } 1744 1745 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1746 SDValue Root, MCSymbol *Label) { 1747 FoldingSetNodeID ID; 1748 SDValue Ops[] = { Root }; 1749 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1750 ID.AddPointer(Label); 1751 void *IP = nullptr; 1752 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1753 return SDValue(E, 0); 1754 1755 auto *N = newSDNode<LabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label); 1756 createOperands(N, Ops); 1757 1758 CSEMap.InsertNode(N, IP); 1759 InsertNode(N); 1760 return SDValue(N, 0); 1761 } 1762 1763 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1764 int64_t Offset, 1765 bool isTarget, 1766 unsigned char TargetFlags) { 1767 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1768 1769 FoldingSetNodeID ID; 1770 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1771 ID.AddPointer(BA); 1772 ID.AddInteger(Offset); 1773 ID.AddInteger(TargetFlags); 1774 void *IP = nullptr; 1775 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1776 return SDValue(E, 0); 1777 1778 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1779 CSEMap.InsertNode(N, IP); 1780 InsertNode(N); 1781 return SDValue(N, 0); 1782 } 1783 1784 SDValue SelectionDAG::getSrcValue(const Value *V) { 1785 assert((!V || V->getType()->isPointerTy()) && 1786 "SrcValue is not a pointer?"); 1787 1788 FoldingSetNodeID ID; 1789 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1790 ID.AddPointer(V); 1791 1792 void *IP = nullptr; 1793 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1794 return SDValue(E, 0); 1795 1796 auto *N = newSDNode<SrcValueSDNode>(V); 1797 CSEMap.InsertNode(N, IP); 1798 InsertNode(N); 1799 return SDValue(N, 0); 1800 } 1801 1802 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1803 FoldingSetNodeID ID; 1804 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1805 ID.AddPointer(MD); 1806 1807 void *IP = nullptr; 1808 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1809 return SDValue(E, 0); 1810 1811 auto *N = newSDNode<MDNodeSDNode>(MD); 1812 CSEMap.InsertNode(N, IP); 1813 InsertNode(N); 1814 return SDValue(N, 0); 1815 } 1816 1817 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1818 if (VT == V.getValueType()) 1819 return V; 1820 1821 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1822 } 1823 1824 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1825 unsigned SrcAS, unsigned DestAS) { 1826 SDValue Ops[] = {Ptr}; 1827 FoldingSetNodeID ID; 1828 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1829 ID.AddInteger(SrcAS); 1830 ID.AddInteger(DestAS); 1831 1832 void *IP = nullptr; 1833 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1834 return SDValue(E, 0); 1835 1836 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1837 VT, SrcAS, DestAS); 1838 createOperands(N, Ops); 1839 1840 CSEMap.InsertNode(N, IP); 1841 InsertNode(N); 1842 return SDValue(N, 0); 1843 } 1844 1845 /// getShiftAmountOperand - Return the specified value casted to 1846 /// the target's desired shift amount type. 
1847 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1848 EVT OpTy = Op.getValueType(); 1849 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1850 if (OpTy == ShTy || OpTy.isVector()) return Op; 1851 1852 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1853 } 1854 1855 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1856 SDLoc dl(Node); 1857 const TargetLowering &TLI = getTargetLoweringInfo(); 1858 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1859 EVT VT = Node->getValueType(0); 1860 SDValue Tmp1 = Node->getOperand(0); 1861 SDValue Tmp2 = Node->getOperand(1); 1862 unsigned Align = Node->getConstantOperandVal(3); 1863 1864 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1865 Tmp2, MachinePointerInfo(V)); 1866 SDValue VAList = VAListLoad; 1867 1868 if (Align > TLI.getMinStackArgumentAlignment()) { 1869 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 1870 1871 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1872 getConstant(Align - 1, dl, VAList.getValueType())); 1873 1874 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1875 getConstant(-(int64_t)Align, dl, VAList.getValueType())); 1876 } 1877 1878 // Increment the pointer, VAList, to the next vaarg 1879 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1880 getConstant(getDataLayout().getTypeAllocSize( 1881 VT.getTypeForEVT(*getContext())), 1882 dl, VAList.getValueType())); 1883 // Store the incremented VAList to the legalized pointer 1884 Tmp1 = 1885 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1886 // Load the actual argument out of the pointer VAList 1887 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1888 } 1889 1890 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1891 SDLoc dl(Node); 1892 const TargetLowering &TLI = getTargetLoweringInfo(); 1893 // This defaults to loading a pointer from the input and storing it to the 1894 // output, returning the chain. 
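// In other words, for targets whose va_list is a simple pointer, va_copy(dst,
// src) is expanded to roughly *dst = *src, threaded on the chain.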
1895 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1896 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1897 SDValue Tmp1 = 1898 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1899 Node->getOperand(2), MachinePointerInfo(VS)); 1900 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1901 MachinePointerInfo(VD)); 1902 } 1903 1904 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1905 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1906 unsigned ByteSize = VT.getStoreSize(); 1907 Type *Ty = VT.getTypeForEVT(*getContext()); 1908 unsigned StackAlign = 1909 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1910 1911 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1912 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1913 } 1914 1915 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1916 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 1917 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1918 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1919 const DataLayout &DL = getDataLayout(); 1920 unsigned Align = 1921 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1922 1923 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1924 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 1925 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1926 } 1927 1928 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 1929 ISD::CondCode Cond, const SDLoc &dl) { 1930 EVT OpVT = N1.getValueType(); 1931 1932 // These setcc operations always fold. 1933 switch (Cond) { 1934 default: break; 1935 case ISD::SETFALSE: 1936 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); 1937 case ISD::SETTRUE: 1938 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); 1939 1940 case ISD::SETOEQ: 1941 case ISD::SETOGT: 1942 case ISD::SETOGE: 1943 case ISD::SETOLT: 1944 case ISD::SETOLE: 1945 case ISD::SETONE: 1946 case ISD::SETO: 1947 case ISD::SETUO: 1948 case ISD::SETUEQ: 1949 case ISD::SETUNE: 1950 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!"); 1951 break; 1952 } 1953 1954 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 1955 const APInt &C2 = N2C->getAPIntValue(); 1956 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 1957 const APInt &C1 = N1C->getAPIntValue(); 1958 1959 switch (Cond) { 1960 default: llvm_unreachable("Unknown integer setcc!"); 1961 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT); 1962 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT); 1963 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT); 1964 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT); 1965 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT); 1966 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT); 1967 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT); 1968 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT); 1969 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT); 1970 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT); 1971 } 1972 } 1973 } 1974 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) { 1975 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) { 1976 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); 1977 switch (Cond) 
{ 1978 default: break; 1979 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 1980 return getUNDEF(VT); 1981 LLVM_FALLTHROUGH; 1982 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, 1983 OpVT); 1984 case ISD::SETNE: if (R==APFloat::cmpUnordered) 1985 return getUNDEF(VT); 1986 LLVM_FALLTHROUGH; 1987 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || 1988 R==APFloat::cmpLessThan, dl, VT, 1989 OpVT); 1990 case ISD::SETLT: if (R==APFloat::cmpUnordered) 1991 return getUNDEF(VT); 1992 LLVM_FALLTHROUGH; 1993 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, 1994 OpVT); 1995 case ISD::SETGT: if (R==APFloat::cmpUnordered) 1996 return getUNDEF(VT); 1997 LLVM_FALLTHROUGH; 1998 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, 1999 VT, OpVT); 2000 case ISD::SETLE: if (R==APFloat::cmpUnordered) 2001 return getUNDEF(VT); 2002 LLVM_FALLTHROUGH; 2003 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || 2004 R==APFloat::cmpEqual, dl, VT, 2005 OpVT); 2006 case ISD::SETGE: if (R==APFloat::cmpUnordered) 2007 return getUNDEF(VT); 2008 LLVM_FALLTHROUGH; 2009 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2010 R==APFloat::cmpEqual, dl, VT, OpVT); 2011 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, 2012 OpVT); 2013 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, 2014 OpVT); 2015 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || 2016 R==APFloat::cmpEqual, dl, VT, 2017 OpVT); 2018 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, 2019 OpVT); 2020 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || 2021 R==APFloat::cmpLessThan, dl, VT, 2022 OpVT); 2023 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || 2024 R==APFloat::cmpUnordered, dl, VT, 2025 OpVT); 2026 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, 2027 VT, OpVT); 2028 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, 2029 OpVT); 2030 } 2031 } else { 2032 // Ensure that the constant occurs on the RHS. 2033 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 2034 MVT CompVT = N1.getValueType().getSimpleVT(); 2035 if (!TLI->isCondCodeLegal(SwappedCond, CompVT)) 2036 return SDValue(); 2037 2038 return getSetCC(dl, VT, N2, N1, SwappedCond); 2039 } 2040 } 2041 2042 // Could not fold it. 2043 return SDValue(); 2044 } 2045 2046 /// See if the specified operand can be simplified with the knowledge that only 2047 /// the bits specified by Mask are used. 2048 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &Mask) { 2049 switch (V.getOpcode()) { 2050 default: 2051 break; 2052 case ISD::Constant: { 2053 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode()); 2054 assert(CV && "Const value should be ConstSDNode."); 2055 const APInt &CVal = CV->getAPIntValue(); 2056 APInt NewVal = CVal & Mask; 2057 if (NewVal != CVal) 2058 return getConstant(NewVal, SDLoc(V), V.getValueType()); 2059 break; 2060 } 2061 case ISD::OR: 2062 case ISD::XOR: 2063 // If the LHS or RHS don't contribute bits to the or, drop them. 2064 if (MaskedValueIsZero(V.getOperand(0), Mask)) 2065 return V.getOperand(1); 2066 if (MaskedValueIsZero(V.getOperand(1), Mask)) 2067 return V.getOperand(0); 2068 break; 2069 case ISD::SRL: 2070 // Only look at single-use SRLs. 
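// (A shift that has other users cannot be removed, so rewriting it here would
// tend to just add nodes.)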
2071 if (!V.getNode()->hasOneUse()) 2072 break; 2073 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 2074 // See if we can recursively simplify the LHS. 2075 unsigned Amt = RHSC->getZExtValue(); 2076 2077 // Watch out for shift count overflow though. 2078 if (Amt >= Mask.getBitWidth()) 2079 break; 2080 APInt NewMask = Mask << Amt; 2081 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask)) 2082 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS, 2083 V.getOperand(1)); 2084 } 2085 break; 2086 case ISD::AND: { 2087 // X & -1 -> X (ignoring bits which aren't demanded). 2088 ConstantSDNode *AndVal = isConstOrConstSplat(V.getOperand(1)); 2089 if (AndVal && Mask.isSubsetOf(AndVal->getAPIntValue())) 2090 return V.getOperand(0); 2091 break; 2092 } 2093 case ISD::ANY_EXTEND: { 2094 SDValue Src = V.getOperand(0); 2095 unsigned SrcBitWidth = Src.getScalarValueSizeInBits(); 2096 // Being conservative here - only peek through if we only demand bits in the 2097 // non-extended source (even though the extended bits are technically undef). 2098 if (Mask.getActiveBits() > SrcBitWidth) 2099 break; 2100 APInt SrcMask = Mask.trunc(SrcBitWidth); 2101 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcMask)) 2102 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc); 2103 break; 2104 } 2105 } 2106 return SDValue(); 2107 } 2108 2109 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2110 /// use this predicate to simplify operations downstream. 2111 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2112 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2113 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2114 } 2115 2116 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2117 /// this predicate to simplify operations downstream. Mask is known to be zero 2118 /// for bits that V cannot have. 2119 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 2120 unsigned Depth) const { 2121 return Mask.isSubsetOf(computeKnownBits(Op, Depth).Zero); 2122 } 2123 2124 /// isSplatValue - Return true if the vector V has the same value 2125 /// across all DemandedElts. 2126 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, 2127 APInt &UndefElts) { 2128 if (!DemandedElts) 2129 return false; // No demanded elts, better to assume we don't know anything. 2130 2131 EVT VT = V.getValueType(); 2132 assert(VT.isVector() && "Vector type expected"); 2133 2134 unsigned NumElts = VT.getVectorNumElements(); 2135 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch"); 2136 UndefElts = APInt::getNullValue(NumElts); 2137 2138 switch (V.getOpcode()) { 2139 case ISD::BUILD_VECTOR: { 2140 SDValue Scl; 2141 for (unsigned i = 0; i != NumElts; ++i) { 2142 SDValue Op = V.getOperand(i); 2143 if (Op.isUndef()) { 2144 UndefElts.setBit(i); 2145 continue; 2146 } 2147 if (!DemandedElts[i]) 2148 continue; 2149 if (Scl && Scl != Op) 2150 return false; 2151 Scl = Op; 2152 } 2153 return true; 2154 } 2155 case ISD::VECTOR_SHUFFLE: { 2156 // Check if this is a shuffle node doing a splat. 2157 // TODO: Do we need to handle shuffle(splat, undef, mask)? 
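// For example, a splat shuffle of a 4-element vector uses a mask such as
// <2, 2, 2, 2>: every demanded, non-undef lane must name the same source
// element.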
2158 int SplatIndex = -1; 2159 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); 2160 for (int i = 0; i != (int)NumElts; ++i) { 2161 int M = Mask[i]; 2162 if (M < 0) { 2163 UndefElts.setBit(i); 2164 continue; 2165 } 2166 if (!DemandedElts[i]) 2167 continue; 2168 if (0 <= SplatIndex && SplatIndex != M) 2169 return false; 2170 SplatIndex = M; 2171 } 2172 return true; 2173 } 2174 case ISD::EXTRACT_SUBVECTOR: { 2175 SDValue Src = V.getOperand(0); 2176 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(V.getOperand(1)); 2177 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2178 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2179 // Offset the demanded elts by the subvector index. 2180 uint64_t Idx = SubIdx->getZExtValue(); 2181 APInt UndefSrcElts; 2182 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2183 if (isSplatValue(Src, DemandedSrc, UndefSrcElts)) { 2184 UndefElts = UndefSrcElts.extractBits(NumElts, Idx); 2185 return true; 2186 } 2187 } 2188 break; 2189 } 2190 case ISD::ADD: 2191 case ISD::SUB: 2192 case ISD::AND: { 2193 APInt UndefLHS, UndefRHS; 2194 SDValue LHS = V.getOperand(0); 2195 SDValue RHS = V.getOperand(1); 2196 if (isSplatValue(LHS, DemandedElts, UndefLHS) && 2197 isSplatValue(RHS, DemandedElts, UndefRHS)) { 2198 UndefElts = UndefLHS | UndefRHS; 2199 return true; 2200 } 2201 break; 2202 } 2203 } 2204 2205 return false; 2206 } 2207 2208 /// Helper wrapper to main isSplatValue function. 2209 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) { 2210 EVT VT = V.getValueType(); 2211 assert(VT.isVector() && "Vector type expected"); 2212 unsigned NumElts = VT.getVectorNumElements(); 2213 2214 APInt UndefElts; 2215 APInt DemandedElts = APInt::getAllOnesValue(NumElts); 2216 return isSplatValue(V, DemandedElts, UndefElts) && 2217 (AllowUndefs || !UndefElts); 2218 } 2219 2220 /// Helper function that checks to see if a node is a constant or a 2221 /// build vector of splat constants at least within the demanded elts. 2222 static ConstantSDNode *isConstOrDemandedConstSplat(SDValue N, 2223 const APInt &DemandedElts) { 2224 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 2225 return CN; 2226 if (N.getOpcode() != ISD::BUILD_VECTOR) 2227 return nullptr; 2228 EVT VT = N.getValueType(); 2229 ConstantSDNode *Cst = nullptr; 2230 unsigned NumElts = VT.getVectorNumElements(); 2231 assert(DemandedElts.getBitWidth() == NumElts && "Unexpected vector size"); 2232 for (unsigned i = 0; i != NumElts; ++i) { 2233 if (!DemandedElts[i]) 2234 continue; 2235 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(i)); 2236 if (!C || (Cst && Cst->getAPIntValue() != C->getAPIntValue()) || 2237 C->getValueType(0) != VT.getScalarType()) 2238 return nullptr; 2239 Cst = C; 2240 } 2241 return Cst; 2242 } 2243 2244 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 2245 /// is less than the element bit-width of the shift node, return it. 2246 static const APInt *getValidShiftAmountConstant(SDValue V) { 2247 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 2248 // Shifting more than the bitwidth is not valid. 2249 const APInt &ShAmt = SA->getAPIntValue(); 2250 if (ShAmt.ult(V.getScalarValueSizeInBits())) 2251 return &ShAmt; 2252 } 2253 return nullptr; 2254 } 2255 2256 /// Determine which bits of Op are known to be either zero or one and return 2257 /// them in Known. For vectors, the known bits are those that are shared by 2258 /// every vector element. 
2259 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const { 2260 EVT VT = Op.getValueType(); 2261 APInt DemandedElts = VT.isVector() 2262 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2263 : APInt(1, 1); 2264 return computeKnownBits(Op, DemandedElts, Depth); 2265 } 2266 2267 /// Determine which bits of Op are known to be either zero or one and return 2268 /// them in Known. The DemandedElts argument allows us to only collect the known 2269 /// bits that are shared by the requested vector elements. 2270 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts, 2271 unsigned Depth) const { 2272 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2273 2274 KnownBits Known(BitWidth); // Don't know anything. 2275 2276 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2277 // We know all of the bits for a constant! 2278 Known.One = C->getAPIntValue(); 2279 Known.Zero = ~Known.One; 2280 return Known; 2281 } 2282 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { 2283 // We know all of the bits for a constant fp! 2284 Known.One = C->getValueAPF().bitcastToAPInt(); 2285 Known.Zero = ~Known.One; 2286 return Known; 2287 } 2288 2289 if (Depth == 6) 2290 return Known; // Limit search depth. 2291 2292 KnownBits Known2; 2293 unsigned NumElts = DemandedElts.getBitWidth(); 2294 assert((!Op.getValueType().isVector() || 2295 NumElts == Op.getValueType().getVectorNumElements()) && 2296 "Unexpected vector size"); 2297 2298 if (!DemandedElts) 2299 return Known; // No demanded elts, better to assume we don't know anything. 2300 2301 unsigned Opcode = Op.getOpcode(); 2302 switch (Opcode) { 2303 case ISD::BUILD_VECTOR: 2304 // Collect the known bits that are shared by every demanded vector element. 2305 Known.Zero.setAllBits(); Known.One.setAllBits(); 2306 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2307 if (!DemandedElts[i]) 2308 continue; 2309 2310 SDValue SrcOp = Op.getOperand(i); 2311 Known2 = computeKnownBits(SrcOp, Depth + 1); 2312 2313 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2314 if (SrcOp.getValueSizeInBits() != BitWidth) { 2315 assert(SrcOp.getValueSizeInBits() > BitWidth && 2316 "Expected BUILD_VECTOR implicit truncation"); 2317 Known2 = Known2.trunc(BitWidth); 2318 } 2319 2320 // Known bits are the values that are shared by every demanded element. 2321 Known.One &= Known2.One; 2322 Known.Zero &= Known2.Zero; 2323 2324 // If we don't know any bits, early out. 2325 if (Known.isUnknown()) 2326 break; 2327 } 2328 break; 2329 case ISD::VECTOR_SHUFFLE: { 2330 // Collect the known bits that are shared by every vector element referenced 2331 // by the shuffle. 2332 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2333 Known.Zero.setAllBits(); Known.One.setAllBits(); 2334 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2335 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2336 for (unsigned i = 0; i != NumElts; ++i) { 2337 if (!DemandedElts[i]) 2338 continue; 2339 2340 int M = SVN->getMaskElt(i); 2341 if (M < 0) { 2342 // For UNDEF elements, we don't know anything about the common state of 2343 // the shuffle result. 2344 Known.resetAll(); 2345 DemandedLHS.clearAllBits(); 2346 DemandedRHS.clearAllBits(); 2347 break; 2348 } 2349 2350 if ((unsigned)M < NumElts) 2351 DemandedLHS.setBit((unsigned)M % NumElts); 2352 else 2353 DemandedRHS.setBit((unsigned)M % NumElts); 2354 } 2355 // Known bits are the values that are shared by every demanded element. 
2356 if (!!DemandedLHS) { 2357 SDValue LHS = Op.getOperand(0); 2358 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1); 2359 Known.One &= Known2.One; 2360 Known.Zero &= Known2.Zero; 2361 } 2362 // If we don't know any bits, early out. 2363 if (Known.isUnknown()) 2364 break; 2365 if (!!DemandedRHS) { 2366 SDValue RHS = Op.getOperand(1); 2367 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1); 2368 Known.One &= Known2.One; 2369 Known.Zero &= Known2.Zero; 2370 } 2371 break; 2372 } 2373 case ISD::CONCAT_VECTORS: { 2374 // Split DemandedElts and test each of the demanded subvectors. 2375 Known.Zero.setAllBits(); Known.One.setAllBits(); 2376 EVT SubVectorVT = Op.getOperand(0).getValueType(); 2377 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 2378 unsigned NumSubVectors = Op.getNumOperands(); 2379 for (unsigned i = 0; i != NumSubVectors; ++i) { 2380 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 2381 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 2382 if (!!DemandedSub) { 2383 SDValue Sub = Op.getOperand(i); 2384 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1); 2385 Known.One &= Known2.One; 2386 Known.Zero &= Known2.Zero; 2387 } 2388 // If we don't know any bits, early out. 2389 if (Known.isUnknown()) 2390 break; 2391 } 2392 break; 2393 } 2394 case ISD::INSERT_SUBVECTOR: { 2395 // If we know the element index, demand any elements from the subvector and 2396 // the remainder from the src its inserted into, otherwise demand them all. 2397 SDValue Src = Op.getOperand(0); 2398 SDValue Sub = Op.getOperand(1); 2399 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2400 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 2401 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) { 2402 Known.One.setAllBits(); 2403 Known.Zero.setAllBits(); 2404 uint64_t Idx = SubIdx->getZExtValue(); 2405 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 2406 if (!!DemandedSubElts) { 2407 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1); 2408 if (Known.isUnknown()) 2409 break; // early-out. 2410 } 2411 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts); 2412 APInt DemandedSrcElts = DemandedElts & ~SubMask; 2413 if (!!DemandedSrcElts) { 2414 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1); 2415 Known.One &= Known2.One; 2416 Known.Zero &= Known2.Zero; 2417 } 2418 } else { 2419 Known = computeKnownBits(Sub, Depth + 1); 2420 if (Known.isUnknown()) 2421 break; // early-out. 2422 Known2 = computeKnownBits(Src, Depth + 1); 2423 Known.One &= Known2.One; 2424 Known.Zero &= Known2.Zero; 2425 } 2426 break; 2427 } 2428 case ISD::EXTRACT_SUBVECTOR: { 2429 // If we know the element index, just demand that subvector elements, 2430 // otherwise demand them all. 2431 SDValue Src = Op.getOperand(0); 2432 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2433 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2434 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2435 // Offset the demanded elts by the subvector index. 
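// For example, extracting 4 elements at index 4 from an 8-element source
// turns a demanded mask of 0b1111 into 0b11110000 on the source vector.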
2436 uint64_t Idx = SubIdx->getZExtValue();
2437 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2438 Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
2439 } else {
2440 Known = computeKnownBits(Src, Depth + 1);
2441 }
2442 break;
2443 }
2444 case ISD::SCALAR_TO_VECTOR: {
2445 // We know as much about the scalar_to_vector result as we know about its
2446 // source, which becomes the first element of an otherwise unknown vector.
2447 if (DemandedElts != 1)
2448 break;
2449 
2450 SDValue N0 = Op.getOperand(0);
2451 Known = computeKnownBits(N0, Depth + 1);
2452 if (N0.getValueSizeInBits() != BitWidth)
2453 Known = Known.trunc(BitWidth);
2454 
2455 break;
2456 }
2457 case ISD::BITCAST: {
2458 SDValue N0 = Op.getOperand(0);
2459 EVT SubVT = N0.getValueType();
2460 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2461 
2462 // Ignore bitcasts from unsupported types.
2463 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2464 break;
2465 
2466 // Fast handling of 'identity' bitcasts.
2467 if (BitWidth == SubBitWidth) {
2468 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2469 break;
2470 }
2471 
2472 bool IsLE = getDataLayout().isLittleEndian();
2473 
2474 // Bitcast 'small element' vector to 'large element' scalar/vector.
2475 if ((BitWidth % SubBitWidth) == 0) {
2476 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2477 
2478 // Collect known bits for the (larger) output by collecting the known
2479 // bits from each set of sub elements and shifting these into place.
2480 // We need to separately call computeKnownBits for each set of
2481 // sub elements as the knownbits for each is likely to be different.
2482 unsigned SubScale = BitWidth / SubBitWidth;
2483 APInt SubDemandedElts(NumElts * SubScale, 0);
2484 for (unsigned i = 0; i != NumElts; ++i)
2485 if (DemandedElts[i])
2486 SubDemandedElts.setBit(i * SubScale);
2487 
2488 for (unsigned i = 0; i != SubScale; ++i) {
2489 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
2490 Depth + 1);
2491 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
2492 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
2493 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
2494 }
2495 }
2496 
2497 // Bitcast 'large element' scalar/vector to 'small element' vector.
2498 if ((SubBitWidth % BitWidth) == 0) {
2499 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2500 
2501 // Collect known bits for the (smaller) output by collecting the known
2502 // bits from the overlapping larger input elements and extracting the
2503 // sub sections we actually care about.
2504 unsigned SubScale = SubBitWidth / BitWidth;
2505 APInt SubDemandedElts(NumElts / SubScale, 0);
2506 for (unsigned i = 0; i != NumElts; ++i)
2507 if (DemandedElts[i])
2508 SubDemandedElts.setBit(i / SubScale);
2509 
2510 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
2511 
2512 Known.Zero.setAllBits(); Known.One.setAllBits();
2513 for (unsigned i = 0; i != NumElts; ++i)
2514 if (DemandedElts[i]) {
2515 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
2516 unsigned Offset = (Shifts % SubScale) * BitWidth;
2517 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2518 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2519 // If we don't know any bits, early out.
2520 if (Known.isUnknown())
2521 break;
2522 }
2523 }
2524 break;
2525 }
2526 case ISD::AND:
2527 // If a bit is zero in either the LHS or the RHS, the result bit is zero.
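// For example, AND of values known to be 11?0 and 1?10 is known to be 1??0
// (? = unknown bit): a result bit is one only if it is one in both inputs,
// and zero if it is zero in either input.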
2528 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2529 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2530 2531 // Output known-1 bits are only known if set in both the LHS & RHS. 2532 Known.One &= Known2.One; 2533 // Output known-0 are known to be clear if zero in either the LHS | RHS. 2534 Known.Zero |= Known2.Zero; 2535 break; 2536 case ISD::OR: 2537 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2538 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2539 2540 // Output known-0 bits are only known if clear in both the LHS & RHS. 2541 Known.Zero &= Known2.Zero; 2542 // Output known-1 are known to be set if set in either the LHS | RHS. 2543 Known.One |= Known2.One; 2544 break; 2545 case ISD::XOR: { 2546 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2547 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2548 2549 // Output known-0 bits are known if clear or set in both the LHS & RHS. 2550 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One); 2551 // Output known-1 are known to be set if set in only one of the LHS, RHS. 2552 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero); 2553 Known.Zero = KnownZeroOut; 2554 break; 2555 } 2556 case ISD::MUL: { 2557 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2558 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2559 2560 // If low bits are zero in either operand, output low known-0 bits. 2561 // Also compute a conservative estimate for high known-0 bits. 2562 // More trickiness is possible, but this is sufficient for the 2563 // interesting case of alignment computation. 2564 unsigned TrailZ = Known.countMinTrailingZeros() + 2565 Known2.countMinTrailingZeros(); 2566 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 2567 Known2.countMinLeadingZeros(), 2568 BitWidth) - BitWidth; 2569 2570 Known.resetAll(); 2571 Known.Zero.setLowBits(std::min(TrailZ, BitWidth)); 2572 Known.Zero.setHighBits(std::min(LeadZ, BitWidth)); 2573 break; 2574 } 2575 case ISD::UDIV: { 2576 // For the purposes of computing leading zeros we can conservatively 2577 // treat a udiv as a logical right shift by the power of 2 known to 2578 // be less than the denominator. 2579 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2580 unsigned LeadZ = Known2.countMinLeadingZeros(); 2581 2582 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2583 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2584 if (RHSMaxLeadingZeros != BitWidth) 2585 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2586 2587 Known.Zero.setHighBits(LeadZ); 2588 break; 2589 } 2590 case ISD::SELECT: 2591 case ISD::VSELECT: 2592 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2593 // If we don't know any bits, early out. 2594 if (Known.isUnknown()) 2595 break; 2596 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1); 2597 2598 // Only known if known in both the LHS and RHS. 2599 Known.One &= Known2.One; 2600 Known.Zero &= Known2.Zero; 2601 break; 2602 case ISD::SELECT_CC: 2603 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1); 2604 // If we don't know any bits, early out. 2605 if (Known.isUnknown()) 2606 break; 2607 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2608 2609 // Only known if known in both the LHS and RHS. 
2610 Known.One &= Known2.One; 2611 Known.Zero &= Known2.Zero; 2612 break; 2613 case ISD::SMULO: 2614 case ISD::UMULO: 2615 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 2616 if (Op.getResNo() != 1) 2617 break; 2618 // The boolean result conforms to getBooleanContents. 2619 // If we know the result of a setcc has the top bits zero, use this info. 2620 // We know that we have an integer-based boolean since these operations 2621 // are only available for integer. 2622 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2623 TargetLowering::ZeroOrOneBooleanContent && 2624 BitWidth > 1) 2625 Known.Zero.setBitsFrom(1); 2626 break; 2627 case ISD::SETCC: 2628 // If we know the result of a setcc has the top bits zero, use this info. 2629 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2630 TargetLowering::ZeroOrOneBooleanContent && 2631 BitWidth > 1) 2632 Known.Zero.setBitsFrom(1); 2633 break; 2634 case ISD::SHL: 2635 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2636 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2637 unsigned Shift = ShAmt->getZExtValue(); 2638 Known.Zero <<= Shift; 2639 Known.One <<= Shift; 2640 // Low bits are known zero. 2641 Known.Zero.setLowBits(Shift); 2642 } 2643 break; 2644 case ISD::SRL: 2645 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2646 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2647 unsigned Shift = ShAmt->getZExtValue(); 2648 Known.Zero.lshrInPlace(Shift); 2649 Known.One.lshrInPlace(Shift); 2650 // High bits are known zero. 2651 Known.Zero.setHighBits(Shift); 2652 } else if (auto *BV = dyn_cast<BuildVectorSDNode>(Op.getOperand(1))) { 2653 // If the shift amount is a vector of constants see if we can bound 2654 // the number of upper zero bits. 2655 unsigned ShiftAmountMin = BitWidth; 2656 for (unsigned i = 0; i != BV->getNumOperands(); ++i) { 2657 if (auto *C = dyn_cast<ConstantSDNode>(BV->getOperand(i))) { 2658 const APInt &ShAmt = C->getAPIntValue(); 2659 if (ShAmt.ult(BitWidth)) { 2660 ShiftAmountMin = std::min<unsigned>(ShiftAmountMin, 2661 ShAmt.getZExtValue()); 2662 continue; 2663 } 2664 } 2665 // Don't know anything. 2666 ShiftAmountMin = 0; 2667 break; 2668 } 2669 2670 Known.Zero.setHighBits(ShiftAmountMin); 2671 } 2672 break; 2673 case ISD::SRA: 2674 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2675 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2676 unsigned Shift = ShAmt->getZExtValue(); 2677 // Sign extend known zero/one bit (else is unknown). 2678 Known.Zero.ashrInPlace(Shift); 2679 Known.One.ashrInPlace(Shift); 2680 } 2681 break; 2682 case ISD::FSHL: 2683 case ISD::FSHR: 2684 if (ConstantSDNode *C = 2685 isConstOrDemandedConstSplat(Op.getOperand(2), DemandedElts)) { 2686 unsigned Amt = C->getAPIntValue().urem(BitWidth); 2687 2688 // For fshl, 0-shift returns the 1st arg. 2689 // For fshr, 0-shift returns the 2nd arg. 2690 if (Amt == 0) { 2691 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 
0 : 1), 2692 DemandedElts, Depth + 1); 2693 break; 2694 } 2695 2696 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 2697 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 2698 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2699 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2700 if (Opcode == ISD::FSHL) { 2701 Known.One <<= Amt; 2702 Known.Zero <<= Amt; 2703 Known2.One.lshrInPlace(BitWidth - Amt); 2704 Known2.Zero.lshrInPlace(BitWidth - Amt); 2705 } else { 2706 Known.One <<= BitWidth - Amt; 2707 Known.Zero <<= BitWidth - Amt; 2708 Known2.One.lshrInPlace(Amt); 2709 Known2.Zero.lshrInPlace(Amt); 2710 } 2711 Known.One |= Known2.One; 2712 Known.Zero |= Known2.Zero; 2713 } 2714 break; 2715 case ISD::SIGN_EXTEND_INREG: { 2716 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2717 unsigned EBits = EVT.getScalarSizeInBits(); 2718 2719 // Sign extension. Compute the demanded bits in the result that are not 2720 // present in the input. 2721 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2722 2723 APInt InSignMask = APInt::getSignMask(EBits); 2724 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2725 2726 // If the sign extended bits are demanded, we know that the sign 2727 // bit is demanded. 2728 InSignMask = InSignMask.zext(BitWidth); 2729 if (NewBits.getBoolValue()) 2730 InputDemandedBits |= InSignMask; 2731 2732 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2733 Known.One &= InputDemandedBits; 2734 Known.Zero &= InputDemandedBits; 2735 2736 // If the sign bit of the input is known set or clear, then we know the 2737 // top bits of the result. 2738 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear 2739 Known.Zero |= NewBits; 2740 Known.One &= ~NewBits; 2741 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set 2742 Known.One |= NewBits; 2743 Known.Zero &= ~NewBits; 2744 } else { // Input sign bit unknown 2745 Known.Zero &= ~NewBits; 2746 Known.One &= ~NewBits; 2747 } 2748 break; 2749 } 2750 case ISD::CTTZ: 2751 case ISD::CTTZ_ZERO_UNDEF: { 2752 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2753 // If we have a known 1, its position is our upper bound. 2754 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 2755 unsigned LowBits = Log2_32(PossibleTZ) + 1; 2756 Known.Zero.setBitsFrom(LowBits); 2757 break; 2758 } 2759 case ISD::CTLZ: 2760 case ISD::CTLZ_ZERO_UNDEF: { 2761 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2762 // If we have a known 1, its position is our upper bound. 2763 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 2764 unsigned LowBits = Log2_32(PossibleLZ) + 1; 2765 Known.Zero.setBitsFrom(LowBits); 2766 break; 2767 } 2768 case ISD::CTPOP: { 2769 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2770 // If we know some of the bits are zero, they can't be one. 2771 unsigned PossibleOnes = Known2.countMaxPopulation(); 2772 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 2773 break; 2774 } 2775 case ISD::LOAD: { 2776 LoadSDNode *LD = cast<LoadSDNode>(Op); 2777 // If this is a ZEXTLoad and we are looking at the loaded value. 
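// For example, a zextload of an i8 value into an i32 result guarantees that
// bits [8, 32) of the loaded value are zero.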
2778 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2779 EVT VT = LD->getMemoryVT();
2780 unsigned MemBits = VT.getScalarSizeInBits();
2781 Known.Zero.setBitsFrom(MemBits);
2782 } else if (const MDNode *Ranges = LD->getRanges()) {
2783 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
2784 computeKnownBitsFromRangeMetadata(*Ranges, Known);
2785 }
2786 break;
2787 }
2788 case ISD::ZERO_EXTEND_VECTOR_INREG: {
2789 EVT InVT = Op.getOperand(0).getValueType();
2790 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
2791 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
2792 Known = Known.zext(BitWidth);
2793 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
2794 break;
2795 }
2796 case ISD::ZERO_EXTEND: {
2797 EVT InVT = Op.getOperand(0).getValueType();
2798 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2799 Known = Known.zext(BitWidth);
2800 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
2801 break;
2802 }
2803 // TODO ISD::SIGN_EXTEND_VECTOR_INREG
2804 case ISD::SIGN_EXTEND: {
2805 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2806 // If the sign bit is known to be zero or one, then sext will extend
2807 // it to the top bits, else it will just zext.
2808 Known = Known.sext(BitWidth);
2809 break;
2810 }
2811 case ISD::ANY_EXTEND: {
2812 Known = computeKnownBits(Op.getOperand(0), Depth+1);
2813 Known = Known.zext(BitWidth);
2814 break;
2815 }
2816 case ISD::TRUNCATE: {
2817 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2818 Known = Known.trunc(BitWidth);
2819 break;
2820 }
2821 case ISD::AssertZext: {
2822 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2823 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2824 Known = computeKnownBits(Op.getOperand(0), Depth+1);
2825 Known.Zero |= (~InMask);
2826 Known.One &= (~Known.Zero);
2827 break;
2828 }
2829 case ISD::FGETSIGN:
2830 // All bits are zero except the low bit.
2831 Known.Zero.setBitsFrom(1);
2832 break;
2833 case ISD::USUBO:
2834 case ISD::SSUBO:
2835 if (Op.getResNo() == 1) {
2836 // If we know the result of a setcc has the top bits zero, use this info.
2837 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2838 TargetLowering::ZeroOrOneBooleanContent &&
2839 BitWidth > 1)
2840 Known.Zero.setBitsFrom(1);
2841 break;
2842 }
2843 LLVM_FALLTHROUGH;
2844 case ISD::SUB:
2845 case ISD::SUBC: {
2846 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) {
2847 // We know that the top bits of C-X are clear if X contains fewer bits
2848 // than C (i.e. no wrap-around can happen). For example, 20-X is
2849 // positive if we can prove that X is >= 0 and < 16.
2850 if (CLHS->getAPIntValue().isNonNegative()) {
2851 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2852 // NLZ can't be BitWidth with no sign bit
2853 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2854 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts,
2855 Depth + 1);
2856 
2857 // If all of the MaskV bits are known to be zero, then we know the
2858 // output top bits are zero, because we now know that the output is
2859 // from [0-C].
2860 if ((Known2.Zero & MaskV) == MaskV) {
2861 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2862 // Top bits known zero.
2863 Known.Zero.setHighBits(NLZ2);
2864 }
2865 }
2866 }
2867 
2868 // If low bits are known to be zero in both operands, then we know they are
2869 // going to be 0 in the result.
Both addition and complement operations 2870 // preserve the low zero bits. 2871 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2872 unsigned KnownZeroLow = Known2.countMinTrailingZeros(); 2873 if (KnownZeroLow == 0) 2874 break; 2875 2876 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2877 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros()); 2878 Known.Zero.setLowBits(KnownZeroLow); 2879 break; 2880 } 2881 case ISD::UADDO: 2882 case ISD::SADDO: 2883 case ISD::ADDCARRY: 2884 if (Op.getResNo() == 1) { 2885 // If we know the result of a setcc has the top bits zero, use this info. 2886 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2887 TargetLowering::ZeroOrOneBooleanContent && 2888 BitWidth > 1) 2889 Known.Zero.setBitsFrom(1); 2890 break; 2891 } 2892 LLVM_FALLTHROUGH; 2893 case ISD::ADD: 2894 case ISD::ADDC: 2895 case ISD::ADDE: { 2896 // Output known-0 bits are known if clear or set in both the low clear bits 2897 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the 2898 // low 3 bits clear. 2899 // Output known-0 bits are also known if the top bits of each input are 2900 // known to be clear. For example, if one input has the top 10 bits clear 2901 // and the other has the top 8 bits clear, we know the top 7 bits of the 2902 // output must be clear. 2903 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2904 unsigned KnownZeroHigh = Known2.countMinLeadingZeros(); 2905 unsigned KnownZeroLow = Known2.countMinTrailingZeros(); 2906 2907 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2908 KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros()); 2909 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros()); 2910 2911 if (Opcode == ISD::ADDE || Opcode == ISD::ADDCARRY) { 2912 // With ADDE and ADDCARRY, a carry bit may be added in, so we can only 2913 // use this information if we know (at least) that the low two bits are 2914 // clear. We then return to the caller that the low bit is unknown but 2915 // that other bits are known zero. 2916 if (KnownZeroLow >= 2) 2917 Known.Zero.setBits(1, KnownZeroLow); 2918 break; 2919 } 2920 2921 Known.Zero.setLowBits(KnownZeroLow); 2922 if (KnownZeroHigh > 1) 2923 Known.Zero.setHighBits(KnownZeroHigh - 1); 2924 break; 2925 } 2926 case ISD::SREM: 2927 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 2928 const APInt &RA = Rem->getAPIntValue().abs(); 2929 if (RA.isPowerOf2()) { 2930 APInt LowBits = RA - 1; 2931 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2932 2933 // The low bits of the first operand are unchanged by the srem. 2934 Known.Zero = Known2.Zero & LowBits; 2935 Known.One = Known2.One & LowBits; 2936 2937 // If the first operand is non-negative or has all low bits zero, then 2938 // the upper bits are all zero. 2939 if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits)) 2940 Known.Zero |= ~LowBits; 2941 2942 // If the first operand is negative and not all low bits are zero, then 2943 // the upper bits are all one. 
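// For example, srem by 8 on a value known to be negative with at least one of
// the low three bits known set yields a result of the form 111...1xxx, so all
// bits above the low three are known one.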
2944 if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0)) 2945 Known.One |= ~LowBits; 2946 assert((Known.Zero & Known.One) == 0&&"Bits known to be one AND zero?"); 2947 } 2948 } 2949 break; 2950 case ISD::UREM: { 2951 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 2952 const APInt &RA = Rem->getAPIntValue(); 2953 if (RA.isPowerOf2()) { 2954 APInt LowBits = (RA - 1); 2955 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2956 2957 // The upper bits are all zero, the lower ones are unchanged. 2958 Known.Zero = Known2.Zero | ~LowBits; 2959 Known.One = Known2.One & LowBits; 2960 break; 2961 } 2962 } 2963 2964 // Since the result is less than or equal to either operand, any leading 2965 // zero bits in either operand must also exist in the result. 2966 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2967 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2968 2969 uint32_t Leaders = 2970 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 2971 Known.resetAll(); 2972 Known.Zero.setHighBits(Leaders); 2973 break; 2974 } 2975 case ISD::EXTRACT_ELEMENT: { 2976 Known = computeKnownBits(Op.getOperand(0), Depth+1); 2977 const unsigned Index = Op.getConstantOperandVal(1); 2978 const unsigned BitWidth = Op.getValueSizeInBits(); 2979 2980 // Remove low part of known bits mask 2981 Known.Zero = Known.Zero.getHiBits(Known.Zero.getBitWidth() - Index * BitWidth); 2982 Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth); 2983 2984 // Remove high part of known bit mask 2985 Known = Known.trunc(BitWidth); 2986 break; 2987 } 2988 case ISD::EXTRACT_VECTOR_ELT: { 2989 SDValue InVec = Op.getOperand(0); 2990 SDValue EltNo = Op.getOperand(1); 2991 EVT VecVT = InVec.getValueType(); 2992 const unsigned BitWidth = Op.getValueSizeInBits(); 2993 const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); 2994 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 2995 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 2996 // anything about the extended bits. 2997 if (BitWidth > EltBitWidth) 2998 Known = Known.trunc(EltBitWidth); 2999 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3000 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) { 3001 // If we know the element index, just demand that vector element. 3002 unsigned Idx = ConstEltNo->getZExtValue(); 3003 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); 3004 Known = computeKnownBits(InVec, DemandedElt, Depth + 1); 3005 } else { 3006 // Unknown element index, so ignore DemandedElts and demand them all. 3007 Known = computeKnownBits(InVec, Depth + 1); 3008 } 3009 if (BitWidth > EltBitWidth) 3010 Known = Known.zext(BitWidth); 3011 break; 3012 } 3013 case ISD::INSERT_VECTOR_ELT: { 3014 SDValue InVec = Op.getOperand(0); 3015 SDValue InVal = Op.getOperand(1); 3016 SDValue EltNo = Op.getOperand(2); 3017 3018 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3019 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3020 // If we know the element index, split the demand between the 3021 // source vector and the inserted element. 3022 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth); 3023 unsigned EltIdx = CEltNo->getZExtValue(); 3024 3025 // If we demand the inserted element then add its common known bits. 
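// (The inserted scalar may be wider than the vector element type, so its
// known bits are zero-extended or truncated to the element width below.)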
3026 if (DemandedElts[EltIdx]) { 3027 Known2 = computeKnownBits(InVal, Depth + 1); 3028 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 3029 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth()); 3030 } 3031 3032 // If we demand the source vector then add its common known bits, ensuring 3033 // that we don't demand the inserted element. 3034 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx)); 3035 if (!!VectorElts) { 3036 Known2 = computeKnownBits(InVec, VectorElts, Depth + 1); 3037 Known.One &= Known2.One; 3038 Known.Zero &= Known2.Zero; 3039 } 3040 } else { 3041 // Unknown element index, so ignore DemandedElts and demand them all. 3042 Known = computeKnownBits(InVec, Depth + 1); 3043 Known2 = computeKnownBits(InVal, Depth + 1); 3044 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 3045 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth()); 3046 } 3047 break; 3048 } 3049 case ISD::BITREVERSE: { 3050 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3051 Known.Zero = Known2.Zero.reverseBits(); 3052 Known.One = Known2.One.reverseBits(); 3053 break; 3054 } 3055 case ISD::BSWAP: { 3056 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3057 Known.Zero = Known2.Zero.byteSwap(); 3058 Known.One = Known2.One.byteSwap(); 3059 break; 3060 } 3061 case ISD::ABS: { 3062 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3063 3064 // If the source's MSB is zero then we know the rest of the bits already. 3065 if (Known2.isNonNegative()) { 3066 Known.Zero = Known2.Zero; 3067 Known.One = Known2.One; 3068 break; 3069 } 3070 3071 // We only know that the absolute values's MSB will be zero iff there is 3072 // a set bit that isn't the sign bit (otherwise it could be INT_MIN). 3073 Known2.One.clearSignBit(); 3074 if (Known2.One.getBoolValue()) { 3075 Known.Zero = APInt::getSignMask(BitWidth); 3076 break; 3077 } 3078 break; 3079 } 3080 case ISD::UMIN: { 3081 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3082 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3083 3084 // UMIN - we know that the result will have the maximum of the 3085 // known zero leading bits of the inputs. 3086 unsigned LeadZero = Known.countMinLeadingZeros(); 3087 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros()); 3088 3089 Known.Zero &= Known2.Zero; 3090 Known.One &= Known2.One; 3091 Known.Zero.setHighBits(LeadZero); 3092 break; 3093 } 3094 case ISD::UMAX: { 3095 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3096 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3097 3098 // UMAX - we know that the result will have the maximum of the 3099 // known one leading bits of the inputs. 3100 unsigned LeadOne = Known.countMinLeadingOnes(); 3101 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes()); 3102 3103 Known.Zero &= Known2.Zero; 3104 Known.One &= Known2.One; 3105 Known.One.setHighBits(LeadOne); 3106 break; 3107 } 3108 case ISD::SMIN: 3109 case ISD::SMAX: { 3110 // If we have a clamp pattern, we know that the number of sign bits will be 3111 // the minimum of the clamp min/max range. 3112 bool IsMax = (Opcode == ISD::SMAX); 3113 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3114 if ((CstLow = isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts))) 3115 if (Op.getOperand(0).getOpcode() == (IsMax ? 
ISD::SMIN : ISD::SMAX)) 3116 CstHigh = isConstOrDemandedConstSplat(Op.getOperand(0).getOperand(1), 3117 DemandedElts); 3118 if (CstLow && CstHigh) { 3119 if (!IsMax) 3120 std::swap(CstLow, CstHigh); 3121 3122 const APInt &ValueLow = CstLow->getAPIntValue(); 3123 const APInt &ValueHigh = CstHigh->getAPIntValue(); 3124 if (ValueLow.sle(ValueHigh)) { 3125 unsigned LowSignBits = ValueLow.getNumSignBits(); 3126 unsigned HighSignBits = ValueHigh.getNumSignBits(); 3127 unsigned MinSignBits = std::min(LowSignBits, HighSignBits); 3128 if (ValueLow.isNegative() && ValueHigh.isNegative()) { 3129 Known.One.setHighBits(MinSignBits); 3130 break; 3131 } 3132 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) { 3133 Known.Zero.setHighBits(MinSignBits); 3134 break; 3135 } 3136 } 3137 } 3138 3139 // Fallback - just get the shared known bits of the operands. 3140 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3141 if (Known.isUnknown()) break; // Early-out 3142 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3143 Known.Zero &= Known2.Zero; 3144 Known.One &= Known2.One; 3145 break; 3146 } 3147 case ISD::FrameIndex: 3148 case ISD::TargetFrameIndex: 3149 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth); 3150 break; 3151 3152 default: 3153 if (Opcode < ISD::BUILTIN_OP_END) 3154 break; 3155 LLVM_FALLTHROUGH; 3156 case ISD::INTRINSIC_WO_CHAIN: 3157 case ISD::INTRINSIC_W_CHAIN: 3158 case ISD::INTRINSIC_VOID: 3159 // Allow the target to implement this method for its nodes. 3160 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); 3161 break; 3162 } 3163 3164 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 3165 return Known; 3166 } 3167 3168 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0, 3169 SDValue N1) const { 3170 // X + 0 never overflow 3171 if (isNullConstant(N1)) 3172 return OFK_Never; 3173 3174 KnownBits N1Known; 3175 computeKnownBits(N1, N1Known); 3176 if (N1Known.Zero.getBoolValue()) { 3177 KnownBits N0Known; 3178 computeKnownBits(N0, N0Known); 3179 3180 bool overflow; 3181 (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow); 3182 if (!overflow) 3183 return OFK_Never; 3184 } 3185 3186 // mulhi + 1 never overflow 3187 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 && 3188 (~N1Known.Zero & 0x01) == ~N1Known.Zero) 3189 return OFK_Never; 3190 3191 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) { 3192 KnownBits N0Known; 3193 computeKnownBits(N0, N0Known); 3194 3195 if ((~N0Known.Zero & 0x01) == ~N0Known.Zero) 3196 return OFK_Never; 3197 } 3198 3199 return OFK_Sometime; 3200 } 3201 3202 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const { 3203 EVT OpVT = Val.getValueType(); 3204 unsigned BitWidth = OpVT.getScalarSizeInBits(); 3205 3206 // Is the constant a known power of 2? 3207 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val)) 3208 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3209 3210 // A left-shift of a constant one will have exactly one bit set because 3211 // shifting the bit off the end is undefined. 3212 if (Val.getOpcode() == ISD::SHL) { 3213 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3214 if (C && C->getAPIntValue() == 1) 3215 return true; 3216 } 3217 3218 // Similarly, a logical right-shift of a constant sign-bit will have exactly 3219 // one bit set. 
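// For example, for i32 this matches (0x80000000 >> X): the result has exactly
// one bit set for every well-defined shift amount, since the single set bit
// can never be shifted out the low end.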
3220 if (Val.getOpcode() == ISD::SRL) { 3221 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3222 if (C && C->getAPIntValue().isSignMask()) 3223 return true; 3224 } 3225 3226 // Are all operands of a build vector constant powers of two? 3227 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3228 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3229 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3230 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3231 return false; 3232 })) 3233 return true; 3234 3235 // More could be done here, though the above checks are enough 3236 // to handle some common cases. 3237 3238 // Fall back to computeKnownBits to catch other known cases. 3239 KnownBits Known = computeKnownBits(Val); 3240 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3241 } 3242 3243 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3244 EVT VT = Op.getValueType(); 3245 APInt DemandedElts = VT.isVector() 3246 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 3247 : APInt(1, 1); 3248 return ComputeNumSignBits(Op, DemandedElts, Depth); 3249 } 3250 3251 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3252 unsigned Depth) const { 3253 EVT VT = Op.getValueType(); 3254 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3255 unsigned VTBits = VT.getScalarSizeInBits(); 3256 unsigned NumElts = DemandedElts.getBitWidth(); 3257 unsigned Tmp, Tmp2; 3258 unsigned FirstAnswer = 1; 3259 3260 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3261 const APInt &Val = C->getAPIntValue(); 3262 return Val.getNumSignBits(); 3263 } 3264 3265 if (Depth == 6) 3266 return 1; // Limit search depth. 3267 3268 if (!DemandedElts) 3269 return 1; // No demanded elts, better to assume we don't know anything. 3270 3271 unsigned Opcode = Op.getOpcode(); 3272 switch (Opcode) { 3273 default: break; 3274 case ISD::AssertSext: 3275 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3276 return VTBits-Tmp+1; 3277 case ISD::AssertZext: 3278 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3279 return VTBits-Tmp; 3280 3281 case ISD::BUILD_VECTOR: 3282 Tmp = VTBits; 3283 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3284 if (!DemandedElts[i]) 3285 continue; 3286 3287 SDValue SrcOp = Op.getOperand(i); 3288 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 3289 3290 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3291 if (SrcOp.getValueSizeInBits() != VTBits) { 3292 assert(SrcOp.getValueSizeInBits() > VTBits && 3293 "Expected BUILD_VECTOR implicit truncation"); 3294 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3295 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3296 } 3297 Tmp = std::min(Tmp, Tmp2); 3298 } 3299 return Tmp; 3300 3301 case ISD::VECTOR_SHUFFLE: { 3302 // Collect the minimum number of sign bits that are shared by every vector 3303 // element referenced by the shuffle. 3304 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3305 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3306 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3307 for (unsigned i = 0; i != NumElts; ++i) { 3308 int M = SVN->getMaskElt(i); 3309 if (!DemandedElts[i]) 3310 continue; 3311 // For UNDEF elements, we don't know anything about the common state of 3312 // the shuffle result. 
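      // Returning 1 is always safe (every value has at least one sign bit),
      // so an undef mask element forces the conservative answer.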
3313 if (M < 0) 3314 return 1; 3315 if ((unsigned)M < NumElts) 3316 DemandedLHS.setBit((unsigned)M % NumElts); 3317 else 3318 DemandedRHS.setBit((unsigned)M % NumElts); 3319 } 3320 Tmp = std::numeric_limits<unsigned>::max(); 3321 if (!!DemandedLHS) 3322 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3323 if (!!DemandedRHS) { 3324 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3325 Tmp = std::min(Tmp, Tmp2); 3326 } 3327 // If we don't know anything, early out and try computeKnownBits fall-back. 3328 if (Tmp == 1) 3329 break; 3330 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3331 return Tmp; 3332 } 3333 3334 case ISD::BITCAST: { 3335 SDValue N0 = Op.getOperand(0); 3336 EVT SrcVT = N0.getValueType(); 3337 unsigned SrcBits = SrcVT.getScalarSizeInBits(); 3338 3339 // Ignore bitcasts from unsupported types.. 3340 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) 3341 break; 3342 3343 // Fast handling of 'identity' bitcasts. 3344 if (VTBits == SrcBits) 3345 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3346 3347 bool IsLE = getDataLayout().isLittleEndian(); 3348 3349 // Bitcast 'large element' scalar/vector to 'small element' vector. 3350 if ((SrcBits % VTBits) == 0) { 3351 assert(VT.isVector() && "Expected bitcast to vector"); 3352 3353 unsigned Scale = SrcBits / VTBits; 3354 APInt SrcDemandedElts(NumElts / Scale, 0); 3355 for (unsigned i = 0; i != NumElts; ++i) 3356 if (DemandedElts[i]) 3357 SrcDemandedElts.setBit(i / Scale); 3358 3359 // Fast case - sign splat can be simply split across the small elements. 3360 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1); 3361 if (Tmp == SrcBits) 3362 return VTBits; 3363 3364 // Slow case - determine how far the sign extends into each sub-element. 3365 Tmp2 = VTBits; 3366 for (unsigned i = 0; i != NumElts; ++i) 3367 if (DemandedElts[i]) { 3368 unsigned SubOffset = i % Scale; 3369 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); 3370 SubOffset = SubOffset * VTBits; 3371 if (Tmp <= SubOffset) 3372 return 1; 3373 Tmp2 = std::min(Tmp2, Tmp - SubOffset); 3374 } 3375 return Tmp2; 3376 } 3377 break; 3378 } 3379 3380 case ISD::SIGN_EXTEND: 3381 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3382 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3383 case ISD::SIGN_EXTEND_INREG: 3384 // Max of the input and what this extends. 3385 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3386 Tmp = VTBits-Tmp+1; 3387 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3388 return std::max(Tmp, Tmp2); 3389 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3390 SDValue Src = Op.getOperand(0); 3391 EVT SrcVT = Src.getValueType(); 3392 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements()); 3393 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3394 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3395 } 3396 3397 case ISD::SRA: 3398 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3399 // SRA X, C -> adds C sign bits. 3400 if (ConstantSDNode *C = 3401 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) { 3402 APInt ShiftVal = C->getAPIntValue(); 3403 ShiftVal += Tmp; 3404 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); 3405 } 3406 return Tmp; 3407 case ISD::SHL: 3408 if (ConstantSDNode *C = 3409 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) { 3410 // shl destroys sign bits. 
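      // e.g. if the first operand is (sext i8 X to i32) it has at least 25
      // sign bits, so a constant shift by 4 still leaves at least 21.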
3411 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3412 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 3413 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 3414 return Tmp - C->getZExtValue(); 3415 } 3416 break; 3417 case ISD::AND: 3418 case ISD::OR: 3419 case ISD::XOR: // NOT is handled here. 3420 // Logical binary ops preserve the number of sign bits at the worst. 3421 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3422 if (Tmp != 1) { 3423 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3424 FirstAnswer = std::min(Tmp, Tmp2); 3425 // We computed what we know about the sign bits as our first 3426 // answer. Now proceed to the generic code that uses 3427 // computeKnownBits, and pick whichever answer is better. 3428 } 3429 break; 3430 3431 case ISD::SELECT: 3432 case ISD::VSELECT: 3433 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3434 if (Tmp == 1) return 1; // Early out. 3435 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3436 return std::min(Tmp, Tmp2); 3437 case ISD::SELECT_CC: 3438 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3439 if (Tmp == 1) return 1; // Early out. 3440 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3441 return std::min(Tmp, Tmp2); 3442 3443 case ISD::SMIN: 3444 case ISD::SMAX: { 3445 // If we have a clamp pattern, we know that the number of sign bits will be 3446 // the minimum of the clamp min/max range. 3447 bool IsMax = (Opcode == ISD::SMAX); 3448 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3449 if ((CstLow = isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts))) 3450 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3451 CstHigh = isConstOrDemandedConstSplat(Op.getOperand(0).getOperand(1), 3452 DemandedElts); 3453 if (CstLow && CstHigh) { 3454 if (!IsMax) 3455 std::swap(CstLow, CstHigh); 3456 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { 3457 Tmp = CstLow->getAPIntValue().getNumSignBits(); 3458 Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); 3459 return std::min(Tmp, Tmp2); 3460 } 3461 } 3462 3463 // Fallback - just get the minimum number of sign bits of the operands. 3464 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3465 if (Tmp == 1) 3466 return 1; // Early out. 3467 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3468 return std::min(Tmp, Tmp2); 3469 } 3470 case ISD::UMIN: 3471 case ISD::UMAX: 3472 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3473 if (Tmp == 1) 3474 return 1; // Early out. 3475 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3476 return std::min(Tmp, Tmp2); 3477 case ISD::SADDO: 3478 case ISD::UADDO: 3479 case ISD::SSUBO: 3480 case ISD::USUBO: 3481 case ISD::SMULO: 3482 case ISD::UMULO: 3483 if (Op.getResNo() != 1) 3484 break; 3485 // The boolean result conforms to getBooleanContents. Fall through. 3486 // If setcc returns 0/-1, all bits are sign bits. 3487 // We know that we have an integer-based boolean since these operations 3488 // are only available for integer. 3489 if (TLI->getBooleanContents(VT.isVector(), false) == 3490 TargetLowering::ZeroOrNegativeOneBooleanContent) 3491 return VTBits; 3492 break; 3493 case ISD::SETCC: 3494 // If setcc returns 0/-1, all bits are sign bits. 
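    // The boolean contents are queried on the operand's type, so vector and
    // scalar comparisons may use different conventions.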
3495 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3496 TargetLowering::ZeroOrNegativeOneBooleanContent) 3497 return VTBits; 3498 break; 3499 case ISD::ROTL: 3500 case ISD::ROTR: 3501 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 3502 unsigned RotAmt = C->getAPIntValue().urem(VTBits); 3503 3504 // Handle rotate right by N like a rotate left by 32-N. 3505 if (Opcode == ISD::ROTR) 3506 RotAmt = (VTBits - RotAmt) % VTBits; 3507 3508 // If we aren't rotating out all of the known-in sign bits, return the 3509 // number that are left. This handles rotl(sext(x), 1) for example. 3510 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3511 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt); 3512 } 3513 break; 3514 case ISD::ADD: 3515 case ISD::ADDC: 3516 // Add can have at most one carry bit. Thus we know that the output 3517 // is, at worst, one more bit than the inputs. 3518 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3519 if (Tmp == 1) return 1; // Early out. 3520 3521 // Special case decrementing a value (ADD X, -1): 3522 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 3523 if (CRHS->isAllOnesValue()) { 3524 KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1); 3525 3526 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3527 // sign bits set. 3528 if ((Known.Zero | 1).isAllOnesValue()) 3529 return VTBits; 3530 3531 // If we are subtracting one from a positive number, there is no carry 3532 // out of the result. 3533 if (Known.isNonNegative()) 3534 return Tmp; 3535 } 3536 3537 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3538 if (Tmp2 == 1) return 1; 3539 return std::min(Tmp, Tmp2)-1; 3540 3541 case ISD::SUB: 3542 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3543 if (Tmp2 == 1) return 1; 3544 3545 // Handle NEG. 3546 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) 3547 if (CLHS->isNullValue()) { 3548 KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1); 3549 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3550 // sign bits set. 3551 if ((Known.Zero | 1).isAllOnesValue()) 3552 return VTBits; 3553 3554 // If the input is known to be positive (the sign bit is known clear), 3555 // the output of the NEG has the same number of sign bits as the input. 3556 if (Known.isNonNegative()) 3557 return Tmp2; 3558 3559 // Otherwise, we treat this like a SUB. 3560 } 3561 3562 // Sub can have at most one carry bit. Thus we know that the output 3563 // is, at worst, one more bit than the inputs. 3564 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3565 if (Tmp == 1) return 1; // Early out. 3566 return std::min(Tmp, Tmp2)-1; 3567 case ISD::TRUNCATE: { 3568 // Check if the sign bits of source go down as far as the truncated value. 3569 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); 3570 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3571 if (NumSrcSignBits > (NumSrcBits - VTBits)) 3572 return NumSrcSignBits - (NumSrcBits - VTBits); 3573 break; 3574 } 3575 case ISD::EXTRACT_ELEMENT: { 3576 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3577 const int BitWidth = Op.getValueSizeInBits(); 3578 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; 3579 3580 // Get reverse index (starting from 1), Op1 value indexes elements from 3581 // little end. Sign starts at big end. 
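    // e.g. extracting the high i32 of an i64 built by (sext i32 X): KnownSign
    // is at least 33 and rIndex is 0, so all 32 result bits are sign bits,
    // while the low half gets rIndex 1 and only 33 - 32 = 1 sign bit.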
3582 const int rIndex = Items - 1 - Op.getConstantOperandVal(1); 3583 3584 // If the sign portion ends in our element the subtraction gives correct 3585 // result. Otherwise it gives either negative or > bitwidth result 3586 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); 3587 } 3588 case ISD::INSERT_VECTOR_ELT: { 3589 SDValue InVec = Op.getOperand(0); 3590 SDValue InVal = Op.getOperand(1); 3591 SDValue EltNo = Op.getOperand(2); 3592 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 3593 3594 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3595 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3596 // If we know the element index, split the demand between the 3597 // source vector and the inserted element. 3598 unsigned EltIdx = CEltNo->getZExtValue(); 3599 3600 // If we demand the inserted element then get its sign bits. 3601 Tmp = std::numeric_limits<unsigned>::max(); 3602 if (DemandedElts[EltIdx]) { 3603 // TODO - handle implicit truncation of inserted elements. 3604 if (InVal.getScalarValueSizeInBits() != VTBits) 3605 break; 3606 Tmp = ComputeNumSignBits(InVal, Depth + 1); 3607 } 3608 3609 // If we demand the source vector then get its sign bits, and determine 3610 // the minimum. 3611 APInt VectorElts = DemandedElts; 3612 VectorElts.clearBit(EltIdx); 3613 if (!!VectorElts) { 3614 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1); 3615 Tmp = std::min(Tmp, Tmp2); 3616 } 3617 } else { 3618 // Unknown element index, so ignore DemandedElts and demand them all. 3619 Tmp = ComputeNumSignBits(InVec, Depth + 1); 3620 Tmp2 = ComputeNumSignBits(InVal, Depth + 1); 3621 Tmp = std::min(Tmp, Tmp2); 3622 } 3623 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3624 return Tmp; 3625 } 3626 case ISD::EXTRACT_VECTOR_ELT: { 3627 SDValue InVec = Op.getOperand(0); 3628 SDValue EltNo = Op.getOperand(1); 3629 EVT VecVT = InVec.getValueType(); 3630 const unsigned BitWidth = Op.getValueSizeInBits(); 3631 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); 3632 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3633 3634 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know 3635 // anything about sign bits. But if the sizes match we can derive knowledge 3636 // about sign bits from the vector operand. 3637 if (BitWidth != EltBitWidth) 3638 break; 3639 3640 // If we know the element index, just demand that vector element, else for 3641 // an unknown element index, ignore DemandedElts and demand them all. 3642 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 3643 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3644 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) 3645 DemandedSrcElts = 3646 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); 3647 3648 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1); 3649 } 3650 case ISD::EXTRACT_SUBVECTOR: { 3651 // If we know the element index, just demand that subvector elements, 3652 // otherwise demand them all. 3653 SDValue Src = Op.getOperand(0); 3654 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 3655 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3656 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 3657 // Offset the demanded elts by the subvector index. 
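      // e.g. for (extract_subvector <4 x i32> Src, 2) returning <2 x i32>,
      // demanded result elements {0,1} become source elements {2,3}.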
3658 uint64_t Idx = SubIdx->getZExtValue(); 3659 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 3660 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1); 3661 } 3662 return ComputeNumSignBits(Src, Depth + 1); 3663 } 3664 case ISD::CONCAT_VECTORS: 3665 // Determine the minimum number of sign bits across all demanded 3666 // elts of the input vectors. Early out if the result is already 1. 3667 Tmp = std::numeric_limits<unsigned>::max(); 3668 EVT SubVectorVT = Op.getOperand(0).getValueType(); 3669 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 3670 unsigned NumSubVectors = Op.getNumOperands(); 3671 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) { 3672 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 3673 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 3674 if (!DemandedSub) 3675 continue; 3676 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1); 3677 Tmp = std::min(Tmp, Tmp2); 3678 } 3679 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3680 return Tmp; 3681 } 3682 3683 // If we are looking at the loaded value of the SDNode. 3684 if (Op.getResNo() == 0) { 3685 // Handle LOADX separately here. EXTLOAD case will fallthrough. 3686 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 3687 unsigned ExtType = LD->getExtensionType(); 3688 switch (ExtType) { 3689 default: break; 3690 case ISD::SEXTLOAD: // '17' bits known 3691 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3692 return VTBits-Tmp+1; 3693 case ISD::ZEXTLOAD: // '16' bits known 3694 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3695 return VTBits-Tmp; 3696 } 3697 } 3698 } 3699 3700 // Allow the target to implement this method for its nodes. 3701 if (Opcode >= ISD::BUILTIN_OP_END || 3702 Opcode == ISD::INTRINSIC_WO_CHAIN || 3703 Opcode == ISD::INTRINSIC_W_CHAIN || 3704 Opcode == ISD::INTRINSIC_VOID) { 3705 unsigned NumBits = 3706 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 3707 if (NumBits > 1) 3708 FirstAnswer = std::max(FirstAnswer, NumBits); 3709 } 3710 3711 // Finally, if we can prove that the top bits of the result are 0's or 1's, 3712 // use this information. 3713 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); 3714 3715 APInt Mask; 3716 if (Known.isNonNegative()) { // sign bit is 0 3717 Mask = Known.Zero; 3718 } else if (Known.isNegative()) { // sign bit is 1; 3719 Mask = Known.One; 3720 } else { 3721 // Nothing known. 3722 return FirstAnswer; 3723 } 3724 3725 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 3726 // the number of identical bits in the top of the input value. 3727 Mask = ~Mask; 3728 Mask <<= Mask.getBitWidth()-VTBits; 3729 // Return # leading zeros. We use 'min' here in case Val was zero before 3730 // shifting. We don't want to return '64' as for an i32 "0". 3731 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros())); 3732 } 3733 3734 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 3735 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 3736 !isa<ConstantSDNode>(Op.getOperand(1))) 3737 return false; 3738 3739 if (Op.getOpcode() == ISD::OR && 3740 !MaskedValueIsZero(Op.getOperand(0), 3741 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue())) 3742 return false; 3743 3744 return true; 3745 } 3746 3747 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { 3748 // If we're told that NaNs won't happen, assume they won't. 
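  // When SNaN is true we only have to prove that the value is never a
  // signaling NaN; a quiet NaN is acceptable in that case.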
3749 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) 3750 return true; 3751 3752 if (Depth == 6) 3753 return false; // Limit search depth. 3754 3755 // TODO: Handle vectors. 3756 // If the value is a constant, we can obviously see if it is a NaN or not. 3757 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) { 3758 return !C->getValueAPF().isNaN() || 3759 (SNaN && !C->getValueAPF().isSignaling()); 3760 } 3761 3762 unsigned Opcode = Op.getOpcode(); 3763 switch (Opcode) { 3764 case ISD::FADD: 3765 case ISD::FSUB: 3766 case ISD::FMUL: 3767 case ISD::FDIV: 3768 case ISD::FREM: 3769 case ISD::FSIN: 3770 case ISD::FCOS: { 3771 if (SNaN) 3772 return true; 3773 // TODO: Need isKnownNeverInfinity 3774 return false; 3775 } 3776 case ISD::FCANONICALIZE: 3777 case ISD::FEXP: 3778 case ISD::FEXP2: 3779 case ISD::FTRUNC: 3780 case ISD::FFLOOR: 3781 case ISD::FCEIL: 3782 case ISD::FROUND: 3783 case ISD::FRINT: 3784 case ISD::FNEARBYINT: { 3785 if (SNaN) 3786 return true; 3787 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 3788 } 3789 case ISD::FABS: 3790 case ISD::FNEG: 3791 case ISD::FCOPYSIGN: { 3792 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 3793 } 3794 case ISD::SELECT: 3795 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && 3796 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); 3797 case ISD::FP_EXTEND: 3798 case ISD::FP_ROUND: { 3799 if (SNaN) 3800 return true; 3801 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 3802 } 3803 case ISD::SINT_TO_FP: 3804 case ISD::UINT_TO_FP: 3805 return true; 3806 case ISD::FMA: 3807 case ISD::FMAD: { 3808 if (SNaN) 3809 return true; 3810 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && 3811 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && 3812 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); 3813 } 3814 case ISD::FSQRT: // Need to know the operand is positive. 3815 case ISD::FLOG: 3816 case ISD::FLOG2: 3817 case ISD::FLOG10: 3818 case ISD::FPOWI: 3819 case ISD::FPOW: { 3820 if (SNaN) 3821 return true; 3822 // TODO: Refine on operand 3823 return false; 3824 } 3825 case ISD::FMINNUM: 3826 case ISD::FMAXNUM: { 3827 // Only one operand needs to be known not-NaN, since it will be returned if the 3828 // other ends up being a NaN. 3829 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) || 3830 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); 3831 } 3832 case ISD::FMINNUM_IEEE: 3833 case ISD::FMAXNUM_IEEE: { 3834 if (SNaN) 3835 return true; 3836 // This can return a NaN if either operand is an sNaN, or if both operands 3837 // are NaN. 3838 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) && 3839 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) || 3840 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) && 3841 isKnownNeverSNaN(Op.getOperand(0), Depth + 1)); 3842 } 3843 case ISD::FMINIMUM: 3844 case ISD::FMAXIMUM: { 3845 // TODO: Does this quiet or return the original NaN as-is?
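    // Unlike FMINNUM/FMAXNUM above, these nodes propagate a NaN operand, so
    // both operands have to be known never-NaN.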
3846 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && 3847 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); 3848 } 3849 case ISD::EXTRACT_VECTOR_ELT: { 3850 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 3851 } 3852 default: 3853 if (Opcode >= ISD::BUILTIN_OP_END || 3854 Opcode == ISD::INTRINSIC_WO_CHAIN || 3855 Opcode == ISD::INTRINSIC_W_CHAIN || 3856 Opcode == ISD::INTRINSIC_VOID) { 3857 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth); 3858 } 3859 3860 return false; 3861 } 3862 } 3863 3864 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const { 3865 assert(Op.getValueType().isFloatingPoint() && 3866 "Floating point type expected"); 3867 3868 // If the value is a constant, we can obviously see if it is a zero or not. 3869 // TODO: Add BuildVector support. 3870 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 3871 return !C->isZero(); 3872 return false; 3873 } 3874 3875 bool SelectionDAG::isKnownNeverZero(SDValue Op) const { 3876 assert(!Op.getValueType().isFloatingPoint() && 3877 "Floating point types unsupported - use isKnownNeverZeroFloat"); 3878 3879 // If the value is a constant, we can obviously see if it is a zero or not. 3880 if (ISD::matchUnaryPredicate( 3881 Op, [](ConstantSDNode *C) { return !C->isNullValue(); })) 3882 return true; 3883 3884 // TODO: Recognize more cases here. 3885 switch (Op.getOpcode()) { 3886 default: break; 3887 case ISD::OR: 3888 if (isKnownNeverZero(Op.getOperand(1)) || 3889 isKnownNeverZero(Op.getOperand(0))) 3890 return true; 3891 break; 3892 } 3893 3894 return false; 3895 } 3896 3897 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const { 3898 // Check the obvious case. 3899 if (A == B) return true; 3900 3901 // Check for negative and positive zero. 3902 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) 3903 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) 3904 if (CA->isZero() && CB->isZero()) return true; 3905 3906 // Otherwise they may not be equal. 3907 return false; 3908 } 3909 3910 // FIXME: unify with llvm::haveNoCommonBitsSet. 3911 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M) 3912 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const { 3913 assert(A.getValueType() == B.getValueType() && 3914 "Values must have the same type"); 3915 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue(); 3916 } 3917 3918 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, 3919 ArrayRef<SDValue> Ops, 3920 SelectionDAG &DAG) { 3921 int NumOps = Ops.size(); 3922 assert(NumOps != 0 && "Can't build an empty vector!"); 3923 assert(VT.getVectorNumElements() == (unsigned)NumOps && 3924 "Incorrect element count in BUILD_VECTOR!"); 3925 3926 // BUILD_VECTOR of UNDEFs is UNDEF. 3927 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 3928 return DAG.getUNDEF(VT); 3929 3930 // A BUILD_VECTOR of sequential extract_vector_elts from the same source vector of the result type is an identity and can be replaced by that vector.
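  // e.g. (build_vector (extract_elt V, 0), (extract_elt V, 1),
  //                    (extract_elt V, 2), (extract_elt V, 3)) -> V
  // when V already has the <4 x ...> result type.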
3931 SDValue IdentitySrc; 3932 bool IsIdentity = true; 3933 for (int i = 0; i != NumOps; ++i) { 3934 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT || 3935 Ops[i].getOperand(0).getValueType() != VT || 3936 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) || 3937 !isa<ConstantSDNode>(Ops[i].getOperand(1)) || 3938 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) { 3939 IsIdentity = false; 3940 break; 3941 } 3942 IdentitySrc = Ops[i].getOperand(0); 3943 } 3944 if (IsIdentity) 3945 return IdentitySrc; 3946 3947 return SDValue(); 3948 } 3949 3950 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT, 3951 ArrayRef<SDValue> Ops, 3952 SelectionDAG &DAG) { 3953 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!"); 3954 assert(llvm::all_of(Ops, 3955 [Ops](SDValue Op) { 3956 return Ops[0].getValueType() == Op.getValueType(); 3957 }) && 3958 "Concatenation of vectors with inconsistent value types!"); 3959 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) == 3960 VT.getVectorNumElements() && 3961 "Incorrect element count in vector concatenation!"); 3962 3963 if (Ops.size() == 1) 3964 return Ops[0]; 3965 3966 // Concat of UNDEFs is UNDEF. 3967 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 3968 return DAG.getUNDEF(VT); 3969 3970 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be 3971 // simplified to one big BUILD_VECTOR. 3972 // FIXME: Add support for SCALAR_TO_VECTOR as well. 3973 EVT SVT = VT.getScalarType(); 3974 SmallVector<SDValue, 16> Elts; 3975 for (SDValue Op : Ops) { 3976 EVT OpVT = Op.getValueType(); 3977 if (Op.isUndef()) 3978 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); 3979 else if (Op.getOpcode() == ISD::BUILD_VECTOR) 3980 Elts.append(Op->op_begin(), Op->op_end()); 3981 else 3982 return SDValue(); 3983 } 3984 3985 // BUILD_VECTOR requires all inputs to be of the same type, find the 3986 // maximum type and extend them all. 3987 for (SDValue Op : Elts) 3988 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 3989 3990 if (SVT.bitsGT(VT.getScalarType())) 3991 for (SDValue &Op : Elts) 3992 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) 3993 ? DAG.getZExtOrTrunc(Op, DL, SVT) 3994 : DAG.getSExtOrTrunc(Op, DL, SVT); 3995 3996 SDValue V = DAG.getBuildVector(VT, DL, Elts); 3997 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG); 3998 return V; 3999 } 4000 4001 /// Gets or creates the specified node. 4002 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { 4003 FoldingSetNodeID ID; 4004 AddNodeIDNode(ID, Opcode, getVTList(VT), None); 4005 void *IP = nullptr; 4006 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4007 return SDValue(E, 0); 4008 4009 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4010 getVTList(VT)); 4011 CSEMap.InsertNode(N, IP); 4012 4013 InsertNode(N); 4014 SDValue V = SDValue(N, 0); 4015 NewSDValueDbgMsg(V, "Creating new node: ", this); 4016 return V; 4017 } 4018 4019 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4020 SDValue Operand, const SDNodeFlags Flags) { 4021 // Constant fold unary operations with an integer constant operand. Even 4022 // opaque constant will be folded, because the folding of unary operations 4023 // doesn't create new constants with different values. Nevertheless, the 4024 // opaque flag is preserved during folding to prevent future folding with 4025 // other constants. 
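  // e.g. (zero_extend (i8 opaque 42)) to i32 still folds to a constant 42
  // that keeps the opaque flag, via the C->isOpaque() argument below.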
4026 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 4027 const APInt &Val = C->getAPIntValue(); 4028 switch (Opcode) { 4029 default: break; 4030 case ISD::SIGN_EXTEND: 4031 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 4032 C->isTargetOpcode(), C->isOpaque()); 4033 case ISD::TRUNCATE: 4034 if (C->isOpaque()) 4035 break; 4036 LLVM_FALLTHROUGH; 4037 case ISD::ANY_EXTEND: 4038 case ISD::ZERO_EXTEND: 4039 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 4040 C->isTargetOpcode(), C->isOpaque()); 4041 case ISD::UINT_TO_FP: 4042 case ISD::SINT_TO_FP: { 4043 APFloat apf(EVTToAPFloatSemantics(VT), 4044 APInt::getNullValue(VT.getSizeInBits())); 4045 (void)apf.convertFromAPInt(Val, 4046 Opcode==ISD::SINT_TO_FP, 4047 APFloat::rmNearestTiesToEven); 4048 return getConstantFP(apf, DL, VT); 4049 } 4050 case ISD::BITCAST: 4051 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 4052 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 4053 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 4054 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 4055 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 4056 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 4057 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 4058 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 4059 break; 4060 case ISD::ABS: 4061 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 4062 C->isOpaque()); 4063 case ISD::BITREVERSE: 4064 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 4065 C->isOpaque()); 4066 case ISD::BSWAP: 4067 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 4068 C->isOpaque()); 4069 case ISD::CTPOP: 4070 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 4071 C->isOpaque()); 4072 case ISD::CTLZ: 4073 case ISD::CTLZ_ZERO_UNDEF: 4074 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 4075 C->isOpaque()); 4076 case ISD::CTTZ: 4077 case ISD::CTTZ_ZERO_UNDEF: 4078 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 4079 C->isOpaque()); 4080 case ISD::FP16_TO_FP: { 4081 bool Ignored; 4082 APFloat FPV(APFloat::IEEEhalf(), 4083 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 4084 4085 // This can return overflow, underflow, or inexact; we don't care. 4086 // FIXME need to be more flexible about rounding mode. 4087 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4088 APFloat::rmNearestTiesToEven, &Ignored); 4089 return getConstantFP(FPV, DL, VT); 4090 } 4091 } 4092 } 4093 4094 // Constant fold unary operations with a floating point constant operand. 
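  // e.g. (fneg (f32 2.0)) folds to (f32 -2.0). Rounding-sensitive folds such
  // as FCEIL/FTRUNC/FFLOOR are only kept when APFloat reports opOK or
  // opInexact.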
4095 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4096 APFloat V = C->getValueAPF(); // make copy 4097 switch (Opcode) { 4098 case ISD::FNEG: 4099 V.changeSign(); 4100 return getConstantFP(V, DL, VT); 4101 case ISD::FABS: 4102 V.clearSign(); 4103 return getConstantFP(V, DL, VT); 4104 case ISD::FCEIL: { 4105 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4106 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4107 return getConstantFP(V, DL, VT); 4108 break; 4109 } 4110 case ISD::FTRUNC: { 4111 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4112 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4113 return getConstantFP(V, DL, VT); 4114 break; 4115 } 4116 case ISD::FFLOOR: { 4117 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4118 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4119 return getConstantFP(V, DL, VT); 4120 break; 4121 } 4122 case ISD::FP_EXTEND: { 4123 bool ignored; 4124 // This can return overflow, underflow, or inexact; we don't care. 4125 // FIXME need to be more flexible about rounding mode. 4126 (void)V.convert(EVTToAPFloatSemantics(VT), 4127 APFloat::rmNearestTiesToEven, &ignored); 4128 return getConstantFP(V, DL, VT); 4129 } 4130 case ISD::FP_TO_SINT: 4131 case ISD::FP_TO_UINT: { 4132 bool ignored; 4133 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4134 // FIXME need to be more flexible about rounding mode. 4135 APFloat::opStatus s = 4136 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4137 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4138 break; 4139 return getConstant(IntVal, DL, VT); 4140 } 4141 case ISD::BITCAST: 4142 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4143 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4144 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4145 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4146 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4147 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4148 break; 4149 case ISD::FP_TO_FP16: { 4150 bool Ignored; 4151 // This can return overflow, underflow, or inexact; we don't care. 4152 // FIXME need to be more flexible about rounding mode. 4153 (void)V.convert(APFloat::IEEEhalf(), 4154 APFloat::rmNearestTiesToEven, &Ignored); 4155 return getConstant(V.bitcastToAPInt(), DL, VT); 4156 } 4157 } 4158 } 4159 4160 // Constant fold unary operations with a vector integer or float operand. 4161 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4162 if (BV->isConstant()) { 4163 switch (Opcode) { 4164 default: 4165 // FIXME: Entirely reasonable to perform folding of other unary 4166 // operations here as the need arises. 
4167 break; 4168 case ISD::FNEG: 4169 case ISD::FABS: 4170 case ISD::FCEIL: 4171 case ISD::FTRUNC: 4172 case ISD::FFLOOR: 4173 case ISD::FP_EXTEND: 4174 case ISD::FP_TO_SINT: 4175 case ISD::FP_TO_UINT: 4176 case ISD::TRUNCATE: 4177 case ISD::ANY_EXTEND: 4178 case ISD::ZERO_EXTEND: 4179 case ISD::SIGN_EXTEND: 4180 case ISD::UINT_TO_FP: 4181 case ISD::SINT_TO_FP: 4182 case ISD::ABS: 4183 case ISD::BITREVERSE: 4184 case ISD::BSWAP: 4185 case ISD::CTLZ: 4186 case ISD::CTLZ_ZERO_UNDEF: 4187 case ISD::CTTZ: 4188 case ISD::CTTZ_ZERO_UNDEF: 4189 case ISD::CTPOP: { 4190 SDValue Ops = { Operand }; 4191 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 4192 return Fold; 4193 } 4194 } 4195 } 4196 } 4197 4198 unsigned OpOpcode = Operand.getNode()->getOpcode(); 4199 switch (Opcode) { 4200 case ISD::TokenFactor: 4201 case ISD::MERGE_VALUES: 4202 case ISD::CONCAT_VECTORS: 4203 return Operand; // Factor, merge or concat of one node? No need. 4204 case ISD::BUILD_VECTOR: { 4205 // Attempt to simplify BUILD_VECTOR. 4206 SDValue Ops[] = {Operand}; 4207 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 4208 return V; 4209 break; 4210 } 4211 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 4212 case ISD::FP_EXTEND: 4213 assert(VT.isFloatingPoint() && 4214 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 4215 if (Operand.getValueType() == VT) return Operand; // noop conversion. 4216 assert((!VT.isVector() || 4217 VT.getVectorNumElements() == 4218 Operand.getValueType().getVectorNumElements()) && 4219 "Vector element count mismatch!"); 4220 assert(Operand.getValueType().bitsLT(VT) && 4221 "Invalid fpext node, dst < src!"); 4222 if (Operand.isUndef()) 4223 return getUNDEF(VT); 4224 break; 4225 case ISD::SIGN_EXTEND: 4226 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4227 "Invalid SIGN_EXTEND!"); 4228 if (Operand.getValueType() == VT) return Operand; // noop extension 4229 assert((!VT.isVector() || 4230 VT.getVectorNumElements() == 4231 Operand.getValueType().getVectorNumElements()) && 4232 "Vector element count mismatch!"); 4233 assert(Operand.getValueType().bitsLT(VT) && 4234 "Invalid sext node, dst < src!"); 4235 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 4236 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4237 else if (OpOpcode == ISD::UNDEF) 4238 // sext(undef) = 0, because the top bits will all be the same. 4239 return getConstant(0, DL, VT); 4240 break; 4241 case ISD::ZERO_EXTEND: 4242 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4243 "Invalid ZERO_EXTEND!"); 4244 if (Operand.getValueType() == VT) return Operand; // noop extension 4245 assert((!VT.isVector() || 4246 VT.getVectorNumElements() == 4247 Operand.getValueType().getVectorNumElements()) && 4248 "Vector element count mismatch!"); 4249 assert(Operand.getValueType().bitsLT(VT) && 4250 "Invalid zext node, dst < src!"); 4251 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 4252 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0)); 4253 else if (OpOpcode == ISD::UNDEF) 4254 // zext(undef) = 0, because the top bits will be zero. 
4255 return getConstant(0, DL, VT); 4256 break; 4257 case ISD::ANY_EXTEND: 4258 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4259 "Invalid ANY_EXTEND!"); 4260 if (Operand.getValueType() == VT) return Operand; // noop extension 4261 assert((!VT.isVector() || 4262 VT.getVectorNumElements() == 4263 Operand.getValueType().getVectorNumElements()) && 4264 "Vector element count mismatch!"); 4265 assert(Operand.getValueType().bitsLT(VT) && 4266 "Invalid anyext node, dst < src!"); 4267 4268 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || 4269 OpOpcode == ISD::ANY_EXTEND) 4270 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x) 4271 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4272 else if (OpOpcode == ISD::UNDEF) 4273 return getUNDEF(VT); 4274 4275 // (ext (trunc x)) -> x 4276 if (OpOpcode == ISD::TRUNCATE) { 4277 SDValue OpOp = Operand.getOperand(0); 4278 if (OpOp.getValueType() == VT) { 4279 transferDbgValues(Operand, OpOp); 4280 return OpOp; 4281 } 4282 } 4283 break; 4284 case ISD::TRUNCATE: 4285 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4286 "Invalid TRUNCATE!"); 4287 if (Operand.getValueType() == VT) return Operand; // noop truncate 4288 assert((!VT.isVector() || 4289 VT.getVectorNumElements() == 4290 Operand.getValueType().getVectorNumElements()) && 4291 "Vector element count mismatch!"); 4292 assert(Operand.getValueType().bitsGT(VT) && 4293 "Invalid truncate node, src < dst!"); 4294 if (OpOpcode == ISD::TRUNCATE) 4295 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4296 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || 4297 OpOpcode == ISD::ANY_EXTEND) { 4298 // If the source is smaller than the dest, we still need an extend. 4299 if (Operand.getOperand(0).getValueType().getScalarType() 4300 .bitsLT(VT.getScalarType())) 4301 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4302 if (Operand.getOperand(0).getValueType().bitsGT(VT)) 4303 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4304 return Operand.getOperand(0); 4305 } 4306 if (OpOpcode == ISD::UNDEF) 4307 return getUNDEF(VT); 4308 break; 4309 case ISD::ANY_EXTEND_VECTOR_INREG: 4310 case ISD::ZERO_EXTEND_VECTOR_INREG: 4311 case ISD::SIGN_EXTEND_VECTOR_INREG: 4312 assert(VT.isVector() && "This DAG node is restricted to vector types."); 4313 assert(Operand.getValueType().bitsLE(VT) && 4314 "The input must be the same size or smaller than the result."); 4315 assert(VT.getVectorNumElements() < 4316 Operand.getValueType().getVectorNumElements() && 4317 "The destination vector type must have fewer lanes than the input."); 4318 break; 4319 case ISD::ABS: 4320 assert(VT.isInteger() && VT == Operand.getValueType() && 4321 "Invalid ABS!"); 4322 if (OpOpcode == ISD::UNDEF) 4323 return getUNDEF(VT); 4324 break; 4325 case ISD::BSWAP: 4326 assert(VT.isInteger() && VT == Operand.getValueType() && 4327 "Invalid BSWAP!"); 4328 assert((VT.getScalarSizeInBits() % 16 == 0) && 4329 "BSWAP types must be a multiple of 16 bits!"); 4330 if (OpOpcode == ISD::UNDEF) 4331 return getUNDEF(VT); 4332 break; 4333 case ISD::BITREVERSE: 4334 assert(VT.isInteger() && VT == Operand.getValueType() && 4335 "Invalid BITREVERSE!"); 4336 if (OpOpcode == ISD::UNDEF) 4337 return getUNDEF(VT); 4338 break; 4339 case ISD::BITCAST: 4340 // Basic sanity checking. 
4341 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 4342 "Cannot BITCAST between types of different sizes!"); 4343 if (VT == Operand.getValueType()) return Operand; // noop conversion. 4344 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4345 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4346 if (OpOpcode == ISD::UNDEF) 4347 return getUNDEF(VT); 4348 break; 4349 case ISD::SCALAR_TO_VECTOR: 4350 assert(VT.isVector() && !Operand.getValueType().isVector() && 4351 (VT.getVectorElementType() == Operand.getValueType() || 4352 (VT.getVectorElementType().isInteger() && 4353 Operand.getValueType().isInteger() && 4354 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4355 "Illegal SCALAR_TO_VECTOR node!"); 4356 if (OpOpcode == ISD::UNDEF) 4357 return getUNDEF(VT); 4358 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4359 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4360 isa<ConstantSDNode>(Operand.getOperand(1)) && 4361 Operand.getConstantOperandVal(1) == 0 && 4362 Operand.getOperand(0).getValueType() == VT) 4363 return Operand.getOperand(0); 4364 break; 4365 case ISD::FNEG: 4366 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 4367 if ((getTarget().Options.UnsafeFPMath || Flags.hasNoSignedZeros()) && 4368 OpOpcode == ISD::FSUB) 4369 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1), 4370 Operand.getOperand(0), Flags); 4371 if (OpOpcode == ISD::FNEG) // --X -> X 4372 return Operand.getOperand(0); 4373 break; 4374 case ISD::FABS: 4375 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4376 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4377 break; 4378 } 4379 4380 SDNode *N; 4381 SDVTList VTs = getVTList(VT); 4382 SDValue Ops[] = {Operand}; 4383 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4384 FoldingSetNodeID ID; 4385 AddNodeIDNode(ID, Opcode, VTs, Ops); 4386 void *IP = nullptr; 4387 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4388 E->intersectFlagsWith(Flags); 4389 return SDValue(E, 0); 4390 } 4391 4392 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4393 N->setFlags(Flags); 4394 createOperands(N, Ops); 4395 CSEMap.InsertNode(N, IP); 4396 } else { 4397 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4398 createOperands(N, Ops); 4399 } 4400 4401 InsertNode(N); 4402 SDValue V = SDValue(N, 0); 4403 NewSDValueDbgMsg(V, "Creating new node: ", this); 4404 return V; 4405 } 4406 4407 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 4408 const APInt &C2) { 4409 switch (Opcode) { 4410 case ISD::ADD: return std::make_pair(C1 + C2, true); 4411 case ISD::SUB: return std::make_pair(C1 - C2, true); 4412 case ISD::MUL: return std::make_pair(C1 * C2, true); 4413 case ISD::AND: return std::make_pair(C1 & C2, true); 4414 case ISD::OR: return std::make_pair(C1 | C2, true); 4415 case ISD::XOR: return std::make_pair(C1 ^ C2, true); 4416 case ISD::SHL: return std::make_pair(C1 << C2, true); 4417 case ISD::SRL: return std::make_pair(C1.lshr(C2), true); 4418 case ISD::SRA: return std::make_pair(C1.ashr(C2), true); 4419 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true); 4420 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true); 4421 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true); 4422 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true); 4423 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true); 4424 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? 
C1 : C2, true); 4425 case ISD::UDIV: 4426 if (!C2.getBoolValue()) 4427 break; 4428 return std::make_pair(C1.udiv(C2), true); 4429 case ISD::UREM: 4430 if (!C2.getBoolValue()) 4431 break; 4432 return std::make_pair(C1.urem(C2), true); 4433 case ISD::SDIV: 4434 if (!C2.getBoolValue()) 4435 break; 4436 return std::make_pair(C1.sdiv(C2), true); 4437 case ISD::SREM: 4438 if (!C2.getBoolValue()) 4439 break; 4440 return std::make_pair(C1.srem(C2), true); 4441 } 4442 return std::make_pair(APInt(1, 0), false); 4443 } 4444 4445 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4446 EVT VT, const ConstantSDNode *Cst1, 4447 const ConstantSDNode *Cst2) { 4448 if (Cst1->isOpaque() || Cst2->isOpaque()) 4449 return SDValue(); 4450 4451 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(), 4452 Cst2->getAPIntValue()); 4453 if (!Folded.second) 4454 return SDValue(); 4455 return getConstant(Folded.first, DL, VT); 4456 } 4457 4458 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4459 const GlobalAddressSDNode *GA, 4460 const SDNode *N2) { 4461 if (GA->getOpcode() != ISD::GlobalAddress) 4462 return SDValue(); 4463 if (!TLI->isOffsetFoldingLegal(GA)) 4464 return SDValue(); 4465 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2); 4466 if (!Cst2) 4467 return SDValue(); 4468 int64_t Offset = Cst2->getSExtValue(); 4469 switch (Opcode) { 4470 case ISD::ADD: break; 4471 case ISD::SUB: Offset = -uint64_t(Offset); break; 4472 default: return SDValue(); 4473 } 4474 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT, 4475 GA->getOffset() + uint64_t(Offset)); 4476 } 4477 4478 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4479 switch (Opcode) { 4480 case ISD::SDIV: 4481 case ISD::UDIV: 4482 case ISD::SREM: 4483 case ISD::UREM: { 4484 // If a divisor is zero/undef or any element of a divisor vector is 4485 // zero/undef, the whole op is undef. 4486 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4487 SDValue Divisor = Ops[1]; 4488 if (Divisor.isUndef() || isNullConstant(Divisor)) 4489 return true; 4490 4491 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4492 llvm::any_of(Divisor->op_values(), 4493 [](SDValue V) { return V.isUndef() || 4494 isNullConstant(V); }); 4495 // TODO: Handle signed overflow. 4496 } 4497 // TODO: Handle oversized shifts. 4498 default: 4499 return false; 4500 } 4501 } 4502 4503 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4504 EVT VT, SDNode *Cst1, 4505 SDNode *Cst2) { 4506 // If the opcode is a target-specific ISD node, there's nothing we can 4507 // do here and the operand rules may not line up with the below, so 4508 // bail early. 4509 if (Opcode >= ISD::BUILTIN_OP_END) 4510 return SDValue(); 4511 4512 if (isUndef(Opcode, {SDValue(Cst1, 0), SDValue(Cst2, 0)})) 4513 return getUNDEF(VT); 4514 4515 // Handle the case of two scalars. 
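  // e.g. (add (i32 7), (i32 5)) folds to (i32 12) by way of FoldValue above.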
4516 if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) { 4517 if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) { 4518 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2); 4519 assert((!Folded || !VT.isVector()) && 4520 "Can't fold vectors ops with scalar operands"); 4521 return Folded; 4522 } 4523 } 4524 4525 // fold (add Sym, c) -> Sym+c 4526 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1)) 4527 return FoldSymbolOffset(Opcode, VT, GA, Cst2); 4528 if (TLI->isCommutativeBinOp(Opcode)) 4529 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2)) 4530 return FoldSymbolOffset(Opcode, VT, GA, Cst1); 4531 4532 // For vectors, extract each constant element and fold them individually. 4533 // Either input may be an undef value. 4534 auto *BV1 = dyn_cast<BuildVectorSDNode>(Cst1); 4535 if (!BV1 && !Cst1->isUndef()) 4536 return SDValue(); 4537 auto *BV2 = dyn_cast<BuildVectorSDNode>(Cst2); 4538 if (!BV2 && !Cst2->isUndef()) 4539 return SDValue(); 4540 // If both operands are undef, that's handled the same way as scalars. 4541 if (!BV1 && !BV2) 4542 return SDValue(); 4543 4544 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) && 4545 "Vector binop with different number of elements in operands?"); 4546 4547 EVT SVT = VT.getScalarType(); 4548 EVT LegalSVT = SVT; 4549 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4550 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4551 if (LegalSVT.bitsLT(SVT)) 4552 return SDValue(); 4553 } 4554 SmallVector<SDValue, 4> Outputs; 4555 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands(); 4556 for (unsigned I = 0; I != NumOps; ++I) { 4557 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT); 4558 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT); 4559 if (SVT.isInteger()) { 4560 if (V1->getValueType(0).bitsGT(SVT)) 4561 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1); 4562 if (V2->getValueType(0).bitsGT(SVT)) 4563 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2); 4564 } 4565 4566 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT) 4567 return SDValue(); 4568 4569 // Fold one vector element. 4570 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2); 4571 if (LegalSVT != SVT) 4572 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4573 4574 // Scalar folding only succeeded if the result is a constant or UNDEF. 4575 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4576 ScalarResult.getOpcode() != ISD::ConstantFP) 4577 return SDValue(); 4578 Outputs.push_back(ScalarResult); 4579 } 4580 4581 assert(VT.getVectorNumElements() == Outputs.size() && 4582 "Vector size mismatch!"); 4583 4584 // We may have a vector type but a scalar result. Create a splat. 4585 Outputs.resize(VT.getVectorNumElements(), Outputs.back()); 4586 4587 // Build a big vector out of the scalar elements we generated. 4588 return getBuildVector(VT, SDLoc(), Outputs); 4589 } 4590 4591 // TODO: Merge with FoldConstantArithmetic 4592 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, 4593 const SDLoc &DL, EVT VT, 4594 ArrayRef<SDValue> Ops, 4595 const SDNodeFlags Flags) { 4596 // If the opcode is a target-specific ISD node, there's nothing we can 4597 // do here and the operand rules may not line up with the below, so 4598 // bail early. 
4599 if (Opcode >= ISD::BUILTIN_OP_END) 4600 return SDValue(); 4601 4602 if (isUndef(Opcode, Ops)) 4603 return getUNDEF(VT); 4604 4605 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday? 4606 if (!VT.isVector()) 4607 return SDValue(); 4608 4609 unsigned NumElts = VT.getVectorNumElements(); 4610 4611 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) { 4612 return !Op.getValueType().isVector() || 4613 Op.getValueType().getVectorNumElements() == NumElts; 4614 }; 4615 4616 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { 4617 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); 4618 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || 4619 (BV && BV->isConstant()); 4620 }; 4621 4622 // All operands must be vector types with the same number of elements as 4623 // the result type and must be either UNDEF or a build vector of constant 4624 // or UNDEF scalars. 4625 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) || 4626 !llvm::all_of(Ops, IsScalarOrSameVectorSize)) 4627 return SDValue(); 4628 4629 // If we are comparing vectors, then the result needs to be a i1 boolean 4630 // that is then sign-extended back to the legal result type. 4631 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); 4632 4633 // Find legal integer scalar type for constant promotion and 4634 // ensure that its scalar size is at least as large as source. 4635 EVT LegalSVT = VT.getScalarType(); 4636 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4637 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4638 if (LegalSVT.bitsLT(VT.getScalarType())) 4639 return SDValue(); 4640 } 4641 4642 // Constant fold each scalar lane separately. 4643 SmallVector<SDValue, 4> ScalarResults; 4644 for (unsigned i = 0; i != NumElts; i++) { 4645 SmallVector<SDValue, 4> ScalarOps; 4646 for (SDValue Op : Ops) { 4647 EVT InSVT = Op.getValueType().getScalarType(); 4648 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op); 4649 if (!InBV) { 4650 // We've checked that this is UNDEF or a constant of some kind. 4651 if (Op.isUndef()) 4652 ScalarOps.push_back(getUNDEF(InSVT)); 4653 else 4654 ScalarOps.push_back(Op); 4655 continue; 4656 } 4657 4658 SDValue ScalarOp = InBV->getOperand(i); 4659 EVT ScalarVT = ScalarOp.getValueType(); 4660 4661 // Build vector (integer) scalar operands may need implicit 4662 // truncation - do this before constant folding. 4663 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 4664 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 4665 4666 ScalarOps.push_back(ScalarOp); 4667 } 4668 4669 // Constant fold the scalar operands. 4670 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 4671 4672 // Legalize the (integer) scalar constant if necessary. 4673 if (LegalSVT != SVT) 4674 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4675 4676 // Scalar folding only succeeded if the result is a constant or UNDEF. 
4677 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4678 ScalarResult.getOpcode() != ISD::ConstantFP) 4679 return SDValue(); 4680 ScalarResults.push_back(ScalarResult); 4681 } 4682 4683 SDValue V = getBuildVector(VT, DL, ScalarResults); 4684 NewSDValueDbgMsg(V, "New node fold constant vector: ", this); 4685 return V; 4686 } 4687 4688 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4689 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 4690 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 4691 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 4692 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 4693 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 4694 4695 // Canonicalize constant to RHS if commutative. 4696 if (TLI->isCommutativeBinOp(Opcode)) { 4697 if (N1C && !N2C) { 4698 std::swap(N1C, N2C); 4699 std::swap(N1, N2); 4700 } else if (N1CFP && !N2CFP) { 4701 std::swap(N1CFP, N2CFP); 4702 std::swap(N1, N2); 4703 } 4704 } 4705 4706 switch (Opcode) { 4707 default: break; 4708 case ISD::TokenFactor: 4709 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 4710 N2.getValueType() == MVT::Other && "Invalid token factor!"); 4711 // Fold trivial token factors. 4712 if (N1.getOpcode() == ISD::EntryToken) return N2; 4713 if (N2.getOpcode() == ISD::EntryToken) return N1; 4714 if (N1 == N2) return N1; 4715 break; 4716 case ISD::BUILD_VECTOR: { 4717 // Attempt to simplify BUILD_VECTOR. 4718 SDValue Ops[] = {N1, N2}; 4719 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 4720 return V; 4721 break; 4722 } 4723 case ISD::CONCAT_VECTORS: { 4724 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 4725 SDValue Ops[] = {N1, N2}; 4726 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 4727 return V; 4728 break; 4729 } 4730 case ISD::AND: 4731 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4732 assert(N1.getValueType() == N2.getValueType() && 4733 N1.getValueType() == VT && "Binary operator types must match!"); 4734 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 4735 // worth handling here. 4736 if (N2C && N2C->isNullValue()) 4737 return N2; 4738 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 4739 return N1; 4740 break; 4741 case ISD::OR: 4742 case ISD::XOR: 4743 case ISD::ADD: 4744 case ISD::SUB: 4745 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4746 assert(N1.getValueType() == N2.getValueType() && 4747 N1.getValueType() == VT && "Binary operator types must match!"); 4748 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 4749 // it's worth handling here. 
4750 if (N2C && N2C->isNullValue()) 4751 return N1; 4752 break; 4753 case ISD::UDIV: 4754 case ISD::UREM: 4755 case ISD::MULHU: 4756 case ISD::MULHS: 4757 case ISD::MUL: 4758 case ISD::SDIV: 4759 case ISD::SREM: 4760 case ISD::SMIN: 4761 case ISD::SMAX: 4762 case ISD::UMIN: 4763 case ISD::UMAX: 4764 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4765 assert(N1.getValueType() == N2.getValueType() && 4766 N1.getValueType() == VT && "Binary operator types must match!"); 4767 break; 4768 case ISD::FADD: 4769 case ISD::FSUB: 4770 case ISD::FMUL: 4771 case ISD::FDIV: 4772 case ISD::FREM: 4773 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 4774 assert(N1.getValueType() == N2.getValueType() && 4775 N1.getValueType() == VT && "Binary operator types must match!"); 4776 break; 4777 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 4778 assert(N1.getValueType() == VT && 4779 N1.getValueType().isFloatingPoint() && 4780 N2.getValueType().isFloatingPoint() && 4781 "Invalid FCOPYSIGN!"); 4782 break; 4783 case ISD::SHL: 4784 case ISD::SRA: 4785 case ISD::SRL: 4786 if (SDValue V = simplifyShift(N1, N2)) 4787 return V; 4788 LLVM_FALLTHROUGH; 4789 case ISD::ROTL: 4790 case ISD::ROTR: 4791 assert(VT == N1.getValueType() && 4792 "Shift operators return type must be the same as their first arg"); 4793 assert(VT.isInteger() && N2.getValueType().isInteger() && 4794 "Shifts only work on integers"); 4795 assert((!VT.isVector() || VT == N2.getValueType()) && 4796 "Vector shift amounts must be in the same as their first arg"); 4797 // Verify that the shift amount VT is big enough to hold valid shift 4798 // amounts. This catches things like trying to shift an i1024 value by an 4799 // i8, which is easy to fall into in generic code that uses 4800 // TLI.getShiftAmount(). 4801 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) && 4802 "Invalid use of small shift amount with oversized value!"); 4803 4804 // Always fold shifts of i1 values so the code generator doesn't need to 4805 // handle them. Since we know the size of the shift has to be less than the 4806 // size of the value, the shift/rotate count is guaranteed to be zero. 4807 if (VT == MVT::i1) 4808 return N1; 4809 if (N2C && N2C->isNullValue()) 4810 return N1; 4811 break; 4812 case ISD::FP_ROUND_INREG: { 4813 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4814 assert(VT == N1.getValueType() && "Not an inreg round!"); 4815 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() && 4816 "Cannot FP_ROUND_INREG integer types"); 4817 assert(EVT.isVector() == VT.isVector() && 4818 "FP_ROUND_INREG type should be vector iff the operand " 4819 "type is vector!"); 4820 assert((!EVT.isVector() || 4821 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 4822 "Vector element counts must match in FP_ROUND_INREG"); 4823 assert(EVT.bitsLE(VT) && "Not rounding down!"); 4824 (void)EVT; 4825 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding. 4826 break; 4827 } 4828 case ISD::FP_ROUND: 4829 assert(VT.isFloatingPoint() && 4830 N1.getValueType().isFloatingPoint() && 4831 VT.bitsLE(N1.getValueType()) && 4832 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && 4833 "Invalid FP_ROUND!"); 4834 if (N1.getValueType() == VT) return N1; // noop conversion. 
4835 break; 4836 case ISD::AssertSext: 4837 case ISD::AssertZext: { 4838 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4839 assert(VT == N1.getValueType() && "Not an inreg extend!"); 4840 assert(VT.isInteger() && EVT.isInteger() && 4841 "Cannot *_EXTEND_INREG FP types"); 4842 assert(!EVT.isVector() && 4843 "AssertSExt/AssertZExt type should be the vector element type " 4844 "rather than the vector type!"); 4845 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!"); 4846 if (VT.getScalarType() == EVT) return N1; // noop assertion. 4847 break; 4848 } 4849 case ISD::SIGN_EXTEND_INREG: { 4850 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4851 assert(VT == N1.getValueType() && "Not an inreg extend!"); 4852 assert(VT.isInteger() && EVT.isInteger() && 4853 "Cannot *_EXTEND_INREG FP types"); 4854 assert(EVT.isVector() == VT.isVector() && 4855 "SIGN_EXTEND_INREG type should be vector iff the operand " 4856 "type is vector!"); 4857 assert((!EVT.isVector() || 4858 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 4859 "Vector element counts must match in SIGN_EXTEND_INREG"); 4860 assert(EVT.bitsLE(VT) && "Not extending!"); 4861 if (EVT == VT) return N1; // Not actually extending 4862 4863 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 4864 unsigned FromBits = EVT.getScalarSizeInBits(); 4865 Val <<= Val.getBitWidth() - FromBits; 4866 Val.ashrInPlace(Val.getBitWidth() - FromBits); 4867 return getConstant(Val, DL, ConstantVT); 4868 }; 4869 4870 if (N1C) { 4871 const APInt &Val = N1C->getAPIntValue(); 4872 return SignExtendInReg(Val, VT); 4873 } 4874 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 4875 SmallVector<SDValue, 8> Ops; 4876 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 4877 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 4878 SDValue Op = N1.getOperand(i); 4879 if (Op.isUndef()) { 4880 Ops.push_back(getUNDEF(OpVT)); 4881 continue; 4882 } 4883 ConstantSDNode *C = cast<ConstantSDNode>(Op); 4884 APInt Val = C->getAPIntValue(); 4885 Ops.push_back(SignExtendInReg(Val, OpVT)); 4886 } 4887 return getBuildVector(VT, DL, Ops); 4888 } 4889 break; 4890 } 4891 case ISD::EXTRACT_VECTOR_ELT: 4892 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() && 4893 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \ 4894 element type of the vector."); 4895 4896 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 4897 if (N1.isUndef()) 4898 return getUNDEF(VT); 4899 4900 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 4901 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) 4902 return getUNDEF(VT); 4903 4904 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 4905 // expanding copies of large vectors from registers. 4906 if (N2C && 4907 N1.getOpcode() == ISD::CONCAT_VECTORS && 4908 N1.getNumOperands() > 0) { 4909 unsigned Factor = 4910 N1.getOperand(0).getValueType().getVectorNumElements(); 4911 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 4912 N1.getOperand(N2C->getZExtValue() / Factor), 4913 getConstant(N2C->getZExtValue() % Factor, DL, 4914 N2.getValueType())); 4915 } 4916 4917 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 4918 // expanding large vector constants. 
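// For example (illustrative):
//   (extract_vector_elt (build_vector a, b, c, d), 2) -> c
// with an any-extend or truncate inserted below if the element type was
// promoted during type legalization.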
4919 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { 4920 SDValue Elt = N1.getOperand(N2C->getZExtValue()); 4921 4922 if (VT != Elt.getValueType()) 4923 // If the vector element type is not legal, the BUILD_VECTOR operands 4924 // are promoted and implicitly truncated, and the result implicitly 4925 // extended. Make that explicit here. 4926 Elt = getAnyExtOrTrunc(Elt, DL, VT); 4927 4928 return Elt; 4929 } 4930 4931 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 4932 // operations are lowered to scalars. 4933 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 4934 // If the indices are the same, return the inserted element else 4935 // if the indices are known different, extract the element from 4936 // the original vector. 4937 SDValue N1Op2 = N1.getOperand(2); 4938 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); 4939 4940 if (N1Op2C && N2C) { 4941 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 4942 if (VT == N1.getOperand(1).getValueType()) 4943 return N1.getOperand(1); 4944 else 4945 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 4946 } 4947 4948 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 4949 } 4950 } 4951 4952 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed 4953 // when vector types are scalarized and v1iX is legal. 4954 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx) 4955 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && 4956 N1.getValueType().getVectorNumElements() == 1) { 4957 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), 4958 N1.getOperand(1)); 4959 } 4960 break; 4961 case ISD::EXTRACT_ELEMENT: 4962 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 4963 assert(!N1.getValueType().isVector() && !VT.isVector() && 4964 (N1.getValueType().isInteger() == VT.isInteger()) && 4965 N1.getValueType() != VT && 4966 "Wrong types for EXTRACT_ELEMENT!"); 4967 4968 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 4969 // 64-bit integers into 32-bit parts. Instead of building the extract of 4970 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 4971 if (N1.getOpcode() == ISD::BUILD_PAIR) 4972 return N1.getOperand(N2C->getZExtValue()); 4973 4974 // EXTRACT_ELEMENT of a constant int is also very common. 4975 if (N1C) { 4976 unsigned ElementSize = VT.getSizeInBits(); 4977 unsigned Shift = ElementSize * N2C->getZExtValue(); 4978 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift); 4979 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT); 4980 } 4981 break; 4982 case ISD::EXTRACT_SUBVECTOR: 4983 if (VT.isSimple() && N1.getValueType().isSimple()) { 4984 assert(VT.isVector() && N1.getValueType().isVector() && 4985 "Extract subvector VTs must be a vectors!"); 4986 assert(VT.getVectorElementType() == 4987 N1.getValueType().getVectorElementType() && 4988 "Extract subvector VTs must have the same element type!"); 4989 assert(VT.getSimpleVT() <= N1.getSimpleValueType() && 4990 "Extract subvector must be from larger vector to smaller vector!"); 4991 4992 if (N2C) { 4993 assert((VT.getVectorNumElements() + N2C->getZExtValue() 4994 <= N1.getValueType().getVectorNumElements()) 4995 && "Extract subvector overflow!"); 4996 } 4997 4998 // Trivial extraction. 4999 if (VT.getSimpleVT() == N1.getSimpleValueType()) 5000 return N1; 5001 5002 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. 
5003 if (N1.isUndef()) 5004 return getUNDEF(VT); 5005 5006 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of 5007 // the concat have the same type as the extract. 5008 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && 5009 N1.getNumOperands() > 0 && 5010 VT == N1.getOperand(0).getValueType()) { 5011 unsigned Factor = VT.getVectorNumElements(); 5012 return N1.getOperand(N2C->getZExtValue() / Factor); 5013 } 5014 5015 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created 5016 // during shuffle legalization. 5017 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 5018 VT == N1.getOperand(1).getValueType()) 5019 return N1.getOperand(1); 5020 } 5021 break; 5022 } 5023 5024 // Perform trivial constant folding. 5025 if (SDValue SV = 5026 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode())) 5027 return SV; 5028 5029 // Constant fold FP operations. 5030 bool HasFPExceptions = TLI->hasFloatingPointExceptions(); 5031 if (N1CFP) { 5032 if (N2CFP) { 5033 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF(); 5034 APFloat::opStatus s; 5035 switch (Opcode) { 5036 case ISD::FADD: 5037 s = V1.add(V2, APFloat::rmNearestTiesToEven); 5038 if (!HasFPExceptions || s != APFloat::opInvalidOp) 5039 return getConstantFP(V1, DL, VT); 5040 break; 5041 case ISD::FSUB: 5042 s = V1.subtract(V2, APFloat::rmNearestTiesToEven); 5043 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 5044 return getConstantFP(V1, DL, VT); 5045 break; 5046 case ISD::FMUL: 5047 s = V1.multiply(V2, APFloat::rmNearestTiesToEven); 5048 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 5049 return getConstantFP(V1, DL, VT); 5050 break; 5051 case ISD::FDIV: 5052 s = V1.divide(V2, APFloat::rmNearestTiesToEven); 5053 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 5054 s!=APFloat::opDivByZero)) { 5055 return getConstantFP(V1, DL, VT); 5056 } 5057 break; 5058 case ISD::FREM : 5059 s = V1.mod(V2); 5060 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 5061 s!=APFloat::opDivByZero)) { 5062 return getConstantFP(V1, DL, VT); 5063 } 5064 break; 5065 case ISD::FCOPYSIGN: 5066 V1.copySign(V2); 5067 return getConstantFP(V1, DL, VT); 5068 default: break; 5069 } 5070 } 5071 5072 if (Opcode == ISD::FP_ROUND) { 5073 APFloat V = N1CFP->getValueAPF(); // make copy 5074 bool ignored; 5075 // This can return overflow, underflow, or inexact; we don't care. 5076 // FIXME need to be more flexible about rounding mode. 5077 (void)V.convert(EVTToAPFloatSemantics(VT), 5078 APFloat::rmNearestTiesToEven, &ignored); 5079 return getConstantFP(V, DL, VT); 5080 } 5081 } 5082 5083 switch (Opcode) { 5084 case ISD::FADD: 5085 case ISD::FSUB: 5086 case ISD::FMUL: 5087 case ISD::FDIV: 5088 case ISD::FREM: 5089 // If both operands are undef, the result is undef. If 1 operand is undef, 5090 // the result is NaN. This should match the behavior of the IR optimizer. 5091 if (N1.isUndef() && N2.isUndef()) 5092 return getUNDEF(VT); 5093 if (N1.isUndef() || N2.isUndef()) 5094 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); 5095 } 5096 5097 // Canonicalize an UNDEF to the RHS, even over a constant. 
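// For example (illustrative): (add undef, x) is rewritten as (add x, undef),
// so only the RHS-undef folds further below need to handle it.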
5098 if (N1.isUndef()) { 5099 if (TLI->isCommutativeBinOp(Opcode)) { 5100 std::swap(N1, N2); 5101 } else { 5102 switch (Opcode) { 5103 case ISD::FP_ROUND_INREG: 5104 case ISD::SIGN_EXTEND_INREG: 5105 case ISD::SUB: 5106 return getUNDEF(VT); // fold op(undef, arg2) -> undef 5107 case ISD::UDIV: 5108 case ISD::SDIV: 5109 case ISD::UREM: 5110 case ISD::SREM: 5111 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 5112 } 5113 } 5114 } 5115 5116 // Fold a bunch of operators when the RHS is undef. 5117 if (N2.isUndef()) { 5118 switch (Opcode) { 5119 case ISD::XOR: 5120 if (N1.isUndef()) 5121 // Handle undef ^ undef -> 0 special case. This is a common 5122 // idiom (misuse). 5123 return getConstant(0, DL, VT); 5124 LLVM_FALLTHROUGH; 5125 case ISD::ADD: 5126 case ISD::SUB: 5127 case ISD::UDIV: 5128 case ISD::SDIV: 5129 case ISD::UREM: 5130 case ISD::SREM: 5131 return getUNDEF(VT); // fold op(arg1, undef) -> undef 5132 case ISD::MUL: 5133 case ISD::AND: 5134 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 5135 case ISD::OR: 5136 return getAllOnesConstant(DL, VT); 5137 } 5138 } 5139 5140 // Memoize this node if possible. 5141 SDNode *N; 5142 SDVTList VTs = getVTList(VT); 5143 SDValue Ops[] = {N1, N2}; 5144 if (VT != MVT::Glue) { 5145 FoldingSetNodeID ID; 5146 AddNodeIDNode(ID, Opcode, VTs, Ops); 5147 void *IP = nullptr; 5148 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5149 E->intersectFlagsWith(Flags); 5150 return SDValue(E, 0); 5151 } 5152 5153 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5154 N->setFlags(Flags); 5155 createOperands(N, Ops); 5156 CSEMap.InsertNode(N, IP); 5157 } else { 5158 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5159 createOperands(N, Ops); 5160 } 5161 5162 InsertNode(N); 5163 SDValue V = SDValue(N, 0); 5164 NewSDValueDbgMsg(V, "Creating new node: ", this); 5165 return V; 5166 } 5167 5168 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5169 SDValue N1, SDValue N2, SDValue N3, 5170 const SDNodeFlags Flags) { 5171 // Perform various simplifications. 5172 switch (Opcode) { 5173 case ISD::FMA: { 5174 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5175 assert(N1.getValueType() == VT && N2.getValueType() == VT && 5176 N3.getValueType() == VT && "FMA types must match!"); 5177 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5178 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5179 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 5180 if (N1CFP && N2CFP && N3CFP) { 5181 APFloat V1 = N1CFP->getValueAPF(); 5182 const APFloat &V2 = N2CFP->getValueAPF(); 5183 const APFloat &V3 = N3CFP->getValueAPF(); 5184 APFloat::opStatus s = 5185 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 5186 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp) 5187 return getConstantFP(V1, DL, VT); 5188 } 5189 break; 5190 } 5191 case ISD::BUILD_VECTOR: { 5192 // Attempt to simplify BUILD_VECTOR. 5193 SDValue Ops[] = {N1, N2, N3}; 5194 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5195 return V; 5196 break; 5197 } 5198 case ISD::CONCAT_VECTORS: { 5199 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 
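// For example (illustrative):
//   (concat_vectors (build_vector a, b), (build_vector c, d), (build_vector e, f))
//     -> (build_vector a, b, c, d, e, f)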
5200 SDValue Ops[] = {N1, N2, N3}; 5201 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 5202 return V; 5203 break; 5204 } 5205 case ISD::SETCC: { 5206 assert(VT.isInteger() && "SETCC result type must be an integer!"); 5207 assert(N1.getValueType() == N2.getValueType() && 5208 "SETCC operands must have the same type!"); 5209 assert(VT.isVector() == N1.getValueType().isVector() && 5210 "SETCC type should be vector iff the operand type is vector!"); 5211 assert((!VT.isVector() || 5212 VT.getVectorNumElements() == N1.getValueType().getVectorNumElements()) && 5213 "SETCC vector element counts must match!"); 5214 // Use FoldSetCC to simplify SETCC's. 5215 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 5216 return V; 5217 // Vector constant folding. 5218 SDValue Ops[] = {N1, N2, N3}; 5219 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 5220 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 5221 return V; 5222 } 5223 break; 5224 } 5225 case ISD::SELECT: 5226 case ISD::VSELECT: 5227 if (SDValue V = simplifySelect(N1, N2, N3)) 5228 return V; 5229 break; 5230 case ISD::VECTOR_SHUFFLE: 5231 llvm_unreachable("should use getVectorShuffle constructor!"); 5232 case ISD::INSERT_VECTOR_ELT: { 5233 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 5234 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF 5235 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 5236 return getUNDEF(VT); 5237 break; 5238 } 5239 case ISD::INSERT_SUBVECTOR: { 5240 SDValue Index = N3; 5241 if (VT.isSimple() && N1.getValueType().isSimple() 5242 && N2.getValueType().isSimple()) { 5243 assert(VT.isVector() && N1.getValueType().isVector() && 5244 N2.getValueType().isVector() && 5245 "Insert subvector VTs must be a vectors"); 5246 assert(VT == N1.getValueType() && 5247 "Dest and insert subvector source types must match!"); 5248 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() && 5249 "Insert subvector must be from smaller vector to larger vector!"); 5250 if (isa<ConstantSDNode>(Index)) { 5251 assert((N2.getValueType().getVectorNumElements() + 5252 cast<ConstantSDNode>(Index)->getZExtValue() 5253 <= VT.getVectorNumElements()) 5254 && "Insert subvector overflow!"); 5255 } 5256 5257 // Trivial insertion. 5258 if (VT.getSimpleVT() == N2.getSimpleValueType()) 5259 return N2; 5260 } 5261 break; 5262 } 5263 case ISD::BITCAST: 5264 // Fold bit_convert nodes from a type to themselves. 5265 if (N1.getValueType() == VT) 5266 return N1; 5267 break; 5268 } 5269 5270 // Memoize node if it doesn't produce a flag. 
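// For example (illustrative): two requests for the same (Opcode, VT, N1, N2,
// N3) resolve to the same SDNode through the CSEMap lookup below, with their
// flags intersected rather than a duplicate node being created.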
5271 SDNode *N; 5272 SDVTList VTs = getVTList(VT); 5273 SDValue Ops[] = {N1, N2, N3}; 5274 if (VT != MVT::Glue) { 5275 FoldingSetNodeID ID; 5276 AddNodeIDNode(ID, Opcode, VTs, Ops); 5277 void *IP = nullptr; 5278 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5279 E->intersectFlagsWith(Flags); 5280 return SDValue(E, 0); 5281 } 5282 5283 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5284 N->setFlags(Flags); 5285 createOperands(N, Ops); 5286 CSEMap.InsertNode(N, IP); 5287 } else { 5288 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5289 createOperands(N, Ops); 5290 } 5291 5292 InsertNode(N); 5293 SDValue V = SDValue(N, 0); 5294 NewSDValueDbgMsg(V, "Creating new node: ", this); 5295 return V; 5296 } 5297 5298 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5299 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 5300 SDValue Ops[] = { N1, N2, N3, N4 }; 5301 return getNode(Opcode, DL, VT, Ops); 5302 } 5303 5304 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5305 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 5306 SDValue N5) { 5307 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5308 return getNode(Opcode, DL, VT, Ops); 5309 } 5310 5311 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 5312 /// the incoming stack arguments to be loaded from the stack. 5313 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 5314 SmallVector<SDValue, 8> ArgChains; 5315 5316 // Include the original chain at the beginning of the list. When this is 5317 // used by target LowerCall hooks, this helps legalize find the 5318 // CALLSEQ_BEGIN node. 5319 ArgChains.push_back(Chain); 5320 5321 // Add a chain value for each stack argument. 5322 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 5323 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 5324 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 5325 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 5326 if (FI->getIndex() < 0) 5327 ArgChains.push_back(SDValue(L, 1)); 5328 5329 // Build a tokenfactor for all the chains. 5330 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 5331 } 5332 5333 /// getMemsetValue - Vectorized representation of the memset value 5334 /// operand. 5335 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 5336 const SDLoc &dl) { 5337 assert(!Value.isUndef()); 5338 5339 unsigned NumBits = VT.getScalarSizeInBits(); 5340 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 5341 assert(C->getAPIntValue().getBitWidth() == 8); 5342 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 5343 if (VT.isInteger()) { 5344 bool IsOpaque = VT.getSizeInBits() > 64 || 5345 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); 5346 return DAG.getConstant(Val, dl, VT, false, IsOpaque); 5347 } 5348 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 5349 VT); 5350 } 5351 5352 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 5353 EVT IntVT = VT.getScalarType(); 5354 if (!IntVT.isInteger()) 5355 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 5356 5357 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 5358 if (NumBits > 8) { 5359 // Use a multiplication with 0x010101... to extend the input to the 5360 // required length. 
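// For example (illustrative): a fill byte of 0xAB zero-extended to i32
// becomes 0x000000AB * 0x01010101 = 0xABABABAB.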
5361 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 5362 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 5363 DAG.getConstant(Magic, dl, IntVT)); 5364 } 5365 5366 if (VT != Value.getValueType() && !VT.isInteger()) 5367 Value = DAG.getBitcast(VT.getScalarType(), Value); 5368 if (VT != Value.getValueType()) 5369 Value = DAG.getSplatBuildVector(VT, dl, Value); 5370 5371 return Value; 5372 } 5373 5374 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 5375 /// used when a memcpy is turned into a memset when the source is a constant 5376 /// string ptr. 5377 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 5378 const TargetLowering &TLI, 5379 const ConstantDataArraySlice &Slice) { 5380 // Handle vector with all elements zero. 5381 if (Slice.Array == nullptr) { 5382 if (VT.isInteger()) 5383 return DAG.getConstant(0, dl, VT); 5384 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 5385 return DAG.getConstantFP(0.0, dl, VT); 5386 else if (VT.isVector()) { 5387 unsigned NumElts = VT.getVectorNumElements(); 5388 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; 5389 return DAG.getNode(ISD::BITCAST, dl, VT, 5390 DAG.getConstant(0, dl, 5391 EVT::getVectorVT(*DAG.getContext(), 5392 EltVT, NumElts))); 5393 } else 5394 llvm_unreachable("Expected type!"); 5395 } 5396 5397 assert(!VT.isVector() && "Can't handle vector type here!"); 5398 unsigned NumVTBits = VT.getSizeInBits(); 5399 unsigned NumVTBytes = NumVTBits / 8; 5400 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 5401 5402 APInt Val(NumVTBits, 0); 5403 if (DAG.getDataLayout().isLittleEndian()) { 5404 for (unsigned i = 0; i != NumBytes; ++i) 5405 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 5406 } else { 5407 for (unsigned i = 0; i != NumBytes; ++i) 5408 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 5409 } 5410 5411 // If the "cost" of materializing the integer immediate is less than the cost 5412 // of a load, then it is cost effective to turn the load into the immediate. 5413 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 5414 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 5415 return DAG.getConstant(Val, dl, VT); 5416 return SDValue(nullptr, 0); 5417 } 5418 5419 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset, 5420 const SDLoc &DL) { 5421 EVT VT = Base.getValueType(); 5422 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT)); 5423 } 5424 5425 /// Returns true if memcpy source is constant data. 5426 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 5427 uint64_t SrcDelta = 0; 5428 GlobalAddressSDNode *G = nullptr; 5429 if (Src.getOpcode() == ISD::GlobalAddress) 5430 G = cast<GlobalAddressSDNode>(Src); 5431 else if (Src.getOpcode() == ISD::ADD && 5432 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 5433 Src.getOperand(1).getOpcode() == ISD::Constant) { 5434 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 5435 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 5436 } 5437 if (!G) 5438 return false; 5439 5440 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 5441 SrcDelta + G->getOffset()); 5442 } 5443 5444 /// Determines the optimal series of memory ops to replace the memset / memcpy. 5445 /// Return true if the number of memory ops is below the threshold (Limit). 5446 /// It returns the types of the sequence of memory ops to perform 5447 /// memset / memcpy by reference. 
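/// For example (illustrative): a 16-byte memcpy on a target where i64 is
/// legal and the pointers are sufficiently aligned would typically produce
/// MemOps = {i64, i64}; the exact choice is driven by the target hooks
/// queried below.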
5448 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps, 5449 unsigned Limit, uint64_t Size, 5450 unsigned DstAlign, unsigned SrcAlign, 5451 bool IsMemset, 5452 bool ZeroMemset, 5453 bool MemcpyStrSrc, 5454 bool AllowOverlap, 5455 unsigned DstAS, unsigned SrcAS, 5456 SelectionDAG &DAG, 5457 const TargetLowering &TLI) { 5458 assert((SrcAlign == 0 || SrcAlign >= DstAlign) && 5459 "Expecting memcpy / memset source to meet alignment requirement!"); 5460 // If 'SrcAlign' is zero, that means the memory operation does not need to 5461 // load the value, i.e. memset or memcpy from constant string. Otherwise, 5462 // it's the inferred alignment of the source. 'DstAlign', on the other hand, 5463 // is the specified alignment of the memory operation. If it is zero, that 5464 // means it's possible to change the alignment of the destination. 5465 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does 5466 // not need to be loaded. 5467 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign, 5468 IsMemset, ZeroMemset, MemcpyStrSrc, 5469 DAG.getMachineFunction()); 5470 5471 if (VT == MVT::Other) { 5472 // Use the largest integer type whose alignment constraints are satisfied. 5473 // We only need to check DstAlign here as SrcAlign is always greater or 5474 // equal to DstAlign (or zero). 5475 VT = MVT::i64; 5476 while (DstAlign && DstAlign < VT.getSizeInBits() / 8 && 5477 !TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign)) 5478 VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1); 5479 assert(VT.isInteger()); 5480 5481 // Find the largest legal integer type. 5482 MVT LVT = MVT::i64; 5483 while (!TLI.isTypeLegal(LVT)) 5484 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1); 5485 assert(LVT.isInteger()); 5486 5487 // If the type we've chosen is larger than the largest legal integer type 5488 // then use that instead. 5489 if (VT.bitsGT(LVT)) 5490 VT = LVT; 5491 } 5492 5493 unsigned NumMemOps = 0; 5494 while (Size != 0) { 5495 unsigned VTSize = VT.getSizeInBits() / 8; 5496 while (VTSize > Size) { 5497 // For now, only use non-vector load / store's for the left-over pieces. 5498 EVT NewVT = VT; 5499 unsigned NewVTSize; 5500 5501 bool Found = false; 5502 if (VT.isVector() || VT.isFloatingPoint()) { 5503 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32; 5504 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) && 5505 TLI.isSafeMemOpType(NewVT.getSimpleVT())) 5506 Found = true; 5507 else if (NewVT == MVT::i64 && 5508 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) && 5509 TLI.isSafeMemOpType(MVT::f64)) { 5510 // i64 is usually not legal on 32-bit targets, but f64 may be. 5511 NewVT = MVT::f64; 5512 Found = true; 5513 } 5514 } 5515 5516 if (!Found) { 5517 do { 5518 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1); 5519 if (NewVT == MVT::i8) 5520 break; 5521 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT())); 5522 } 5523 NewVTSize = NewVT.getSizeInBits() / 8; 5524 5525 // If the new VT cannot cover all of the remaining bits, then consider 5526 // issuing a (or a pair of) unaligned and overlapping load / store. 
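// For example (illustrative): with 7 bytes remaining and i64 as the current
// type, a single unaligned i64 op that overlaps the previous op by one byte
// may be cheaper than an i32 + i16 + i8 sequence.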
5527 bool Fast; 5528 if (NumMemOps && AllowOverlap && NewVTSize < Size && 5529 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && 5530 Fast) 5531 VTSize = Size; 5532 else { 5533 VT = NewVT; 5534 VTSize = NewVTSize; 5535 } 5536 } 5537 5538 if (++NumMemOps > Limit) 5539 return false; 5540 5541 MemOps.push_back(VT); 5542 Size -= VTSize; 5543 } 5544 5545 return true; 5546 } 5547 5548 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { 5549 // On Darwin, -Os means optimize for size without hurting performance, so 5550 // only really optimize for size when -Oz (MinSize) is used. 5551 if (MF.getTarget().getTargetTriple().isOSDarwin()) 5552 return MF.getFunction().optForMinSize(); 5553 return MF.getFunction().optForSize(); 5554 } 5555 5556 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, 5557 SmallVector<SDValue, 32> &OutChains, unsigned From, 5558 unsigned To, SmallVector<SDValue, 16> &OutLoadChains, 5559 SmallVector<SDValue, 16> &OutStoreChains) { 5560 assert(OutLoadChains.size() && "Missing loads in memcpy inlining"); 5561 assert(OutStoreChains.size() && "Missing stores in memcpy inlining"); 5562 SmallVector<SDValue, 16> GluedLoadChains; 5563 for (unsigned i = From; i < To; ++i) { 5564 OutChains.push_back(OutLoadChains[i]); 5565 GluedLoadChains.push_back(OutLoadChains[i]); 5566 } 5567 5568 // Chain for all loads. 5569 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 5570 GluedLoadChains); 5571 5572 for (unsigned i = From; i < To; ++i) { 5573 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]); 5574 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(), 5575 ST->getBasePtr(), ST->getMemoryVT(), 5576 ST->getMemOperand()); 5577 OutChains.push_back(NewStore); 5578 } 5579 } 5580 5581 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5582 SDValue Chain, SDValue Dst, SDValue Src, 5583 uint64_t Size, unsigned Align, 5584 bool isVol, bool AlwaysInline, 5585 MachinePointerInfo DstPtrInfo, 5586 MachinePointerInfo SrcPtrInfo) { 5587 // Turn a memcpy of undef to nop. 5588 if (Src.isUndef()) 5589 return Chain; 5590 5591 // Expand memcpy to a series of load and store ops if the size operand falls 5592 // below a certain threshold. 5593 // TODO: In the AlwaysInline case, if the size is big then generate a loop 5594 // rather than maybe a humongous number of loads and stores. 5595 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5596 const DataLayout &DL = DAG.getDataLayout(); 5597 LLVMContext &C = *DAG.getContext(); 5598 std::vector<EVT> MemOps; 5599 bool DstAlignCanChange = false; 5600 MachineFunction &MF = DAG.getMachineFunction(); 5601 MachineFrameInfo &MFI = MF.getFrameInfo(); 5602 bool OptSize = shouldLowerMemFuncForSize(MF); 5603 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5604 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5605 DstAlignCanChange = true; 5606 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5607 if (Align > SrcAlign) 5608 SrcAlign = Align; 5609 ConstantDataArraySlice Slice; 5610 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); 5611 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 5612 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 5613 5614 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 5615 (DstAlignCanChange ? 0 : Align), 5616 (isZeroConstant ? 
0 : SrcAlign), 5617 false, false, CopyFromConstant, true, 5618 DstPtrInfo.getAddrSpace(), 5619 SrcPtrInfo.getAddrSpace(), 5620 DAG, TLI)) 5621 return SDValue(); 5622 5623 if (DstAlignCanChange) { 5624 Type *Ty = MemOps[0].getTypeForEVT(C); 5625 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5626 5627 // Don't promote to an alignment that would require dynamic stack 5628 // realignment. 5629 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 5630 if (!TRI->needsStackRealignment(MF)) 5631 while (NewAlign > Align && 5632 DL.exceedsNaturalStackAlignment(NewAlign)) 5633 NewAlign /= 2; 5634 5635 if (NewAlign > Align) { 5636 // Give the stack frame object a larger alignment if needed. 5637 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5638 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5639 Align = NewAlign; 5640 } 5641 } 5642 5643 MachineMemOperand::Flags MMOFlags = 5644 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5645 SmallVector<SDValue, 16> OutLoadChains; 5646 SmallVector<SDValue, 16> OutStoreChains; 5647 SmallVector<SDValue, 32> OutChains; 5648 unsigned NumMemOps = MemOps.size(); 5649 uint64_t SrcOff = 0, DstOff = 0; 5650 for (unsigned i = 0; i != NumMemOps; ++i) { 5651 EVT VT = MemOps[i]; 5652 unsigned VTSize = VT.getSizeInBits() / 8; 5653 SDValue Value, Store; 5654 5655 if (VTSize > Size) { 5656 // Issuing an unaligned load / store pair that overlaps with the previous 5657 // pair. Adjust the offset accordingly. 5658 assert(i == NumMemOps-1 && i != 0); 5659 SrcOff -= VTSize - Size; 5660 DstOff -= VTSize - Size; 5661 } 5662 5663 if (CopyFromConstant && 5664 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 5665 // It's unlikely a store of a vector immediate can be done in a single 5666 // instruction. It would require a load from a constantpool first. 5667 // We only handle zero vectors here. 5668 // FIXME: Handle other cases where store of vector immediate is done in 5669 // a single instruction. 5670 ConstantDataArraySlice SubSlice; 5671 if (SrcOff < Slice.Length) { 5672 SubSlice = Slice; 5673 SubSlice.move(SrcOff); 5674 } else { 5675 // This is an out-of-bounds access and hence UB. Pretend we read zero. 5676 SubSlice.Array = nullptr; 5677 SubSlice.Offset = 0; 5678 SubSlice.Length = VTSize; 5679 } 5680 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); 5681 if (Value.getNode()) { 5682 Store = DAG.getStore(Chain, dl, Value, 5683 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5684 DstPtrInfo.getWithOffset(DstOff), Align, 5685 MMOFlags); 5686 OutChains.push_back(Store); 5687 } 5688 } 5689 5690 if (!Store.getNode()) { 5691 // The type might not be legal for the target. This should only happen 5692 // if the type is smaller than a legal type, as on PPC, so the right 5693 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 5694 // to Load/Store if NVT==VT. 5695 // FIXME does the case above also need this? 
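// For example (illustrative): an i16 piece on a target where only i32 is
// legal is emitted as an extending i16 load (producing an i32 value)
// followed by a truncating i16 store.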
5696 EVT NVT = TLI.getTypeToTransformTo(C, VT); 5697 assert(NVT.bitsGE(VT)); 5698 5699 bool isDereferenceable = 5700 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 5701 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 5702 if (isDereferenceable) 5703 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 5704 5705 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 5706 DAG.getMemBasePlusOffset(Src, SrcOff, dl), 5707 SrcPtrInfo.getWithOffset(SrcOff), VT, 5708 MinAlign(SrcAlign, SrcOff), SrcMMOFlags); 5709 OutLoadChains.push_back(Value.getValue(1)); 5710 5711 Store = DAG.getTruncStore( 5712 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5713 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags); 5714 OutStoreChains.push_back(Store); 5715 } 5716 SrcOff += VTSize; 5717 DstOff += VTSize; 5718 Size -= VTSize; 5719 } 5720 5721 unsigned GluedLdStLimit = MaxLdStGlue == 0 ? 5722 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue; 5723 unsigned NumLdStInMemcpy = OutStoreChains.size(); 5724 5725 if (NumLdStInMemcpy) { 5726 // It may be that memcpy might be converted to memset if it's memcpy 5727 // of constants. In such a case, we won't have loads and stores, but 5728 // just stores. In the absence of loads, there is nothing to gang up. 5729 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) { 5730 // If target does not care, just leave as it. 5731 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) { 5732 OutChains.push_back(OutLoadChains[i]); 5733 OutChains.push_back(OutStoreChains[i]); 5734 } 5735 } else { 5736 // Ld/St less than/equal limit set by target. 5737 if (NumLdStInMemcpy <= GluedLdStLimit) { 5738 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 5739 NumLdStInMemcpy, OutLoadChains, 5740 OutStoreChains); 5741 } else { 5742 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit; 5743 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit; 5744 unsigned GlueIter = 0; 5745 5746 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) { 5747 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit; 5748 unsigned IndexTo = NumLdStInMemcpy - GlueIter; 5749 5750 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo, 5751 OutLoadChains, OutStoreChains); 5752 GlueIter += GluedLdStLimit; 5753 } 5754 5755 // Residual ld/st. 5756 if (RemainingLdStInMemcpy) { 5757 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 5758 RemainingLdStInMemcpy, OutLoadChains, 5759 OutStoreChains); 5760 } 5761 } 5762 } 5763 } 5764 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5765 } 5766 5767 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5768 SDValue Chain, SDValue Dst, SDValue Src, 5769 uint64_t Size, unsigned Align, 5770 bool isVol, bool AlwaysInline, 5771 MachinePointerInfo DstPtrInfo, 5772 MachinePointerInfo SrcPtrInfo) { 5773 // Turn a memmove of undef to nop. 5774 if (Src.isUndef()) 5775 return Chain; 5776 5777 // Expand memmove to a series of load and store ops if the size operand falls 5778 // below a certain threshold. 
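// Unlike memcpy, all loads are emitted before any of the stores (and joined
// by a TokenFactor below), so the expansion stays correct even when the
// source and destination ranges overlap.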
5779 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5780 const DataLayout &DL = DAG.getDataLayout(); 5781 LLVMContext &C = *DAG.getContext(); 5782 std::vector<EVT> MemOps; 5783 bool DstAlignCanChange = false; 5784 MachineFunction &MF = DAG.getMachineFunction(); 5785 MachineFrameInfo &MFI = MF.getFrameInfo(); 5786 bool OptSize = shouldLowerMemFuncForSize(MF); 5787 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5788 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5789 DstAlignCanChange = true; 5790 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5791 if (Align > SrcAlign) 5792 SrcAlign = Align; 5793 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 5794 5795 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 5796 (DstAlignCanChange ? 0 : Align), SrcAlign, 5797 false, false, false, false, 5798 DstPtrInfo.getAddrSpace(), 5799 SrcPtrInfo.getAddrSpace(), 5800 DAG, TLI)) 5801 return SDValue(); 5802 5803 if (DstAlignCanChange) { 5804 Type *Ty = MemOps[0].getTypeForEVT(C); 5805 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5806 if (NewAlign > Align) { 5807 // Give the stack frame object a larger alignment if needed. 5808 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5809 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5810 Align = NewAlign; 5811 } 5812 } 5813 5814 MachineMemOperand::Flags MMOFlags = 5815 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5816 uint64_t SrcOff = 0, DstOff = 0; 5817 SmallVector<SDValue, 8> LoadValues; 5818 SmallVector<SDValue, 8> LoadChains; 5819 SmallVector<SDValue, 8> OutChains; 5820 unsigned NumMemOps = MemOps.size(); 5821 for (unsigned i = 0; i < NumMemOps; i++) { 5822 EVT VT = MemOps[i]; 5823 unsigned VTSize = VT.getSizeInBits() / 8; 5824 SDValue Value; 5825 5826 bool isDereferenceable = 5827 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 5828 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 5829 if (isDereferenceable) 5830 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 5831 5832 Value = 5833 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), 5834 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags); 5835 LoadValues.push_back(Value); 5836 LoadChains.push_back(Value.getValue(1)); 5837 SrcOff += VTSize; 5838 } 5839 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 5840 OutChains.clear(); 5841 for (unsigned i = 0; i < NumMemOps; i++) { 5842 EVT VT = MemOps[i]; 5843 unsigned VTSize = VT.getSizeInBits() / 8; 5844 SDValue Store; 5845 5846 Store = DAG.getStore(Chain, dl, LoadValues[i], 5847 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5848 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags); 5849 OutChains.push_back(Store); 5850 DstOff += VTSize; 5851 } 5852 5853 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5854 } 5855 5856 /// Lower the call to 'memset' intrinsic function into a series of store 5857 /// operations. 5858 /// 5859 /// \param DAG Selection DAG where lowered code is placed. 5860 /// \param dl Link to corresponding IR location. 5861 /// \param Chain Control flow dependency. 5862 /// \param Dst Pointer to destination memory location. 5863 /// \param Src Value of byte to write into the memory. 5864 /// \param Size Number of bytes to write. 5865 /// \param Align Alignment of the destination in bytes. 5866 /// \param isVol True if destination is volatile. 5867 /// \param DstPtrInfo IR information on the memory pointer. 
5868 /// \returns New head in the control flow, if lowering was successful, empty 5869 /// SDValue otherwise. 5870 /// 5871 /// The function tries to replace 'llvm.memset' intrinsic with several store 5872 /// operations and value calculation code. This is usually profitable for small 5873 /// memory size. 5874 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, 5875 SDValue Chain, SDValue Dst, SDValue Src, 5876 uint64_t Size, unsigned Align, bool isVol, 5877 MachinePointerInfo DstPtrInfo) { 5878 // Turn a memset of undef to nop. 5879 if (Src.isUndef()) 5880 return Chain; 5881 5882 // Expand memset to a series of load/store ops if the size operand 5883 // falls below a certain threshold. 5884 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5885 std::vector<EVT> MemOps; 5886 bool DstAlignCanChange = false; 5887 MachineFunction &MF = DAG.getMachineFunction(); 5888 MachineFrameInfo &MFI = MF.getFrameInfo(); 5889 bool OptSize = shouldLowerMemFuncForSize(MF); 5890 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5891 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5892 DstAlignCanChange = true; 5893 bool IsZeroVal = 5894 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 5895 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize), 5896 Size, (DstAlignCanChange ? 0 : Align), 0, 5897 true, IsZeroVal, false, true, 5898 DstPtrInfo.getAddrSpace(), ~0u, 5899 DAG, TLI)) 5900 return SDValue(); 5901 5902 if (DstAlignCanChange) { 5903 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 5904 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 5905 if (NewAlign > Align) { 5906 // Give the stack frame object a larger alignment if needed. 5907 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5908 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5909 Align = NewAlign; 5910 } 5911 } 5912 5913 SmallVector<SDValue, 8> OutChains; 5914 uint64_t DstOff = 0; 5915 unsigned NumMemOps = MemOps.size(); 5916 5917 // Find the largest store and generate the bit pattern for it. 5918 EVT LargestVT = MemOps[0]; 5919 for (unsigned i = 1; i < NumMemOps; i++) 5920 if (MemOps[i].bitsGT(LargestVT)) 5921 LargestVT = MemOps[i]; 5922 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 5923 5924 for (unsigned i = 0; i < NumMemOps; i++) { 5925 EVT VT = MemOps[i]; 5926 unsigned VTSize = VT.getSizeInBits() / 8; 5927 if (VTSize > Size) { 5928 // Issuing an unaligned load / store pair that overlaps with the previous 5929 // pair. Adjust the offset accordingly. 5930 assert(i == NumMemOps-1 && i != 0); 5931 DstOff -= VTSize - Size; 5932 } 5933 5934 // If this store is smaller than the largest store see whether we can get 5935 // the smaller value for free with a truncate. 5936 SDValue Value = MemSetValue; 5937 if (VT.bitsLT(LargestVT)) { 5938 if (!LargestVT.isVector() && !VT.isVector() && 5939 TLI.isTruncateFree(LargestVT, VT)) 5940 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 5941 else 5942 Value = getMemsetValue(Src, VT, DAG, dl); 5943 } 5944 assert(Value.getValueType() == VT && "Value with wrong type."); 5945 SDValue Store = DAG.getStore( 5946 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5947 DstPtrInfo.getWithOffset(DstOff), Align, 5948 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 5949 OutChains.push_back(Store); 5950 DstOff += VT.getSizeInBits() / 8; 5951 Size -= VTSize; 5952 } 5953 5954 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5955 } 5956 5957 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 5958 unsigned AS) { 5959 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 5960 // pointer operands can be losslessly bitcasted to pointers of address space 0 5961 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 5962 report_fatal_error("cannot lower memory intrinsic in address space " + 5963 Twine(AS)); 5964 } 5965 } 5966 5967 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 5968 SDValue Src, SDValue Size, unsigned Align, 5969 bool isVol, bool AlwaysInline, bool isTailCall, 5970 MachinePointerInfo DstPtrInfo, 5971 MachinePointerInfo SrcPtrInfo) { 5972 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5973 5974 // Check to see if we should lower the memcpy to loads and stores first. 5975 // For cases within the target-specified limits, this is the best choice. 5976 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5977 if (ConstantSize) { 5978 // Memcpy with size zero? Just return the original chain. 5979 if (ConstantSize->isNullValue()) 5980 return Chain; 5981 5982 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 5983 ConstantSize->getZExtValue(),Align, 5984 isVol, false, DstPtrInfo, SrcPtrInfo); 5985 if (Result.getNode()) 5986 return Result; 5987 } 5988 5989 // Then check to see if we should lower the memcpy with target-specific 5990 // code. If the target chooses to do this, this is the next best. 5991 if (TSI) { 5992 SDValue Result = TSI->EmitTargetCodeForMemcpy( 5993 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 5994 DstPtrInfo, SrcPtrInfo); 5995 if (Result.getNode()) 5996 return Result; 5997 } 5998 5999 // If we really need inline code and the target declined to provide it, 6000 // use a (potentially long) sequence of loads and stores. 6001 if (AlwaysInline) { 6002 assert(ConstantSize && "AlwaysInline requires a constant size!"); 6003 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6004 ConstantSize->getZExtValue(), Align, isVol, 6005 true, DstPtrInfo, SrcPtrInfo); 6006 } 6007 6008 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6009 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6010 6011 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 6012 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 6013 // respect volatile, so they may do things like read or write memory 6014 // beyond the given memory regions. But fixing this isn't easy, and most 6015 // people don't care. 6016 6017 // Emit a library call. 
6018 TargetLowering::ArgListTy Args; 6019 TargetLowering::ArgListEntry Entry; 6020 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6021 Entry.Node = Dst; Args.push_back(Entry); 6022 Entry.Node = Src; Args.push_back(Entry); 6023 Entry.Node = Size; Args.push_back(Entry); 6024 // FIXME: pass in SDLoc 6025 TargetLowering::CallLoweringInfo CLI(*this); 6026 CLI.setDebugLoc(dl) 6027 .setChain(Chain) 6028 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 6029 Dst.getValueType().getTypeForEVT(*getContext()), 6030 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 6031 TLI->getPointerTy(getDataLayout())), 6032 std::move(Args)) 6033 .setDiscardResult() 6034 .setTailCall(isTailCall); 6035 6036 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6037 return CallResult.second; 6038 } 6039 6040 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, 6041 SDValue Dst, unsigned DstAlign, 6042 SDValue Src, unsigned SrcAlign, 6043 SDValue Size, Type *SizeTy, 6044 unsigned ElemSz, bool isTailCall, 6045 MachinePointerInfo DstPtrInfo, 6046 MachinePointerInfo SrcPtrInfo) { 6047 // Emit a library call. 6048 TargetLowering::ArgListTy Args; 6049 TargetLowering::ArgListEntry Entry; 6050 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6051 Entry.Node = Dst; 6052 Args.push_back(Entry); 6053 6054 Entry.Node = Src; 6055 Args.push_back(Entry); 6056 6057 Entry.Ty = SizeTy; 6058 Entry.Node = Size; 6059 Args.push_back(Entry); 6060 6061 RTLIB::Libcall LibraryCall = 6062 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6063 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6064 report_fatal_error("Unsupported element size"); 6065 6066 TargetLowering::CallLoweringInfo CLI(*this); 6067 CLI.setDebugLoc(dl) 6068 .setChain(Chain) 6069 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6070 Type::getVoidTy(*getContext()), 6071 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6072 TLI->getPointerTy(getDataLayout())), 6073 std::move(Args)) 6074 .setDiscardResult() 6075 .setTailCall(isTailCall); 6076 6077 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6078 return CallResult.second; 6079 } 6080 6081 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 6082 SDValue Src, SDValue Size, unsigned Align, 6083 bool isVol, bool isTailCall, 6084 MachinePointerInfo DstPtrInfo, 6085 MachinePointerInfo SrcPtrInfo) { 6086 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6087 6088 // Check to see if we should lower the memmove to loads and stores first. 6089 // For cases within the target-specified limits, this is the best choice. 6090 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6091 if (ConstantSize) { 6092 // Memmove with size zero? Just return the original chain. 6093 if (ConstantSize->isNullValue()) 6094 return Chain; 6095 6096 SDValue Result = 6097 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 6098 ConstantSize->getZExtValue(), Align, isVol, 6099 false, DstPtrInfo, SrcPtrInfo); 6100 if (Result.getNode()) 6101 return Result; 6102 } 6103 6104 // Then check to see if we should lower the memmove with target-specific 6105 // code. If the target chooses to do this, this is the next best. 
6106 if (TSI) { 6107 SDValue Result = TSI->EmitTargetCodeForMemmove( 6108 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 6109 if (Result.getNode()) 6110 return Result; 6111 } 6112 6113 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6114 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6115 6116 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 6117 // not be safe. See memcpy above for more details. 6118 6119 // Emit a library call. 6120 TargetLowering::ArgListTy Args; 6121 TargetLowering::ArgListEntry Entry; 6122 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6123 Entry.Node = Dst; Args.push_back(Entry); 6124 Entry.Node = Src; Args.push_back(Entry); 6125 Entry.Node = Size; Args.push_back(Entry); 6126 // FIXME: pass in SDLoc 6127 TargetLowering::CallLoweringInfo CLI(*this); 6128 CLI.setDebugLoc(dl) 6129 .setChain(Chain) 6130 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 6131 Dst.getValueType().getTypeForEVT(*getContext()), 6132 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 6133 TLI->getPointerTy(getDataLayout())), 6134 std::move(Args)) 6135 .setDiscardResult() 6136 .setTailCall(isTailCall); 6137 6138 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6139 return CallResult.second; 6140 } 6141 6142 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl, 6143 SDValue Dst, unsigned DstAlign, 6144 SDValue Src, unsigned SrcAlign, 6145 SDValue Size, Type *SizeTy, 6146 unsigned ElemSz, bool isTailCall, 6147 MachinePointerInfo DstPtrInfo, 6148 MachinePointerInfo SrcPtrInfo) { 6149 // Emit a library call. 6150 TargetLowering::ArgListTy Args; 6151 TargetLowering::ArgListEntry Entry; 6152 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6153 Entry.Node = Dst; 6154 Args.push_back(Entry); 6155 6156 Entry.Node = Src; 6157 Args.push_back(Entry); 6158 6159 Entry.Ty = SizeTy; 6160 Entry.Node = Size; 6161 Args.push_back(Entry); 6162 6163 RTLIB::Libcall LibraryCall = 6164 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6165 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6166 report_fatal_error("Unsupported element size"); 6167 6168 TargetLowering::CallLoweringInfo CLI(*this); 6169 CLI.setDebugLoc(dl) 6170 .setChain(Chain) 6171 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6172 Type::getVoidTy(*getContext()), 6173 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6174 TLI->getPointerTy(getDataLayout())), 6175 std::move(Args)) 6176 .setDiscardResult() 6177 .setTailCall(isTailCall); 6178 6179 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6180 return CallResult.second; 6181 } 6182 6183 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 6184 SDValue Src, SDValue Size, unsigned Align, 6185 bool isVol, bool isTailCall, 6186 MachinePointerInfo DstPtrInfo) { 6187 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6188 6189 // Check to see if we should lower the memset to stores first. 6190 // For cases within the target-specified limits, this is the best choice. 6191 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6192 if (ConstantSize) { 6193 // Memset with size zero? Just return the original chain. 
6194 if (ConstantSize->isNullValue()) 6195 return Chain; 6196 6197 SDValue Result = 6198 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 6199 Align, isVol, DstPtrInfo); 6200 6201 if (Result.getNode()) 6202 return Result; 6203 } 6204 6205 // Then check to see if we should lower the memset with target-specific 6206 // code. If the target chooses to do this, this is the next best. 6207 if (TSI) { 6208 SDValue Result = TSI->EmitTargetCodeForMemset( 6209 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); 6210 if (Result.getNode()) 6211 return Result; 6212 } 6213 6214 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6215 6216 // Emit a library call. 6217 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext()); 6218 TargetLowering::ArgListTy Args; 6219 TargetLowering::ArgListEntry Entry; 6220 Entry.Node = Dst; Entry.Ty = IntPtrTy; 6221 Args.push_back(Entry); 6222 Entry.Node = Src; 6223 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 6224 Args.push_back(Entry); 6225 Entry.Node = Size; 6226 Entry.Ty = IntPtrTy; 6227 Args.push_back(Entry); 6228 6229 // FIXME: pass in SDLoc 6230 TargetLowering::CallLoweringInfo CLI(*this); 6231 CLI.setDebugLoc(dl) 6232 .setChain(Chain) 6233 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 6234 Dst.getValueType().getTypeForEVT(*getContext()), 6235 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 6236 TLI->getPointerTy(getDataLayout())), 6237 std::move(Args)) 6238 .setDiscardResult() 6239 .setTailCall(isTailCall); 6240 6241 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6242 return CallResult.second; 6243 } 6244 6245 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl, 6246 SDValue Dst, unsigned DstAlign, 6247 SDValue Value, SDValue Size, Type *SizeTy, 6248 unsigned ElemSz, bool isTailCall, 6249 MachinePointerInfo DstPtrInfo) { 6250 // Emit a library call. 
6251 TargetLowering::ArgListTy Args; 6252 TargetLowering::ArgListEntry Entry; 6253 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6254 Entry.Node = Dst; 6255 Args.push_back(Entry); 6256 6257 Entry.Ty = Type::getInt8Ty(*getContext()); 6258 Entry.Node = Value; 6259 Args.push_back(Entry); 6260 6261 Entry.Ty = SizeTy; 6262 Entry.Node = Size; 6263 Args.push_back(Entry); 6264 6265 RTLIB::Libcall LibraryCall = 6266 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6267 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6268 report_fatal_error("Unsupported element size"); 6269 6270 TargetLowering::CallLoweringInfo CLI(*this); 6271 CLI.setDebugLoc(dl) 6272 .setChain(Chain) 6273 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6274 Type::getVoidTy(*getContext()), 6275 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6276 TLI->getPointerTy(getDataLayout())), 6277 std::move(Args)) 6278 .setDiscardResult() 6279 .setTailCall(isTailCall); 6280 6281 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6282 return CallResult.second; 6283 } 6284 6285 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6286 SDVTList VTList, ArrayRef<SDValue> Ops, 6287 MachineMemOperand *MMO) { 6288 FoldingSetNodeID ID; 6289 ID.AddInteger(MemVT.getRawBits()); 6290 AddNodeIDNode(ID, Opcode, VTList, Ops); 6291 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6292 void* IP = nullptr; 6293 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6294 cast<AtomicSDNode>(E)->refineAlignment(MMO); 6295 return SDValue(E, 0); 6296 } 6297 6298 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6299 VTList, MemVT, MMO); 6300 createOperands(N, Ops); 6301 6302 CSEMap.InsertNode(N, IP); 6303 InsertNode(N); 6304 return SDValue(N, 0); 6305 } 6306 6307 SDValue SelectionDAG::getAtomicCmpSwap( 6308 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, 6309 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, 6310 unsigned Alignment, AtomicOrdering SuccessOrdering, 6311 AtomicOrdering FailureOrdering, SyncScope::ID SSID) { 6312 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 6313 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 6314 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 6315 6316 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6317 Alignment = getEVTAlignment(MemVT); 6318 6319 MachineFunction &MF = getMachineFunction(); 6320 6321 // FIXME: Volatile isn't really correct; we should keep track of atomic 6322 // orderings in the memoperand. 
6323 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad | 6324 MachineMemOperand::MOStore; 6325 MachineMemOperand *MMO = 6326 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, 6327 AAMDNodes(), nullptr, SSID, SuccessOrdering, 6328 FailureOrdering); 6329 6330 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO); 6331 } 6332 6333 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 6334 EVT MemVT, SDVTList VTs, SDValue Chain, 6335 SDValue Ptr, SDValue Cmp, SDValue Swp, 6336 MachineMemOperand *MMO) { 6337 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 6338 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 6339 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 6340 6341 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 6342 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6343 } 6344 6345 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6346 SDValue Chain, SDValue Ptr, SDValue Val, 6347 const Value *PtrVal, unsigned Alignment, 6348 AtomicOrdering Ordering, 6349 SyncScope::ID SSID) { 6350 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6351 Alignment = getEVTAlignment(MemVT); 6352 6353 MachineFunction &MF = getMachineFunction(); 6354 // An atomic store does not load. An atomic load does not store. 6355 // (An atomicrmw obviously both loads and stores.) 6356 // For now, atomics are considered to be volatile always, and they are 6357 // chained as such. 6358 // FIXME: Volatile isn't really correct; we should keep track of atomic 6359 // orderings in the memoperand. 6360 auto Flags = MachineMemOperand::MOVolatile; 6361 if (Opcode != ISD::ATOMIC_STORE) 6362 Flags |= MachineMemOperand::MOLoad; 6363 if (Opcode != ISD::ATOMIC_LOAD) 6364 Flags |= MachineMemOperand::MOStore; 6365 6366 MachineMemOperand *MMO = 6367 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 6368 MemVT.getStoreSize(), Alignment, AAMDNodes(), 6369 nullptr, SSID, Ordering); 6370 6371 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO); 6372 } 6373 6374 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6375 SDValue Chain, SDValue Ptr, SDValue Val, 6376 MachineMemOperand *MMO) { 6377 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 6378 Opcode == ISD::ATOMIC_LOAD_SUB || 6379 Opcode == ISD::ATOMIC_LOAD_AND || 6380 Opcode == ISD::ATOMIC_LOAD_CLR || 6381 Opcode == ISD::ATOMIC_LOAD_OR || 6382 Opcode == ISD::ATOMIC_LOAD_XOR || 6383 Opcode == ISD::ATOMIC_LOAD_NAND || 6384 Opcode == ISD::ATOMIC_LOAD_MIN || 6385 Opcode == ISD::ATOMIC_LOAD_MAX || 6386 Opcode == ISD::ATOMIC_LOAD_UMIN || 6387 Opcode == ISD::ATOMIC_LOAD_UMAX || 6388 Opcode == ISD::ATOMIC_SWAP || 6389 Opcode == ISD::ATOMIC_STORE) && 6390 "Invalid Atomic Op"); 6391 6392 EVT VT = Val.getValueType(); 6393 6394 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : 6395 getVTList(VT, MVT::Other); 6396 SDValue Ops[] = {Chain, Ptr, Val}; 6397 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6398 } 6399 6400 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6401 EVT VT, SDValue Chain, SDValue Ptr, 6402 MachineMemOperand *MMO) { 6403 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 6404 6405 SDVTList VTs = getVTList(VT, MVT::Other); 6406 SDValue Ops[] = {Chain, Ptr}; 6407 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6408 } 6409 6410 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 
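///
/// Illustrative sketch only (not part of the original source): a lowering
/// routine that has produced a data value and an output chain, here assumed
/// to be named 'Lo' and 'OutChain' with a SelectionDAG 'DAG' and location
/// 'dl', could package them into a single multi-result value:
/// \code
///   SDValue Parts[] = { Lo, OutChain };
///   SDValue Merged = DAG.getMergeValues(Parts, dl);
/// \endcode
/// With a single operand, getMergeValues simply returns that operand.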
6411 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 6412 if (Ops.size() == 1) 6413 return Ops[0]; 6414 6415 SmallVector<EVT, 4> VTs; 6416 VTs.reserve(Ops.size()); 6417 for (unsigned i = 0; i < Ops.size(); ++i) 6418 VTs.push_back(Ops[i].getValueType()); 6419 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 6420 } 6421 6422 SDValue SelectionDAG::getMemIntrinsicNode( 6423 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 6424 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, 6425 MachineMemOperand::Flags Flags, unsigned Size) { 6426 if (Align == 0) // Ensure that codegen never sees alignment 0 6427 Align = getEVTAlignment(MemVT); 6428 6429 if (!Size) 6430 Size = MemVT.getStoreSize(); 6431 6432 MachineFunction &MF = getMachineFunction(); 6433 MachineMemOperand *MMO = 6434 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align); 6435 6436 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 6437 } 6438 6439 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 6440 SDVTList VTList, 6441 ArrayRef<SDValue> Ops, EVT MemVT, 6442 MachineMemOperand *MMO) { 6443 assert((Opcode == ISD::INTRINSIC_VOID || 6444 Opcode == ISD::INTRINSIC_W_CHAIN || 6445 Opcode == ISD::PREFETCH || 6446 Opcode == ISD::LIFETIME_START || 6447 Opcode == ISD::LIFETIME_END || 6448 ((int)Opcode <= std::numeric_limits<int>::max() && 6449 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 6450 "Opcode is not a memory-accessing opcode!"); 6451 6452 // Memoize the node unless it returns a flag. 6453 MemIntrinsicSDNode *N; 6454 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6455 FoldingSetNodeID ID; 6456 AddNodeIDNode(ID, Opcode, VTList, Ops); 6457 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 6458 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 6459 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6460 void *IP = nullptr; 6461 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6462 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 6463 return SDValue(E, 0); 6464 } 6465 6466 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6467 VTList, MemVT, MMO); 6468 createOperands(N, Ops); 6469 6470 CSEMap.InsertNode(N, IP); 6471 } else { 6472 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6473 VTList, MemVT, MMO); 6474 createOperands(N, Ops); 6475 } 6476 InsertNode(N); 6477 return SDValue(N, 0); 6478 } 6479 6480 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6481 /// MachinePointerInfo record from it. This is particularly useful because the 6482 /// code generator has many cases where it doesn't bother passing in a 6483 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6484 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6485 SelectionDAG &DAG, SDValue Ptr, 6486 int64_t Offset = 0) { 6487 // If this is FI+Offset, we can model it. 6488 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 6489 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 6490 FI->getIndex(), Offset); 6491 6492 // If this is (FI+Offset1)+Offset2, we can model it. 
6493 if (Ptr.getOpcode() != ISD::ADD || 6494 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 6495 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 6496 return Info; 6497 6498 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 6499 return MachinePointerInfo::getFixedStack( 6500 DAG.getMachineFunction(), FI, 6501 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 6502 } 6503 6504 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6505 /// MachinePointerInfo record from it. This is particularly useful because the 6506 /// code generator has many cases where it doesn't bother passing in a 6507 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6508 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6509 SelectionDAG &DAG, SDValue Ptr, 6510 SDValue OffsetOp) { 6511 // If the 'Offset' value isn't a constant, we can't handle this. 6512 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 6513 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); 6514 if (OffsetOp.isUndef()) 6515 return InferPointerInfo(Info, DAG, Ptr); 6516 return Info; 6517 } 6518 6519 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6520 EVT VT, const SDLoc &dl, SDValue Chain, 6521 SDValue Ptr, SDValue Offset, 6522 MachinePointerInfo PtrInfo, EVT MemVT, 6523 unsigned Alignment, 6524 MachineMemOperand::Flags MMOFlags, 6525 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6526 assert(Chain.getValueType() == MVT::Other && 6527 "Invalid chain type"); 6528 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6529 Alignment = getEVTAlignment(MemVT); 6530 6531 MMOFlags |= MachineMemOperand::MOLoad; 6532 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 6533 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 6534 // clients. 6535 if (PtrInfo.V.isNull()) 6536 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); 6537 6538 MachineFunction &MF = getMachineFunction(); 6539 MachineMemOperand *MMO = MF.getMachineMemOperand( 6540 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges); 6541 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 6542 } 6543 6544 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6545 EVT VT, const SDLoc &dl, SDValue Chain, 6546 SDValue Ptr, SDValue Offset, EVT MemVT, 6547 MachineMemOperand *MMO) { 6548 if (VT == MemVT) { 6549 ExtType = ISD::NON_EXTLOAD; 6550 } else if (ExtType == ISD::NON_EXTLOAD) { 6551 assert(VT == MemVT && "Non-extending load from different memory type!"); 6552 } else { 6553 // Extending load. 6554 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 6555 "Should only be an extending load, not truncating!"); 6556 assert(VT.isInteger() == MemVT.isInteger() && 6557 "Cannot convert from FP to Int or Int -> FP!"); 6558 assert(VT.isVector() == MemVT.isVector() && 6559 "Cannot use an ext load to convert to or from a vector!"); 6560 assert((!VT.isVector() || 6561 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 6562 "Cannot use an ext load to change the number of vector elements!"); 6563 } 6564 6565 bool Indexed = AM != ISD::UNINDEXED; 6566 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 6567 6568 SDVTList VTs = Indexed ? 
6569 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); 6570 SDValue Ops[] = { Chain, Ptr, Offset }; 6571 FoldingSetNodeID ID; 6572 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); 6573 ID.AddInteger(MemVT.getRawBits()); 6574 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( 6575 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); 6576 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6577 void *IP = nullptr; 6578 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6579 cast<LoadSDNode>(E)->refineAlignment(MMO); 6580 return SDValue(E, 0); 6581 } 6582 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6583 ExtType, MemVT, MMO); 6584 createOperands(N, Ops); 6585 6586 CSEMap.InsertNode(N, IP); 6587 InsertNode(N); 6588 SDValue V(N, 0); 6589 NewSDValueDbgMsg(V, "Creating new node: ", this); 6590 return V; 6591 } 6592 6593 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6594 SDValue Ptr, MachinePointerInfo PtrInfo, 6595 unsigned Alignment, 6596 MachineMemOperand::Flags MMOFlags, 6597 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6598 SDValue Undef = getUNDEF(Ptr.getValueType()); 6599 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6600 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); 6601 } 6602 6603 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6604 SDValue Ptr, MachineMemOperand *MMO) { 6605 SDValue Undef = getUNDEF(Ptr.getValueType()); 6606 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6607 VT, MMO); 6608 } 6609 6610 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6611 EVT VT, SDValue Chain, SDValue Ptr, 6612 MachinePointerInfo PtrInfo, EVT MemVT, 6613 unsigned Alignment, 6614 MachineMemOperand::Flags MMOFlags, 6615 const AAMDNodes &AAInfo) { 6616 SDValue Undef = getUNDEF(Ptr.getValueType()); 6617 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, 6618 MemVT, Alignment, MMOFlags, AAInfo); 6619 } 6620 6621 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6622 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, 6623 MachineMemOperand *MMO) { 6624 SDValue Undef = getUNDEF(Ptr.getValueType()); 6625 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, 6626 MemVT, MMO); 6627 } 6628 6629 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, 6630 SDValue Base, SDValue Offset, 6631 ISD::MemIndexedMode AM) { 6632 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); 6633 assert(LD->getOffset().isUndef() && "Load is already a indexed load!"); 6634 // Don't propagate the invariant or dereferenceable flags. 
6635 auto MMOFlags = 6636 LD->getMemOperand()->getFlags() & 6637 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 6638 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 6639 LD->getChain(), Base, Offset, LD->getPointerInfo(), 6640 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 6641 LD->getAAInfo()); 6642 } 6643 6644 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6645 SDValue Ptr, MachinePointerInfo PtrInfo, 6646 unsigned Alignment, 6647 MachineMemOperand::Flags MMOFlags, 6648 const AAMDNodes &AAInfo) { 6649 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 6650 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6651 Alignment = getEVTAlignment(Val.getValueType()); 6652 6653 MMOFlags |= MachineMemOperand::MOStore; 6654 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6655 6656 if (PtrInfo.V.isNull()) 6657 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6658 6659 MachineFunction &MF = getMachineFunction(); 6660 MachineMemOperand *MMO = MF.getMachineMemOperand( 6661 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo); 6662 return getStore(Chain, dl, Val, Ptr, MMO); 6663 } 6664 6665 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6666 SDValue Ptr, MachineMemOperand *MMO) { 6667 assert(Chain.getValueType() == MVT::Other && 6668 "Invalid chain type"); 6669 EVT VT = Val.getValueType(); 6670 SDVTList VTs = getVTList(MVT::Other); 6671 SDValue Undef = getUNDEF(Ptr.getValueType()); 6672 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6673 FoldingSetNodeID ID; 6674 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6675 ID.AddInteger(VT.getRawBits()); 6676 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6677 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 6678 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6679 void *IP = nullptr; 6680 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6681 cast<StoreSDNode>(E)->refineAlignment(MMO); 6682 return SDValue(E, 0); 6683 } 6684 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6685 ISD::UNINDEXED, false, VT, MMO); 6686 createOperands(N, Ops); 6687 6688 CSEMap.InsertNode(N, IP); 6689 InsertNode(N); 6690 SDValue V(N, 0); 6691 NewSDValueDbgMsg(V, "Creating new node: ", this); 6692 return V; 6693 } 6694 6695 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6696 SDValue Ptr, MachinePointerInfo PtrInfo, 6697 EVT SVT, unsigned Alignment, 6698 MachineMemOperand::Flags MMOFlags, 6699 const AAMDNodes &AAInfo) { 6700 assert(Chain.getValueType() == MVT::Other && 6701 "Invalid chain type"); 6702 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6703 Alignment = getEVTAlignment(SVT); 6704 6705 MMOFlags |= MachineMemOperand::MOStore; 6706 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6707 6708 if (PtrInfo.V.isNull()) 6709 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6710 6711 MachineFunction &MF = getMachineFunction(); 6712 MachineMemOperand *MMO = MF.getMachineMemOperand( 6713 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 6714 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 6715 } 6716 6717 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6718 SDValue Ptr, EVT SVT, 6719 MachineMemOperand *MMO) { 6720 EVT VT = Val.getValueType(); 6721 6722 assert(Chain.getValueType() == MVT::Other && 6723 "Invalid chain type"); 6724 if (VT == SVT) 6725 return getStore(Chain, dl, Val, Ptr, 
MMO); 6726 6727 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 6728 "Should only be a truncating store, not extending!"); 6729 assert(VT.isInteger() == SVT.isInteger() && 6730 "Can't do FP-INT conversion!"); 6731 assert(VT.isVector() == SVT.isVector() && 6732 "Cannot use trunc store to convert to or from a vector!"); 6733 assert((!VT.isVector() || 6734 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 6735 "Cannot use trunc store to change the number of vector elements!"); 6736 6737 SDVTList VTs = getVTList(MVT::Other); 6738 SDValue Undef = getUNDEF(Ptr.getValueType()); 6739 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6740 FoldingSetNodeID ID; 6741 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6742 ID.AddInteger(SVT.getRawBits()); 6743 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6744 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 6745 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6746 void *IP = nullptr; 6747 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6748 cast<StoreSDNode>(E)->refineAlignment(MMO); 6749 return SDValue(E, 0); 6750 } 6751 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6752 ISD::UNINDEXED, true, SVT, MMO); 6753 createOperands(N, Ops); 6754 6755 CSEMap.InsertNode(N, IP); 6756 InsertNode(N); 6757 SDValue V(N, 0); 6758 NewSDValueDbgMsg(V, "Creating new node: ", this); 6759 return V; 6760 } 6761 6762 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 6763 SDValue Base, SDValue Offset, 6764 ISD::MemIndexedMode AM) { 6765 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 6766 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 6767 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 6768 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 6769 FoldingSetNodeID ID; 6770 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6771 ID.AddInteger(ST->getMemoryVT().getRawBits()); 6772 ID.AddInteger(ST->getRawSubclassData()); 6773 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 6774 void *IP = nullptr; 6775 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6776 return SDValue(E, 0); 6777 6778 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6779 ST->isTruncatingStore(), ST->getMemoryVT(), 6780 ST->getMemOperand()); 6781 createOperands(N, Ops); 6782 6783 CSEMap.InsertNode(N, IP); 6784 InsertNode(N); 6785 SDValue V(N, 0); 6786 NewSDValueDbgMsg(V, "Creating new node: ", this); 6787 return V; 6788 } 6789 6790 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6791 SDValue Ptr, SDValue Mask, SDValue PassThru, 6792 EVT MemVT, MachineMemOperand *MMO, 6793 ISD::LoadExtType ExtTy, bool isExpanding) { 6794 SDVTList VTs = getVTList(VT, MVT::Other); 6795 SDValue Ops[] = { Chain, Ptr, Mask, PassThru }; 6796 FoldingSetNodeID ID; 6797 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 6798 ID.AddInteger(VT.getRawBits()); 6799 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 6800 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO)); 6801 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6802 void *IP = nullptr; 6803 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6804 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 6805 return SDValue(E, 0); 6806 } 6807 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6808 ExtTy, isExpanding, MemVT, MMO); 6809 createOperands(N, Ops); 6810 6811 CSEMap.InsertNode(N, IP); 6812 InsertNode(N); 6813 SDValue V(N, 0); 6814 NewSDValueDbgMsg(V, "Creating new 
node: ", this); 6815 return V; 6816 } 6817 6818 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 6819 SDValue Val, SDValue Ptr, SDValue Mask, 6820 EVT MemVT, MachineMemOperand *MMO, 6821 bool IsTruncating, bool IsCompressing) { 6822 assert(Chain.getValueType() == MVT::Other && 6823 "Invalid chain type"); 6824 EVT VT = Val.getValueType(); 6825 SDVTList VTs = getVTList(MVT::Other); 6826 SDValue Ops[] = { Chain, Val, Ptr, Mask }; 6827 FoldingSetNodeID ID; 6828 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 6829 ID.AddInteger(VT.getRawBits()); 6830 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 6831 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO)); 6832 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6833 void *IP = nullptr; 6834 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6835 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 6836 return SDValue(E, 0); 6837 } 6838 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6839 IsTruncating, IsCompressing, MemVT, MMO); 6840 createOperands(N, Ops); 6841 6842 CSEMap.InsertNode(N, IP); 6843 InsertNode(N); 6844 SDValue V(N, 0); 6845 NewSDValueDbgMsg(V, "Creating new node: ", this); 6846 return V; 6847 } 6848 6849 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 6850 ArrayRef<SDValue> Ops, 6851 MachineMemOperand *MMO) { 6852 assert(Ops.size() == 6 && "Incompatible number of operands"); 6853 6854 FoldingSetNodeID ID; 6855 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 6856 ID.AddInteger(VT.getRawBits()); 6857 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 6858 dl.getIROrder(), VTs, VT, MMO)); 6859 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6860 void *IP = nullptr; 6861 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6862 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 6863 return SDValue(E, 0); 6864 } 6865 6866 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 6867 VTs, VT, MMO); 6868 createOperands(N, Ops); 6869 6870 assert(N->getPassThru().getValueType() == N->getValueType(0) && 6871 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 6872 assert(N->getMask().getValueType().getVectorNumElements() == 6873 N->getValueType(0).getVectorNumElements() && 6874 "Vector width mismatch between mask and data"); 6875 assert(N->getIndex().getValueType().getVectorNumElements() >= 6876 N->getValueType(0).getVectorNumElements() && 6877 "Vector width mismatch between index and data"); 6878 assert(isa<ConstantSDNode>(N->getScale()) && 6879 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 6880 "Scale should be a constant power of 2"); 6881 6882 CSEMap.InsertNode(N, IP); 6883 InsertNode(N); 6884 SDValue V(N, 0); 6885 NewSDValueDbgMsg(V, "Creating new node: ", this); 6886 return V; 6887 } 6888 6889 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 6890 ArrayRef<SDValue> Ops, 6891 MachineMemOperand *MMO) { 6892 assert(Ops.size() == 6 && "Incompatible number of operands"); 6893 6894 FoldingSetNodeID ID; 6895 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 6896 ID.AddInteger(VT.getRawBits()); 6897 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 6898 dl.getIROrder(), VTs, VT, MMO)); 6899 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6900 void *IP = nullptr; 6901 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6902 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 6903 return SDValue(E, 0); 6904 } 6905 auto *N = 
newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), 6906 VTs, VT, MMO); 6907 createOperands(N, Ops); 6908 6909 assert(N->getMask().getValueType().getVectorNumElements() == 6910 N->getValue().getValueType().getVectorNumElements() && 6911 "Vector width mismatch between mask and data"); 6912 assert(N->getIndex().getValueType().getVectorNumElements() >= 6913 N->getValue().getValueType().getVectorNumElements() && 6914 "Vector width mismatch between index and data"); 6915 assert(isa<ConstantSDNode>(N->getScale()) && 6916 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 6917 "Scale should be a constant power of 2"); 6918 6919 CSEMap.InsertNode(N, IP); 6920 InsertNode(N); 6921 SDValue V(N, 0); 6922 NewSDValueDbgMsg(V, "Creating new node: ", this); 6923 return V; 6924 } 6925 6926 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) { 6927 // select undef, T, F --> T (if T is a constant), otherwise F 6928 // select, ?, undef, F --> F 6929 // select, ?, T, undef --> T 6930 if (Cond.isUndef()) 6931 return isConstantValueOfAnyType(T) ? T : F; 6932 if (T.isUndef()) 6933 return F; 6934 if (F.isUndef()) 6935 return T; 6936 6937 // select true, T, F --> T 6938 // select false, T, F --> F 6939 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond)) 6940 return CondC->isNullValue() ? F : T; 6941 6942 // TODO: This should simplify VSELECT with constant condition using something 6943 // like this (but check boolean contents to be complete?): 6944 // if (ISD::isBuildVectorAllOnes(Cond.getNode())) 6945 // return T; 6946 // if (ISD::isBuildVectorAllZeros(Cond.getNode())) 6947 // return F; 6948 6949 // select ?, T, T --> T 6950 if (T == F) 6951 return T; 6952 6953 return SDValue(); 6954 } 6955 6956 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) { 6957 // shift undef, Y --> 0 (can always assume that the undef value is 0) 6958 if (X.isUndef()) 6959 return getConstant(0, SDLoc(X.getNode()), X.getValueType()); 6960 // shift X, undef --> undef (because it may shift by the bitwidth) 6961 if (Y.isUndef()) 6962 return getUNDEF(X.getValueType()); 6963 6964 // shift 0, Y --> 0 6965 // shift X, 0 --> X 6966 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y)) 6967 return X; 6968 6969 // shift X, C >= bitwidth(X) --> undef 6970 // All vector elements must be too big to avoid partial undefs. 6971 auto isShiftTooBig = [X](ConstantSDNode *Val) { 6972 return Val->getAPIntValue().uge(X.getScalarValueSizeInBits()); 6973 }; 6974 if (ISD::matchUnaryPredicate(Y, isShiftTooBig)) 6975 return getUNDEF(X.getValueType()); 6976 6977 return SDValue(); 6978 } 6979 6980 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 6981 SDValue Ptr, SDValue SV, unsigned Align) { 6982 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 6983 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 6984 } 6985 6986 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 6987 ArrayRef<SDUse> Ops) { 6988 switch (Ops.size()) { 6989 case 0: return getNode(Opcode, DL, VT); 6990 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 6991 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 6992 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 6993 default: break; 6994 } 6995 6996 // Copy from an SDUse array into an SDValue array for use with 6997 // the regular getNode logic. 
6998 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 6999 return getNode(Opcode, DL, VT, NewOps); 7000 } 7001 7002 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7003 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7004 unsigned NumOps = Ops.size(); 7005 switch (NumOps) { 7006 case 0: return getNode(Opcode, DL, VT); 7007 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 7008 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 7009 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags); 7010 default: break; 7011 } 7012 7013 switch (Opcode) { 7014 default: break; 7015 case ISD::BUILD_VECTOR: 7016 // Attempt to simplify BUILD_VECTOR. 7017 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 7018 return V; 7019 break; 7020 case ISD::CONCAT_VECTORS: 7021 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 7022 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 7023 return V; 7024 break; 7025 case ISD::SELECT_CC: 7026 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 7027 assert(Ops[0].getValueType() == Ops[1].getValueType() && 7028 "LHS and RHS of condition must have same type!"); 7029 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7030 "True and False arms of SelectCC must have same type!"); 7031 assert(Ops[2].getValueType() == VT && 7032 "select_cc node must be of same type as true and false value!"); 7033 break; 7034 case ISD::BR_CC: 7035 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 7036 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7037 "LHS/RHS of comparison should match types!"); 7038 break; 7039 } 7040 7041 // Memoize nodes. 7042 SDNode *N; 7043 SDVTList VTs = getVTList(VT); 7044 7045 if (VT != MVT::Glue) { 7046 FoldingSetNodeID ID; 7047 AddNodeIDNode(ID, Opcode, VTs, Ops); 7048 void *IP = nullptr; 7049 7050 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7051 return SDValue(E, 0); 7052 7053 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7054 createOperands(N, Ops); 7055 7056 CSEMap.InsertNode(N, IP); 7057 } else { 7058 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7059 createOperands(N, Ops); 7060 } 7061 7062 InsertNode(N); 7063 SDValue V(N, 0); 7064 NewSDValueDbgMsg(V, "Creating new node: ", this); 7065 return V; 7066 } 7067 7068 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7069 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 7070 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 7071 } 7072 7073 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7074 ArrayRef<SDValue> Ops) { 7075 if (VTList.NumVTs == 1) 7076 return getNode(Opcode, DL, VTList.VTs[0], Ops); 7077 7078 #if 0 7079 switch (Opcode) { 7080 // FIXME: figure out how to safely handle things like 7081 // int foo(int x) { return 1 << (x & 255); } 7082 // int bar() { return foo(256); } 7083 case ISD::SRA_PARTS: 7084 case ISD::SRL_PARTS: 7085 case ISD::SHL_PARTS: 7086 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 7087 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 7088 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7089 else if (N3.getOpcode() == ISD::AND) 7090 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 7091 // If the and is only masking out bits that cannot effect the shift, 7092 // eliminate the and. 
7093 unsigned NumBits = VT.getScalarSizeInBits()*2; 7094 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 7095 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7096 } 7097 break; 7098 } 7099 #endif 7100 7101 // Memoize the node unless it returns a flag. 7102 SDNode *N; 7103 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 7104 FoldingSetNodeID ID; 7105 AddNodeIDNode(ID, Opcode, VTList, Ops); 7106 void *IP = nullptr; 7107 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7108 return SDValue(E, 0); 7109 7110 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7111 createOperands(N, Ops); 7112 CSEMap.InsertNode(N, IP); 7113 } else { 7114 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7115 createOperands(N, Ops); 7116 } 7117 InsertNode(N); 7118 SDValue V(N, 0); 7119 NewSDValueDbgMsg(V, "Creating new node: ", this); 7120 return V; 7121 } 7122 7123 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7124 SDVTList VTList) { 7125 return getNode(Opcode, DL, VTList, None); 7126 } 7127 7128 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7129 SDValue N1) { 7130 SDValue Ops[] = { N1 }; 7131 return getNode(Opcode, DL, VTList, Ops); 7132 } 7133 7134 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7135 SDValue N1, SDValue N2) { 7136 SDValue Ops[] = { N1, N2 }; 7137 return getNode(Opcode, DL, VTList, Ops); 7138 } 7139 7140 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7141 SDValue N1, SDValue N2, SDValue N3) { 7142 SDValue Ops[] = { N1, N2, N3 }; 7143 return getNode(Opcode, DL, VTList, Ops); 7144 } 7145 7146 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7147 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 7148 SDValue Ops[] = { N1, N2, N3, N4 }; 7149 return getNode(Opcode, DL, VTList, Ops); 7150 } 7151 7152 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7153 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 7154 SDValue N5) { 7155 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 7156 return getNode(Opcode, DL, VTList, Ops); 7157 } 7158 7159 SDVTList SelectionDAG::getVTList(EVT VT) { 7160 return makeVTList(SDNode::getValueTypeList(VT), 1); 7161 } 7162 7163 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 7164 FoldingSetNodeID ID; 7165 ID.AddInteger(2U); 7166 ID.AddInteger(VT1.getRawBits()); 7167 ID.AddInteger(VT2.getRawBits()); 7168 7169 void *IP = nullptr; 7170 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7171 if (!Result) { 7172 EVT *Array = Allocator.Allocate<EVT>(2); 7173 Array[0] = VT1; 7174 Array[1] = VT2; 7175 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 7176 VTListMap.InsertNode(Result, IP); 7177 } 7178 return Result->getSDVTList(); 7179 } 7180 7181 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 7182 FoldingSetNodeID ID; 7183 ID.AddInteger(3U); 7184 ID.AddInteger(VT1.getRawBits()); 7185 ID.AddInteger(VT2.getRawBits()); 7186 ID.AddInteger(VT3.getRawBits()); 7187 7188 void *IP = nullptr; 7189 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7190 if (!Result) { 7191 EVT *Array = Allocator.Allocate<EVT>(3); 7192 Array[0] = VT1; 7193 Array[1] = VT2; 7194 Array[2] = VT3; 7195 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 7196 VTListMap.InsertNode(Result, IP); 7197 } 7198 return Result->getSDVTList(); 7199 } 7200 7201 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, 
EVT VT4) { 7202 FoldingSetNodeID ID; 7203 ID.AddInteger(4U); 7204 ID.AddInteger(VT1.getRawBits()); 7205 ID.AddInteger(VT2.getRawBits()); 7206 ID.AddInteger(VT3.getRawBits()); 7207 ID.AddInteger(VT4.getRawBits()); 7208 7209 void *IP = nullptr; 7210 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7211 if (!Result) { 7212 EVT *Array = Allocator.Allocate<EVT>(4); 7213 Array[0] = VT1; 7214 Array[1] = VT2; 7215 Array[2] = VT3; 7216 Array[3] = VT4; 7217 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 7218 VTListMap.InsertNode(Result, IP); 7219 } 7220 return Result->getSDVTList(); 7221 } 7222 7223 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 7224 unsigned NumVTs = VTs.size(); 7225 FoldingSetNodeID ID; 7226 ID.AddInteger(NumVTs); 7227 for (unsigned index = 0; index < NumVTs; index++) { 7228 ID.AddInteger(VTs[index].getRawBits()); 7229 } 7230 7231 void *IP = nullptr; 7232 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7233 if (!Result) { 7234 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 7235 llvm::copy(VTs, Array); 7236 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 7237 VTListMap.InsertNode(Result, IP); 7238 } 7239 return Result->getSDVTList(); 7240 } 7241 7242 7243 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 7244 /// specified operands. If the resultant node already exists in the DAG, 7245 /// this does not modify the specified node, instead it returns the node that 7246 /// already exists. If the resultant node does not exist in the DAG, the 7247 /// input node is returned. As a degenerate case, if you specify the same 7248 /// input operands as the node already has, the input node is returned. 7249 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 7250 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 7251 7252 // Check to see if there is no change. 7253 if (Op == N->getOperand(0)) return N; 7254 7255 // See if the modified node already exists. 7256 void *InsertPos = nullptr; 7257 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 7258 return Existing; 7259 7260 // Nope it doesn't. Remove the node from its current place in the maps. 7261 if (InsertPos) 7262 if (!RemoveNodeFromCSEMaps(N)) 7263 InsertPos = nullptr; 7264 7265 // Now we update the operands. 7266 N->OperandList[0].set(Op); 7267 7268 updateDivergence(N); 7269 // If this gets put into a CSE map, add it. 7270 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7271 return N; 7272 } 7273 7274 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 7275 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 7276 7277 // Check to see if there is no change. 7278 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 7279 return N; // No operands changed, just return the input node. 7280 7281 // See if the modified node already exists. 7282 void *InsertPos = nullptr; 7283 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 7284 return Existing; 7285 7286 // Nope it doesn't. Remove the node from its current place in the maps. 7287 if (InsertPos) 7288 if (!RemoveNodeFromCSEMaps(N)) 7289 InsertPos = nullptr; 7290 7291 // Now we update the operands. 7292 if (N->OperandList[0] != Op1) 7293 N->OperandList[0].set(Op1); 7294 if (N->OperandList[1] != Op2) 7295 N->OperandList[1].set(Op2); 7296 7297 updateDivergence(N); 7298 // If this gets put into a CSE map, add it. 
7299 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7300 return N; 7301 } 7302 7303 SDNode *SelectionDAG:: 7304 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 7305 SDValue Ops[] = { Op1, Op2, Op3 }; 7306 return UpdateNodeOperands(N, Ops); 7307 } 7308 7309 SDNode *SelectionDAG:: 7310 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7311 SDValue Op3, SDValue Op4) { 7312 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 7313 return UpdateNodeOperands(N, Ops); 7314 } 7315 7316 SDNode *SelectionDAG:: 7317 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7318 SDValue Op3, SDValue Op4, SDValue Op5) { 7319 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 7320 return UpdateNodeOperands(N, Ops); 7321 } 7322 7323 SDNode *SelectionDAG:: 7324 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 7325 unsigned NumOps = Ops.size(); 7326 assert(N->getNumOperands() == NumOps && 7327 "Update with wrong number of operands"); 7328 7329 // If no operands changed just return the input node. 7330 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 7331 return N; 7332 7333 // See if the modified node already exists. 7334 void *InsertPos = nullptr; 7335 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 7336 return Existing; 7337 7338 // Nope it doesn't. Remove the node from its current place in the maps. 7339 if (InsertPos) 7340 if (!RemoveNodeFromCSEMaps(N)) 7341 InsertPos = nullptr; 7342 7343 // Now we update the operands. 7344 for (unsigned i = 0; i != NumOps; ++i) 7345 if (N->OperandList[i] != Ops[i]) 7346 N->OperandList[i].set(Ops[i]); 7347 7348 updateDivergence(N); 7349 // If this gets put into a CSE map, add it. 7350 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7351 return N; 7352 } 7353 7354 /// DropOperands - Release the operands and set this node to have 7355 /// zero operands. 7356 void SDNode::DropOperands() { 7357 // Unlike the code in MorphNodeTo that does this, we don't need to 7358 // watch for dead nodes here. 7359 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 7360 SDUse &Use = *I++; 7361 Use.set(SDValue()); 7362 } 7363 } 7364 7365 void SelectionDAG::setNodeMemRefs(MachineSDNode *N, 7366 ArrayRef<MachineMemOperand *> NewMemRefs) { 7367 if (NewMemRefs.empty()) { 7368 N->clearMemRefs(); 7369 return; 7370 } 7371 7372 // Check if we can avoid allocating by storing a single reference directly. 7373 if (NewMemRefs.size() == 1) { 7374 N->MemRefs = NewMemRefs[0]; 7375 N->NumMemRefs = 1; 7376 return; 7377 } 7378 7379 MachineMemOperand **MemRefsBuffer = 7380 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size()); 7381 llvm::copy(NewMemRefs, MemRefsBuffer); 7382 N->MemRefs = MemRefsBuffer; 7383 N->NumMemRefs = static_cast<int>(NewMemRefs.size()); 7384 } 7385 7386 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 7387 /// machine opcode. 
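///
/// Illustrative sketch only (not part of the original source): during
/// instruction selection a node may be morphed in place into a target
/// instruction. The names 'CurDAG', 'N', 'Imm', and the opcode
/// 'TargetOpc::MOVri' below are assumed/hypothetical:
/// \code
///   SDNode *New = CurDAG->SelectNodeTo(N, TargetOpc::MOVri, MVT::i32, Imm);
/// \endcode
/// If an equivalent node already existed, the returned node can differ from
/// 'N'; in that case all uses of 'N' are redirected and 'N' is removed.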
7388 ///
7389 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7390 EVT VT) {
7391 SDVTList VTs = getVTList(VT);
7392 return SelectNodeTo(N, MachineOpc, VTs, None);
7393 }
7394 
7395 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7396 EVT VT, SDValue Op1) {
7397 SDVTList VTs = getVTList(VT);
7398 SDValue Ops[] = { Op1 };
7399 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7400 }
7401 
7402 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7403 EVT VT, SDValue Op1,
7404 SDValue Op2) {
7405 SDVTList VTs = getVTList(VT);
7406 SDValue Ops[] = { Op1, Op2 };
7407 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7408 }
7409 
7410 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7411 EVT VT, SDValue Op1,
7412 SDValue Op2, SDValue Op3) {
7413 SDVTList VTs = getVTList(VT);
7414 SDValue Ops[] = { Op1, Op2, Op3 };
7415 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7416 }
7417 
7418 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7419 EVT VT, ArrayRef<SDValue> Ops) {
7420 SDVTList VTs = getVTList(VT);
7421 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7422 }
7423 
7424 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7425 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
7426 SDVTList VTs = getVTList(VT1, VT2);
7427 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7428 }
7429 
7430 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7431 EVT VT1, EVT VT2) {
7432 SDVTList VTs = getVTList(VT1, VT2);
7433 return SelectNodeTo(N, MachineOpc, VTs, None);
7434 }
7435 
7436 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7437 EVT VT1, EVT VT2, EVT VT3,
7438 ArrayRef<SDValue> Ops) {
7439 SDVTList VTs = getVTList(VT1, VT2, VT3);
7440 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7441 }
7442 
7443 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7444 EVT VT1, EVT VT2,
7445 SDValue Op1, SDValue Op2) {
7446 SDVTList VTs = getVTList(VT1, VT2);
7447 SDValue Ops[] = { Op1, Op2 };
7448 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7449 }
7450 
7451 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7452 SDVTList VTs, ArrayRef<SDValue> Ops) {
7453 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
7454 // Reset the NodeID to -1.
7455 New->setNodeId(-1);
7456 if (New != N) {
7457 ReplaceAllUsesWith(N, New);
7458 RemoveDeadNode(N);
7459 }
7460 return New;
7461 }
7462 
7463 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
7464 /// the line number information on the merged node since it is not possible to
7465 /// preserve the information that the operation is associated with multiple lines.
7466 /// This will make the debugger work better at -O0, where there is a higher
7467 /// probability of having other instructions associated with that line.
7468 ///
7469 /// For IROrder, we keep the smaller of the two.
7470 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
7471 DebugLoc NLoc = N->getDebugLoc();
7472 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
7473 N->setDebugLoc(DebugLoc());
7474 }
7475 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
7476 N->setIROrder(Order);
7477 return N;
7478 }
7479 
7480 /// MorphNodeTo - This *mutates* the specified node to have the specified
7481 /// return type, opcode, and operands.
7482 ///
7483 /// Note that MorphNodeTo returns the resultant node.
If there is already a 7484 /// node of the specified opcode and operands, it returns that node instead of 7485 /// the current one. Note that the SDLoc need not be the same. 7486 /// 7487 /// Using MorphNodeTo is faster than creating a new node and swapping it in 7488 /// with ReplaceAllUsesWith both because it often avoids allocating a new 7489 /// node, and because it doesn't require CSE recalculation for any of 7490 /// the node's users. 7491 /// 7492 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG. 7493 /// As a consequence it isn't appropriate to use from within the DAG combiner or 7494 /// the legalizer which maintain worklists that would need to be updated when 7495 /// deleting things. 7496 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, 7497 SDVTList VTs, ArrayRef<SDValue> Ops) { 7498 // If an identical node already exists, use it. 7499 void *IP = nullptr; 7500 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { 7501 FoldingSetNodeID ID; 7502 AddNodeIDNode(ID, Opc, VTs, Ops); 7503 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) 7504 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); 7505 } 7506 7507 if (!RemoveNodeFromCSEMaps(N)) 7508 IP = nullptr; 7509 7510 // Start the morphing. 7511 N->NodeType = Opc; 7512 N->ValueList = VTs.VTs; 7513 N->NumValues = VTs.NumVTs; 7514 7515 // Clear the operands list, updating used nodes to remove this from their 7516 // use list. Keep track of any operands that become dead as a result. 7517 SmallPtrSet<SDNode*, 16> DeadNodeSet; 7518 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 7519 SDUse &Use = *I++; 7520 SDNode *Used = Use.getNode(); 7521 Use.set(SDValue()); 7522 if (Used->use_empty()) 7523 DeadNodeSet.insert(Used); 7524 } 7525 7526 // For MachineNode, initialize the memory references information. 7527 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) 7528 MN->clearMemRefs(); 7529 7530 // Swap for an appropriately sized array from the recycler. 7531 removeOperands(N); 7532 createOperands(N, Ops); 7533 7534 // Delete any nodes that are still dead after adding the uses for the 7535 // new operands. 7536 if (!DeadNodeSet.empty()) { 7537 SmallVector<SDNode *, 16> DeadNodes; 7538 for (SDNode *N : DeadNodeSet) 7539 if (N->use_empty()) 7540 DeadNodes.push_back(N); 7541 RemoveDeadNodes(DeadNodes); 7542 } 7543 7544 if (IP) 7545 CSEMap.InsertNode(N, IP); // Memoize the new node. 
7546 return N; 7547 } 7548 7549 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) { 7550 unsigned OrigOpc = Node->getOpcode(); 7551 unsigned NewOpc; 7552 bool IsUnary = false; 7553 bool IsTernary = false; 7554 switch (OrigOpc) { 7555 default: 7556 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!"); 7557 case ISD::STRICT_FADD: NewOpc = ISD::FADD; break; 7558 case ISD::STRICT_FSUB: NewOpc = ISD::FSUB; break; 7559 case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break; 7560 case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break; 7561 case ISD::STRICT_FREM: NewOpc = ISD::FREM; break; 7562 case ISD::STRICT_FMA: NewOpc = ISD::FMA; IsTernary = true; break; 7563 case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; IsUnary = true; break; 7564 case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break; 7565 case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break; 7566 case ISD::STRICT_FSIN: NewOpc = ISD::FSIN; IsUnary = true; break; 7567 case ISD::STRICT_FCOS: NewOpc = ISD::FCOS; IsUnary = true; break; 7568 case ISD::STRICT_FEXP: NewOpc = ISD::FEXP; IsUnary = true; break; 7569 case ISD::STRICT_FEXP2: NewOpc = ISD::FEXP2; IsUnary = true; break; 7570 case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; IsUnary = true; break; 7571 case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; IsUnary = true; break; 7572 case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; IsUnary = true; break; 7573 case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; IsUnary = true; break; 7574 case ISD::STRICT_FNEARBYINT: 7575 NewOpc = ISD::FNEARBYINT; 7576 IsUnary = true; 7577 break; 7578 case ISD::STRICT_FMAXNUM: NewOpc = ISD::FMAXNUM; break; 7579 case ISD::STRICT_FMINNUM: NewOpc = ISD::FMINNUM; break; 7580 case ISD::STRICT_FCEIL: NewOpc = ISD::FCEIL; IsUnary = true; break; 7581 case ISD::STRICT_FFLOOR: NewOpc = ISD::FFLOOR; IsUnary = true; break; 7582 case ISD::STRICT_FROUND: NewOpc = ISD::FROUND; IsUnary = true; break; 7583 case ISD::STRICT_FTRUNC: NewOpc = ISD::FTRUNC; IsUnary = true; break; 7584 } 7585 7586 // We're taking this node out of the chain, so we need to re-link things. 7587 SDValue InputChain = Node->getOperand(0); 7588 SDValue OutputChain = SDValue(Node, 1); 7589 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 7590 7591 SDVTList VTs = getVTList(Node->getOperand(1).getValueType()); 7592 SDNode *Res = nullptr; 7593 if (IsUnary) 7594 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1) }); 7595 else if (IsTernary) 7596 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1), 7597 Node->getOperand(2), 7598 Node->getOperand(3)}); 7599 else 7600 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1), 7601 Node->getOperand(2) }); 7602 7603 // MorphNodeTo can operate in two ways: if an existing node with the 7604 // specified operands exists, it can just return it. Otherwise, it 7605 // updates the node in place to have the requested operands. 7606 if (Res == Node) { 7607 // If we updated the node in place, reset the node ID. To the isel, 7608 // this should be just like a newly allocated machine node. 7609 Res->setNodeId(-1); 7610 } else { 7611 ReplaceAllUsesWith(Node, Res); 7612 RemoveDeadNode(Node); 7613 } 7614 7615 return Res; 7616 } 7617 7618 /// getMachineNode - These are used for target selectors to create a new node 7619 /// with specified return type(s), MachineInstr opcode, and operands. 7620 /// 7621 /// Note that getMachineNode returns the resultant node. If there is already a 7622 /// node of the specified opcode and operands, it returns that node instead of 7623 /// the current one. 
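///
/// Illustrative sketch only (not part of the original source): a target's
/// selector, holding a SelectionDAG 'CurDAG', a location 'dl', operands 'LHS'
/// and 'RHS', and a hypothetical machine opcode 'TargetOpc::ADDrr', might
/// build a machine node as:
/// \code
///   MachineSDNode *Add =
///       CurDAG->getMachineNode(TargetOpc::ADDrr, dl, MVT::i32, LHS, RHS);
/// \endcode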
7624 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7625 EVT VT) { 7626 SDVTList VTs = getVTList(VT); 7627 return getMachineNode(Opcode, dl, VTs, None); 7628 } 7629 7630 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7631 EVT VT, SDValue Op1) { 7632 SDVTList VTs = getVTList(VT); 7633 SDValue Ops[] = { Op1 }; 7634 return getMachineNode(Opcode, dl, VTs, Ops); 7635 } 7636 7637 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7638 EVT VT, SDValue Op1, SDValue Op2) { 7639 SDVTList VTs = getVTList(VT); 7640 SDValue Ops[] = { Op1, Op2 }; 7641 return getMachineNode(Opcode, dl, VTs, Ops); 7642 } 7643 7644 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7645 EVT VT, SDValue Op1, SDValue Op2, 7646 SDValue Op3) { 7647 SDVTList VTs = getVTList(VT); 7648 SDValue Ops[] = { Op1, Op2, Op3 }; 7649 return getMachineNode(Opcode, dl, VTs, Ops); 7650 } 7651 7652 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7653 EVT VT, ArrayRef<SDValue> Ops) { 7654 SDVTList VTs = getVTList(VT); 7655 return getMachineNode(Opcode, dl, VTs, Ops); 7656 } 7657 7658 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7659 EVT VT1, EVT VT2, SDValue Op1, 7660 SDValue Op2) { 7661 SDVTList VTs = getVTList(VT1, VT2); 7662 SDValue Ops[] = { Op1, Op2 }; 7663 return getMachineNode(Opcode, dl, VTs, Ops); 7664 } 7665 7666 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7667 EVT VT1, EVT VT2, SDValue Op1, 7668 SDValue Op2, SDValue Op3) { 7669 SDVTList VTs = getVTList(VT1, VT2); 7670 SDValue Ops[] = { Op1, Op2, Op3 }; 7671 return getMachineNode(Opcode, dl, VTs, Ops); 7672 } 7673 7674 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7675 EVT VT1, EVT VT2, 7676 ArrayRef<SDValue> Ops) { 7677 SDVTList VTs = getVTList(VT1, VT2); 7678 return getMachineNode(Opcode, dl, VTs, Ops); 7679 } 7680 7681 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7682 EVT VT1, EVT VT2, EVT VT3, 7683 SDValue Op1, SDValue Op2) { 7684 SDVTList VTs = getVTList(VT1, VT2, VT3); 7685 SDValue Ops[] = { Op1, Op2 }; 7686 return getMachineNode(Opcode, dl, VTs, Ops); 7687 } 7688 7689 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7690 EVT VT1, EVT VT2, EVT VT3, 7691 SDValue Op1, SDValue Op2, 7692 SDValue Op3) { 7693 SDVTList VTs = getVTList(VT1, VT2, VT3); 7694 SDValue Ops[] = { Op1, Op2, Op3 }; 7695 return getMachineNode(Opcode, dl, VTs, Ops); 7696 } 7697 7698 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7699 EVT VT1, EVT VT2, EVT VT3, 7700 ArrayRef<SDValue> Ops) { 7701 SDVTList VTs = getVTList(VT1, VT2, VT3); 7702 return getMachineNode(Opcode, dl, VTs, Ops); 7703 } 7704 7705 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7706 ArrayRef<EVT> ResultTys, 7707 ArrayRef<SDValue> Ops) { 7708 SDVTList VTs = getVTList(ResultTys); 7709 return getMachineNode(Opcode, dl, VTs, Ops); 7710 } 7711 7712 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 7713 SDVTList VTs, 7714 ArrayRef<SDValue> Ops) { 7715 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 7716 MachineSDNode *N; 7717 void *IP = nullptr; 7718 7719 if (DoCSE) { 7720 FoldingSetNodeID ID; 7721 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 7722 IP = nullptr; 7723 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 7724 return 
cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 7725 } 7726 } 7727 7728 // Allocate a new MachineSDNode. 7729 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7730 createOperands(N, Ops); 7731 7732 if (DoCSE) 7733 CSEMap.InsertNode(N, IP); 7734 7735 InsertNode(N); 7736 return N; 7737 } 7738 7739 /// getTargetExtractSubreg - A convenience function for creating 7740 /// TargetOpcode::EXTRACT_SUBREG nodes. 7741 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 7742 SDValue Operand) { 7743 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 7744 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 7745 VT, Operand, SRIdxVal); 7746 return SDValue(Subreg, 0); 7747 } 7748 7749 /// getTargetInsertSubreg - A convenience function for creating 7750 /// TargetOpcode::INSERT_SUBREG nodes. 7751 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 7752 SDValue Operand, SDValue Subreg) { 7753 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 7754 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 7755 VT, Operand, Subreg, SRIdxVal); 7756 return SDValue(Result, 0); 7757 } 7758 7759 /// getNodeIfExists - Get the specified node if it's already available, or 7760 /// else return NULL. 7761 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 7762 ArrayRef<SDValue> Ops, 7763 const SDNodeFlags Flags) { 7764 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 7765 FoldingSetNodeID ID; 7766 AddNodeIDNode(ID, Opcode, VTList, Ops); 7767 void *IP = nullptr; 7768 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 7769 E->intersectFlagsWith(Flags); 7770 return E; 7771 } 7772 } 7773 return nullptr; 7774 } 7775 7776 /// getDbgValue - Creates a SDDbgValue node. 
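///
/// Illustrative sketch only (not part of the original source), assuming a
/// caller with a SelectionDAG 'DAG', a DILocalVariable 'Var', a DIExpression
/// 'Expr', an SDNode 'Node' whose result 0 holds the variable's value, and a
/// DebugLoc 'DL':
/// \code
///   SDDbgValue *DV = DAG.getDbgValue(Var, Expr, Node, /*R=*/0,
///                                    /*IsIndirect=*/false, DL, /*O=*/0);
///   DAG.AddDbgValue(DV, Node, /*isParameter=*/false);
/// \endcode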
7777 /// 7778 /// SDNode 7779 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 7780 SDNode *N, unsigned R, bool IsIndirect, 7781 const DebugLoc &DL, unsigned O) { 7782 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7783 "Expected inlined-at fields to agree"); 7784 return new (DbgInfo->getAlloc()) 7785 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 7786 } 7787 7788 /// Constant 7789 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 7790 DIExpression *Expr, 7791 const Value *C, 7792 const DebugLoc &DL, unsigned O) { 7793 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7794 "Expected inlined-at fields to agree"); 7795 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 7796 } 7797 7798 /// FrameIndex 7799 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 7800 DIExpression *Expr, unsigned FI, 7801 bool IsIndirect, 7802 const DebugLoc &DL, 7803 unsigned O) { 7804 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7805 "Expected inlined-at fields to agree"); 7806 return new (DbgInfo->getAlloc()) 7807 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX); 7808 } 7809 7810 /// VReg 7811 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, 7812 DIExpression *Expr, 7813 unsigned VReg, bool IsIndirect, 7814 const DebugLoc &DL, unsigned O) { 7815 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7816 "Expected inlined-at fields to agree"); 7817 return new (DbgInfo->getAlloc()) 7818 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG); 7819 } 7820 7821 void SelectionDAG::transferDbgValues(SDValue From, SDValue To, 7822 unsigned OffsetInBits, unsigned SizeInBits, 7823 bool InvalidateDbg) { 7824 SDNode *FromNode = From.getNode(); 7825 SDNode *ToNode = To.getNode(); 7826 assert(FromNode && ToNode && "Can't modify dbg values"); 7827 7828 // PR35338 7829 // TODO: assert(From != To && "Redundant dbg value transfer"); 7830 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); 7831 if (From == To || FromNode == ToNode) 7832 return; 7833 7834 if (!FromNode->getHasDebugValue()) 7835 return; 7836 7837 SmallVector<SDDbgValue *, 2> ClonedDVs; 7838 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { 7839 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated()) 7840 continue; 7841 7842 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); 7843 7844 // Just transfer the dbg value attached to From. 7845 if (Dbg->getResNo() != From.getResNo()) 7846 continue; 7847 7848 DIVariable *Var = Dbg->getVariable(); 7849 auto *Expr = Dbg->getExpression(); 7850 // If a fragment is requested, update the expression. 7851 if (SizeInBits) { 7852 // When splitting a larger (e.g., sign-extended) value whose 7853 // lower bits are described with an SDDbgValue, do not attempt 7854 // to transfer the SDDbgValue to the upper bits. 7855 if (auto FI = Expr->getFragmentInfo()) 7856 if (OffsetInBits + SizeInBits > FI->SizeInBits) 7857 continue; 7858 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, 7859 SizeInBits); 7860 if (!Fragment) 7861 continue; 7862 Expr = *Fragment; 7863 } 7864 // Clone the SDDbgValue and move it to To. 
7865 SDDbgValue *Clone = 7866 getDbgValue(Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), 7867 Dbg->getDebugLoc(), Dbg->getOrder()); 7868 ClonedDVs.push_back(Clone); 7869 7870 if (InvalidateDbg) { 7871 // Invalidate value and indicate the SDDbgValue should not be emitted. 7872 Dbg->setIsInvalidated(); 7873 Dbg->setIsEmitted(); 7874 } 7875 } 7876 7877 for (SDDbgValue *Dbg : ClonedDVs) 7878 AddDbgValue(Dbg, ToNode, false); 7879 } 7880 7881 void SelectionDAG::salvageDebugInfo(SDNode &N) { 7882 if (!N.getHasDebugValue()) 7883 return; 7884 7885 SmallVector<SDDbgValue *, 2> ClonedDVs; 7886 for (auto DV : GetDbgValues(&N)) { 7887 if (DV->isInvalidated()) 7888 continue; 7889 switch (N.getOpcode()) { 7890 default: 7891 break; 7892 case ISD::ADD: 7893 SDValue N0 = N.getOperand(0); 7894 SDValue N1 = N.getOperand(1); 7895 if (!isConstantIntBuildVectorOrConstantInt(N0) && 7896 isConstantIntBuildVectorOrConstantInt(N1)) { 7897 uint64_t Offset = N.getConstantOperandVal(1); 7898 // Rewrite an ADD constant node into a DIExpression. Since we are 7899 // performing arithmetic to compute the variable's *value* in the 7900 // DIExpression, we need to mark the expression with a 7901 // DW_OP_stack_value. 7902 auto *DIExpr = DV->getExpression(); 7903 DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset, 7904 DIExpression::NoDeref, 7905 DIExpression::WithStackValue); 7906 SDDbgValue *Clone = 7907 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(), 7908 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder()); 7909 ClonedDVs.push_back(Clone); 7910 DV->setIsInvalidated(); 7911 DV->setIsEmitted(); 7912 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; 7913 N0.getNode()->dumprFull(this); 7914 dbgs() << " into " << *DIExpr << '\n'); 7915 } 7916 } 7917 } 7918 7919 for (SDDbgValue *Dbg : ClonedDVs) 7920 AddDbgValue(Dbg, Dbg->getSDNode(), false); 7921 } 7922 7923 /// Creates a SDDbgLabel node. 7924 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label, 7925 const DebugLoc &DL, unsigned O) { 7926 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && 7927 "Expected inlined-at fields to agree"); 7928 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); 7929 } 7930 7931 namespace { 7932 7933 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 7934 /// pointed to by a use iterator is deleted, increment the use iterator 7935 /// so that it doesn't dangle. 7936 /// 7937 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 7938 SDNode::use_iterator &UI; 7939 SDNode::use_iterator &UE; 7940 7941 void NodeDeleted(SDNode *N, SDNode *E) override { 7942 // Increment the iterator as needed. 7943 while (UI != UE && N == *UI) 7944 ++UI; 7945 } 7946 7947 public: 7948 RAUWUpdateListener(SelectionDAG &d, 7949 SDNode::use_iterator &ui, 7950 SDNode::use_iterator &ue) 7951 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 7952 }; 7953 7954 } // end anonymous namespace 7955 7956 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 7957 /// This can cause recursive merging of nodes in the DAG. 7958 /// 7959 /// This version assumes From has a single result value. 
7960 /// 7961 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { 7962 SDNode *From = FromN.getNode(); 7963 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && 7964 "Cannot replace with this method!"); 7965 assert(From != To.getNode() && "Cannot replace uses of with self"); 7966 7967 // Preserve Debug Values 7968 transferDbgValues(FromN, To); 7969 7970 // Iterate over all the existing uses of From. New uses will be added 7971 // to the beginning of the use list, which we avoid visiting. 7972 // This specifically avoids visiting uses of From that arise while the 7973 // replacement is happening, because any such uses would be the result 7974 // of CSE: If an existing node looks like From after one of its operands 7975 // is replaced by To, we don't want to replace of all its users with To 7976 // too. See PR3018 for more info. 7977 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 7978 RAUWUpdateListener Listener(*this, UI, UE); 7979 while (UI != UE) { 7980 SDNode *User = *UI; 7981 7982 // This node is about to morph, remove its old self from the CSE maps. 7983 RemoveNodeFromCSEMaps(User); 7984 7985 // A user can appear in a use list multiple times, and when this 7986 // happens the uses are usually next to each other in the list. 7987 // To help reduce the number of CSE recomputations, process all 7988 // the uses of this user that we can find this way. 7989 do { 7990 SDUse &Use = UI.getUse(); 7991 ++UI; 7992 Use.set(To); 7993 if (To->isDivergent() != From->isDivergent()) 7994 updateDivergence(User); 7995 } while (UI != UE && *UI == User); 7996 // Now that we have modified User, add it back to the CSE maps. If it 7997 // already exists there, recursively merge the results together. 7998 AddModifiedNodeToCSEMaps(User); 7999 } 8000 8001 // If we just RAUW'd the root, take note. 8002 if (FromN == getRoot()) 8003 setRoot(To); 8004 } 8005 8006 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8007 /// This can cause recursive merging of nodes in the DAG. 8008 /// 8009 /// This version assumes that for each value of From, there is a 8010 /// corresponding value in To in the same position with the same type. 8011 /// 8012 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) { 8013 #ifndef NDEBUG 8014 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8015 assert((!From->hasAnyUseOfValue(i) || 8016 From->getValueType(i) == To->getValueType(i)) && 8017 "Cannot use this version of ReplaceAllUsesWith!"); 8018 #endif 8019 8020 // Handle the trivial case. 8021 if (From == To) 8022 return; 8023 8024 // Preserve Debug Info. Only do this if there's a use. 8025 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8026 if (From->hasAnyUseOfValue(i)) { 8027 assert((i < To->getNumValues()) && "Invalid To location"); 8028 transferDbgValues(SDValue(From, i), SDValue(To, i)); 8029 } 8030 8031 // Iterate over just the existing users of From. See the comments in 8032 // the ReplaceAllUsesWith above. 8033 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8034 RAUWUpdateListener Listener(*this, UI, UE); 8035 while (UI != UE) { 8036 SDNode *User = *UI; 8037 8038 // This node is about to morph, remove its old self from the CSE maps. 8039 RemoveNodeFromCSEMaps(User); 8040 8041 // A user can appear in a use list multiple times, and when this 8042 // happens the uses are usually next to each other in the list. 
8043 // To help reduce the number of CSE recomputations, process all 8044 // the uses of this user that we can find this way. 8045 do { 8046 SDUse &Use = UI.getUse(); 8047 ++UI; 8048 Use.setNode(To); 8049 if (To->isDivergent() != From->isDivergent()) 8050 updateDivergence(User); 8051 } while (UI != UE && *UI == User); 8052 8053 // Now that we have modified User, add it back to the CSE maps. If it 8054 // already exists there, recursively merge the results together. 8055 AddModifiedNodeToCSEMaps(User); 8056 } 8057 8058 // If we just RAUW'd the root, take note. 8059 if (From == getRoot().getNode()) 8060 setRoot(SDValue(To, getRoot().getResNo())); 8061 } 8062 8063 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8064 /// This can cause recursive merging of nodes in the DAG. 8065 /// 8066 /// This version can replace From with any result values. To must match the 8067 /// number and types of values returned by From. 8068 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 8069 if (From->getNumValues() == 1) // Handle the simple case efficiently. 8070 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 8071 8072 // Preserve Debug Info. 8073 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8074 transferDbgValues(SDValue(From, i), To[i]); 8075 8076 // Iterate over just the existing users of From. See the comments in 8077 // the ReplaceAllUsesWith above. 8078 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8079 RAUWUpdateListener Listener(*this, UI, UE); 8080 while (UI != UE) { 8081 SDNode *User = *UI; 8082 8083 // This node is about to morph, remove its old self from the CSE maps. 8084 RemoveNodeFromCSEMaps(User); 8085 8086 // A user can appear in a use list multiple times, and when this happens the 8087 // uses are usually next to each other in the list. To help reduce the 8088 // number of CSE and divergence recomputations, process all the uses of this 8089 // user that we can find this way. 8090 bool To_IsDivergent = false; 8091 do { 8092 SDUse &Use = UI.getUse(); 8093 const SDValue &ToOp = To[Use.getResNo()]; 8094 ++UI; 8095 Use.set(ToOp); 8096 To_IsDivergent |= ToOp->isDivergent(); 8097 } while (UI != UE && *UI == User); 8098 8099 if (To_IsDivergent != From->isDivergent()) 8100 updateDivergence(User); 8101 8102 // Now that we have modified User, add it back to the CSE maps. If it 8103 // already exists there, recursively merge the results together. 8104 AddModifiedNodeToCSEMaps(User); 8105 } 8106 8107 // If we just RAUW'd the root, take note. 8108 if (From == getRoot().getNode()) 8109 setRoot(SDValue(To[getRoot().getResNo()])); 8110 } 8111 8112 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 8113 /// uses of other values produced by From.getNode() alone. The Deleted 8114 /// vector is handled the same way as for ReplaceAllUsesWith. 8115 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 8116 // Handle the really simple, really trivial case efficiently. 8117 if (From == To) return; 8118 8119 // Handle the simple, trivial, case efficiently. 8120 if (From.getNode()->getNumValues() == 1) { 8121 ReplaceAllUsesWith(From, To); 8122 return; 8123 } 8124 8125 // Preserve Debug Info. 8126 transferDbgValues(From, To); 8127 8128 // Iterate over just the existing users of From. See the comments in 8129 // the ReplaceAllUsesWith above. 
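  // Note (illustrative): for a multi-result node such as a load (value 0 is
  // the loaded data, value 1 is the output chain), only SDUses whose result
  // number matches From.getResNo() are rewritten below; uses of the node's
  // other values are deliberately left alone.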
8130 SDNode::use_iterator UI = From.getNode()->use_begin(), 8131 UE = From.getNode()->use_end(); 8132 RAUWUpdateListener Listener(*this, UI, UE); 8133 while (UI != UE) { 8134 SDNode *User = *UI; 8135 bool UserRemovedFromCSEMaps = false; 8136 8137 // A user can appear in a use list multiple times, and when this 8138 // happens the uses are usually next to each other in the list. 8139 // To help reduce the number of CSE recomputations, process all 8140 // the uses of this user that we can find this way. 8141 do { 8142 SDUse &Use = UI.getUse(); 8143 8144 // Skip uses of different values from the same node. 8145 if (Use.getResNo() != From.getResNo()) { 8146 ++UI; 8147 continue; 8148 } 8149 8150 // If this node hasn't been modified yet, it's still in the CSE maps, 8151 // so remove its old self from the CSE maps. 8152 if (!UserRemovedFromCSEMaps) { 8153 RemoveNodeFromCSEMaps(User); 8154 UserRemovedFromCSEMaps = true; 8155 } 8156 8157 ++UI; 8158 Use.set(To); 8159 if (To->isDivergent() != From->isDivergent()) 8160 updateDivergence(User); 8161 } while (UI != UE && *UI == User); 8162 // We are iterating over all uses of the From node, so if a use 8163 // doesn't use the specific value, no changes are made. 8164 if (!UserRemovedFromCSEMaps) 8165 continue; 8166 8167 // Now that we have modified User, add it back to the CSE maps. If it 8168 // already exists there, recursively merge the results together. 8169 AddModifiedNodeToCSEMaps(User); 8170 } 8171 8172 // If we just RAUW'd the root, take note. 8173 if (From == getRoot()) 8174 setRoot(To); 8175 } 8176 8177 namespace { 8178 8179 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 8180 /// to record information about a use. 8181 struct UseMemo { 8182 SDNode *User; 8183 unsigned Index; 8184 SDUse *Use; 8185 }; 8186 8187 /// operator< - Sort Memos by User. 
bool operator<(const UseMemo &L, const UseMemo &R) {
  return (intptr_t)L.User < (intptr_t)R.User;
}

} // end anonymous namespace

void SelectionDAG::updateDivergence(SDNode *N) {
  if (TLI->isSDNodeAlwaysUniform(N))
    return;
  bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
  for (auto &Op : N->ops()) {
    if (Op.Val.getValueType() != MVT::Other)
      IsDivergent |= Op.getNode()->isDivergent();
  }
  if (N->SDNodeBits.IsDivergent != IsDivergent) {
    N->SDNodeBits.IsDivergent = IsDivergent;
    for (auto U : N->uses())
      updateDivergence(U);
  }
}

void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
  DenseMap<SDNode *, unsigned> Degree;
  Order.reserve(AllNodes.size());
  for (auto &N : allnodes()) {
    unsigned NOps = N.getNumOperands();
    Degree[&N] = NOps;
    if (NOps == 0)
      Order.push_back(&N);
  }
  for (std::vector<SDNode *>::iterator I = Order.begin(); I != Order.end();
       ++I) {
    SDNode *N = *I;
    for (auto U : N->uses()) {
      unsigned &UnsortedOps = Degree[U];
      if (--UnsortedOps == 0)
        Order.push_back(U);
    }
  }
}

#ifndef NDEBUG
void SelectionDAG::VerifyDAGDiverence() {
  std::vector<SDNode *> TopoOrder;
  CreateTopologicalOrder(TopoOrder);
  const TargetLowering &TLI = getTargetLoweringInfo();
  DenseMap<const SDNode *, bool> DivergenceMap;
  for (auto &N : allnodes())
    DivergenceMap[&N] = false;
  for (auto N : TopoOrder) {
    bool IsDivergent = DivergenceMap[N];
    bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
    for (auto &Op : N->ops()) {
      if (Op.Val.getValueType() != MVT::Other)
        IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
    }
    if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N))
      DivergenceMap[N] = true;
  }
  for (auto &N : allnodes()) {
    (void)N;
    assert(DivergenceMap[&N] == N.isDivergent() &&
           "Divergence bit inconsistency detected");
  }
}
#endif

/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
/// may appear in both the From and To list. The Deleted vector is
/// handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  transferDbgValues(*From, *To);

  // Read up all the uses and make records of them. This helps
  // processing new uses that are introduced during the
  // replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
                              E = FromNode->use_end();
         UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
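  // For example (hypothetical scenario): if some node U uses From[0] twice
  // and From[2] once, its three UseMemo entries become adjacent after the
  // sort, so U is removed from and re-added to the CSE maps only once while
  // all three of its uses are rewritten.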
  llvm::sort(Uses);

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From.  If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. It returns the number of nodes, which
/// equals the largest assigned id plus one.
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values. After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = &*I++;
    checkForCycles(N, this);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q(N);
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (SDNode &Node : allnodes()) {
    SDNode *N = &Node;
    checkForCycles(N, this);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P->getIterator() != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (Node.getIterator() == SortedPos) {
#ifndef NDEBUG
      allnodes_iterator I(N);
      SDNode *S = &*++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull(this); dbgs() << "\n";
      dbgs() << "Checking if this is due to cycles\n";
      checkForCycles(this, true);
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  if (SD) {
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, SD, isParameter);
}

void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
  DbgInfo->add(DB);
}

SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
                                                   SDValue NewMemOp) {
  assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
  // The new memory operation must have the same position as the old load in
  // terms of memory dependency. Create a TokenFactor for the old load and new
  // memory operation and update uses of the old load's output chain to use
  // that TokenFactor.
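  // Sketch of the resulting chain structure (illustrative):
  //   before:  chain users --> OldLoad's chain result (value #1)
  //   after:   chain users --> TokenFactor(OldLoad chain, NewMemOp chain)
  // Note that the ReplaceAllUsesOfValueWith call below also redirects the
  // TokenFactor's own OldChain operand to the TokenFactor itself, which is
  // why UpdateNodeOperands is called afterwards to restore the intended
  // (OldChain, NewChain) operands.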
8433 SDValue OldChain = SDValue(OldLoad, 1); 8434 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 8435 if (!OldLoad->hasAnyUseOfValue(1)) 8436 return NewChain; 8437 8438 SDValue TokenFactor = 8439 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 8440 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 8441 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 8442 return TokenFactor; 8443 } 8444 8445 //===----------------------------------------------------------------------===// 8446 // SDNode Class 8447 //===----------------------------------------------------------------------===// 8448 8449 bool llvm::isNullConstant(SDValue V) { 8450 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8451 return Const != nullptr && Const->isNullValue(); 8452 } 8453 8454 bool llvm::isNullFPConstant(SDValue V) { 8455 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 8456 return Const != nullptr && Const->isZero() && !Const->isNegative(); 8457 } 8458 8459 bool llvm::isAllOnesConstant(SDValue V) { 8460 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8461 return Const != nullptr && Const->isAllOnesValue(); 8462 } 8463 8464 bool llvm::isOneConstant(SDValue V) { 8465 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8466 return Const != nullptr && Const->isOne(); 8467 } 8468 8469 SDValue llvm::peekThroughBitcasts(SDValue V) { 8470 while (V.getOpcode() == ISD::BITCAST) 8471 V = V.getOperand(0); 8472 return V; 8473 } 8474 8475 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { 8476 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) 8477 V = V.getOperand(0); 8478 return V; 8479 } 8480 8481 bool llvm::isBitwiseNot(SDValue V) { 8482 if (V.getOpcode() != ISD::XOR) 8483 return false; 8484 ConstantSDNode *C = isConstOrConstSplat(peekThroughBitcasts(V.getOperand(1))); 8485 return C && C->isAllOnesValue(); 8486 } 8487 8488 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs) { 8489 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8490 return CN; 8491 8492 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8493 BitVector UndefElements; 8494 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 8495 8496 // BuildVectors can truncate their operands. Ignore that case here. 8497 if (CN && (UndefElements.none() || AllowUndefs) && 8498 CN->getValueType(0) == N.getValueType().getScalarType()) 8499 return CN; 8500 } 8501 8502 return nullptr; 8503 } 8504 8505 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) { 8506 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 8507 return CN; 8508 8509 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8510 BitVector UndefElements; 8511 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 8512 if (CN && (UndefElements.none() || AllowUndefs)) 8513 return CN; 8514 } 8515 8516 return nullptr; 8517 } 8518 8519 bool llvm::isNullOrNullSplat(SDValue N) { 8520 // TODO: may want to use peekThroughBitcast() here. 8521 ConstantSDNode *C = isConstOrConstSplat(N); 8522 return C && C->isNullValue(); 8523 } 8524 8525 bool llvm::isOneOrOneSplat(SDValue N) { 8526 // TODO: may want to use peekThroughBitcast() here. 
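  // Example (illustrative): both a scalar constant (i32 1) and a splat
  // build_vector of i32 1s satisfy this predicate; the width check below
  // additionally requires the splat constant to be exactly as wide as the
  // vector's scalar element.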
8527 unsigned BitWidth = N.getScalarValueSizeInBits(); 8528 ConstantSDNode *C = isConstOrConstSplat(N); 8529 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth; 8530 } 8531 8532 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) { 8533 N = peekThroughBitcasts(N); 8534 unsigned BitWidth = N.getScalarValueSizeInBits(); 8535 ConstantSDNode *C = isConstOrConstSplat(N); 8536 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth; 8537 } 8538 8539 HandleSDNode::~HandleSDNode() { 8540 DropOperands(); 8541 } 8542 8543 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 8544 const DebugLoc &DL, 8545 const GlobalValue *GA, EVT VT, 8546 int64_t o, unsigned char TF) 8547 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 8548 TheGlobal = GA; 8549 } 8550 8551 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 8552 EVT VT, unsigned SrcAS, 8553 unsigned DestAS) 8554 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 8555 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 8556 8557 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 8558 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 8559 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 8560 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 8561 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 8562 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 8563 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 8564 8565 // We check here that the size of the memory operand fits within the size of 8566 // the MMO. This is because the MMO might indicate only a possible address 8567 // range instead of specifying the affected memory addresses precisely. 8568 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!"); 8569 } 8570 8571 /// Profile - Gather unique data for the node. 8572 /// 8573 void SDNode::Profile(FoldingSetNodeID &ID) const { 8574 AddNodeIDNode(ID, this); 8575 } 8576 8577 namespace { 8578 8579 struct EVTArray { 8580 std::vector<EVT> VTs; 8581 8582 EVTArray() { 8583 VTs.reserve(MVT::LAST_VALUETYPE); 8584 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 8585 VTs.push_back(MVT((MVT::SimpleValueType)i)); 8586 } 8587 }; 8588 8589 } // end anonymous namespace 8590 8591 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 8592 static ManagedStatic<EVTArray> SimpleVTArray; 8593 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 8594 8595 /// getValueTypeList - Return a pointer to the specified value type. 8596 /// 8597 const EVT *SDNode::getValueTypeList(EVT VT) { 8598 if (VT.isExtended()) { 8599 sys::SmartScopedLock<true> Lock(*VTMutex); 8600 return &(*EVTs->insert(VT).first); 8601 } else { 8602 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 8603 "Value type out of range!"); 8604 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 8605 } 8606 } 8607 8608 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 8609 /// indicated value. This method ignores uses of other values defined by this 8610 /// operation. 8611 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { 8612 assert(Value < getNumValues() && "Bad value!"); 8613 8614 // TODO: Only iterate over uses of a given value of the node 8615 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { 8616 if (UI.getUse().getResNo() == Value) { 8617 if (NUses == 0) 8618 return false; 8619 --NUses; 8620 } 8621 } 8622 8623 // Found exactly the right number of uses? 
8624 return NUses == 0; 8625 } 8626 8627 /// hasAnyUseOfValue - Return true if there are any use of the indicated 8628 /// value. This method ignores uses of other values defined by this operation. 8629 bool SDNode::hasAnyUseOfValue(unsigned Value) const { 8630 assert(Value < getNumValues() && "Bad value!"); 8631 8632 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) 8633 if (UI.getUse().getResNo() == Value) 8634 return true; 8635 8636 return false; 8637 } 8638 8639 /// isOnlyUserOf - Return true if this node is the only use of N. 8640 bool SDNode::isOnlyUserOf(const SDNode *N) const { 8641 bool Seen = false; 8642 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 8643 SDNode *User = *I; 8644 if (User == this) 8645 Seen = true; 8646 else 8647 return false; 8648 } 8649 8650 return Seen; 8651 } 8652 8653 /// Return true if the only users of N are contained in Nodes. 8654 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) { 8655 bool Seen = false; 8656 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 8657 SDNode *User = *I; 8658 if (llvm::any_of(Nodes, 8659 [&User](const SDNode *Node) { return User == Node; })) 8660 Seen = true; 8661 else 8662 return false; 8663 } 8664 8665 return Seen; 8666 } 8667 8668 /// isOperand - Return true if this node is an operand of N. 8669 bool SDValue::isOperandOf(const SDNode *N) const { 8670 for (const SDValue &Op : N->op_values()) 8671 if (*this == Op) 8672 return true; 8673 return false; 8674 } 8675 8676 bool SDNode::isOperandOf(const SDNode *N) const { 8677 for (const SDValue &Op : N->op_values()) 8678 if (this == Op.getNode()) 8679 return true; 8680 return false; 8681 } 8682 8683 /// reachesChainWithoutSideEffects - Return true if this operand (which must 8684 /// be a chain) reaches the specified operand without crossing any 8685 /// side-effecting instructions on any chain path. In practice, this looks 8686 /// through token factors and non-volatile loads. In order to remain efficient, 8687 /// this only looks a couple of nodes in, it does not do an exhaustive search. 8688 /// 8689 /// Note that we only need to examine chains when we're searching for 8690 /// side-effects; SelectionDAG requires that all side-effects are represented 8691 /// by chains, even if another operand would force a specific ordering. This 8692 /// constraint is necessary to allow transformations like splitting loads. 8693 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, 8694 unsigned Depth) const { 8695 if (*this == Dest) return true; 8696 8697 // Don't search too deeply, we just want to be able to see through 8698 // TokenFactor's etc. 8699 if (Depth == 0) return false; 8700 8701 // If this is a token factor, all inputs to the TF happen in parallel. 8702 if (getOpcode() == ISD::TokenFactor) { 8703 // First, try a shallow search. 8704 if (is_contained((*this)->ops(), Dest)) { 8705 // We found the chain we want as an operand of this TokenFactor. 8706 // Essentially, we reach the chain without side-effects if we could 8707 // serialize the TokenFactor into a simple chain of operations with 8708 // Dest as the last operation. This is automatically true if the 8709 // chain has one use: there are no other ordering constraints. 8710 // If the chain has more than one use, we give up: some other 8711 // use of Dest might force a side-effect between Dest and the current 8712 // node. 
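      // Illustrative case (hypothetical): if Dest's chain is also consumed by
      // a store whose result feeds another operand of this TokenFactor, that
      // store is a side effect ordered between Dest and the current node, so
      // the shallow check below only succeeds when Dest has a single use.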
8713 if (Dest.hasOneUse()) 8714 return true; 8715 } 8716 // Next, try a deep search: check whether every operand of the TokenFactor 8717 // reaches Dest. 8718 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 8719 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 8720 }); 8721 } 8722 8723 // Loads don't have side effects, look through them. 8724 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 8725 if (!Ld->isVolatile()) 8726 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 8727 } 8728 return false; 8729 } 8730 8731 bool SDNode::hasPredecessor(const SDNode *N) const { 8732 SmallPtrSet<const SDNode *, 32> Visited; 8733 SmallVector<const SDNode *, 16> Worklist; 8734 Worklist.push_back(this); 8735 return hasPredecessorHelper(N, Visited, Worklist); 8736 } 8737 8738 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 8739 this->Flags.intersectWith(Flags); 8740 } 8741 8742 SDValue 8743 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, 8744 ArrayRef<ISD::NodeType> CandidateBinOps) { 8745 // The pattern must end in an extract from index 0. 8746 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || 8747 !isNullConstant(Extract->getOperand(1))) 8748 return SDValue(); 8749 8750 SDValue Op = Extract->getOperand(0); 8751 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements()); 8752 8753 // Match against one of the candidate binary ops. 8754 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) { 8755 return Op.getOpcode() == unsigned(BinOp); 8756 })) 8757 return SDValue(); 8758 8759 // At each stage, we're looking for something that looks like: 8760 // %s = shufflevector <8 x i32> %op, <8 x i32> undef, 8761 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, 8762 // i32 undef, i32 undef, i32 undef, i32 undef> 8763 // %a = binop <8 x i32> %op, %s 8764 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid, 8765 // we expect something like: 8766 // <4,5,6,7,u,u,u,u> 8767 // <2,3,u,u,u,u,u,u> 8768 // <1,u,u,u,u,u,u,u> 8769 unsigned CandidateBinOp = Op.getOpcode(); 8770 for (unsigned i = 0; i < Stages; ++i) { 8771 if (Op.getOpcode() != CandidateBinOp) 8772 return SDValue(); 8773 8774 SDValue Op0 = Op.getOperand(0); 8775 SDValue Op1 = Op.getOperand(1); 8776 8777 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0); 8778 if (Shuffle) { 8779 Op = Op1; 8780 } else { 8781 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1); 8782 Op = Op0; 8783 } 8784 8785 // The first operand of the shuffle should be the same as the other operand 8786 // of the binop. 8787 if (!Shuffle || Shuffle->getOperand(0) != Op) 8788 return SDValue(); 8789 8790 // Verify the shuffle has the expected (at this stage of the pyramid) mask. 8791 for (int Index = 0, MaskEnd = 1 << i; Index < MaskEnd; ++Index) 8792 if (Shuffle->getMaskElt(Index) != MaskEnd + Index) 8793 return SDValue(); 8794 } 8795 8796 BinOp = (ISD::NodeType)CandidateBinOp; 8797 return Op; 8798 } 8799 8800 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 8801 assert(N->getNumValues() == 1 && 8802 "Can't unroll a vector with multiple results!"); 8803 8804 EVT VT = N->getValueType(0); 8805 unsigned NE = VT.getVectorNumElements(); 8806 EVT EltVT = VT.getVectorElementType(); 8807 SDLoc dl(N); 8808 8809 SmallVector<SDValue, 8> Scalars; 8810 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 8811 8812 // If ResNE is 0, fully unroll the vector op. 
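  // Example (illustrative): unrolling (add v4i32 %a, %b) with ResNE == 0
  // yields four i32 ADD nodes on EXTRACT_VECTOR_ELT'd operands, which are
  // then reassembled below with a four-element BUILD_VECTOR.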
8813 if (ResNE == 0) 8814 ResNE = NE; 8815 else if (NE > ResNE) 8816 NE = ResNE; 8817 8818 unsigned i; 8819 for (i= 0; i != NE; ++i) { 8820 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 8821 SDValue Operand = N->getOperand(j); 8822 EVT OperandVT = Operand.getValueType(); 8823 if (OperandVT.isVector()) { 8824 // A vector operand; extract a single element. 8825 EVT OperandEltVT = OperandVT.getVectorElementType(); 8826 Operands[j] = 8827 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand, 8828 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout()))); 8829 } else { 8830 // A scalar operand; just use it as is. 8831 Operands[j] = Operand; 8832 } 8833 } 8834 8835 switch (N->getOpcode()) { 8836 default: { 8837 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 8838 N->getFlags())); 8839 break; 8840 } 8841 case ISD::VSELECT: 8842 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 8843 break; 8844 case ISD::SHL: 8845 case ISD::SRA: 8846 case ISD::SRL: 8847 case ISD::ROTL: 8848 case ISD::ROTR: 8849 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 8850 getShiftAmountOperand(Operands[0].getValueType(), 8851 Operands[1]))); 8852 break; 8853 case ISD::SIGN_EXTEND_INREG: 8854 case ISD::FP_ROUND_INREG: { 8855 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 8856 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 8857 Operands[0], 8858 getValueType(ExtVT))); 8859 } 8860 } 8861 } 8862 8863 for (; i < ResNE; ++i) 8864 Scalars.push_back(getUNDEF(EltVT)); 8865 8866 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 8867 return getBuildVector(VecVT, dl, Scalars); 8868 } 8869 8870 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 8871 LoadSDNode *Base, 8872 unsigned Bytes, 8873 int Dist) const { 8874 if (LD->isVolatile() || Base->isVolatile()) 8875 return false; 8876 if (LD->isIndexed() || Base->isIndexed()) 8877 return false; 8878 if (LD->getChain() != Base->getChain()) 8879 return false; 8880 EVT VT = LD->getValueType(0); 8881 if (VT.getSizeInBits() / 8 != Bytes) 8882 return false; 8883 8884 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); 8885 auto LocDecomp = BaseIndexOffset::match(LD, *this); 8886 8887 int64_t Offset = 0; 8888 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset)) 8889 return (Dist * Bytes == Offset); 8890 return false; 8891 } 8892 8893 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if 8894 /// it cannot be inferred. 8895 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { 8896 // If this is a GlobalAddress + cst, return the alignment. 8897 const GlobalValue *GV; 8898 int64_t GVOffset = 0; 8899 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { 8900 unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType()); 8901 KnownBits Known(IdxWidth); 8902 llvm::computeKnownBits(GV, Known, getDataLayout()); 8903 unsigned AlignBits = Known.countMinTrailingZeros(); 8904 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; 8905 if (Align) 8906 return MinAlign(Align, GVOffset); 8907 } 8908 8909 // If this is a direct reference to a stack slot, use information about the 8910 // stack slot's alignment. 
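  // (1 << 31) serves as a "no frame index" sentinel for the check further
  // down; it is only overwritten when Ptr really refers to a stack slot.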
8911 int FrameIdx = 1 << 31; 8912 int64_t FrameOffset = 0; 8913 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) { 8914 FrameIdx = FI->getIndex(); 8915 } else if (isBaseWithConstantOffset(Ptr) && 8916 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 8917 // Handle FI+Cst 8918 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 8919 FrameOffset = Ptr.getConstantOperandVal(1); 8920 } 8921 8922 if (FrameIdx != (1 << 31)) { 8923 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 8924 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx), 8925 FrameOffset); 8926 return FIInfoAlign; 8927 } 8928 8929 return 0; 8930 } 8931 8932 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type 8933 /// which is split (or expanded) into two not necessarily identical pieces. 8934 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const { 8935 // Currently all types are split in half. 8936 EVT LoVT, HiVT; 8937 if (!VT.isVector()) 8938 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); 8939 else 8940 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext()); 8941 8942 return std::make_pair(LoVT, HiVT); 8943 } 8944 8945 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the 8946 /// low/high part. 8947 std::pair<SDValue, SDValue> 8948 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, 8949 const EVT &HiVT) { 8950 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <= 8951 N.getValueType().getVectorNumElements() && 8952 "More vector elements requested than available!"); 8953 SDValue Lo, Hi; 8954 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, 8955 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout()))); 8956 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, 8957 getConstant(LoVT.getVectorNumElements(), DL, 8958 TLI->getVectorIdxTy(getDataLayout()))); 8959 return std::make_pair(Lo, Hi); 8960 } 8961 8962 void SelectionDAG::ExtractVectorElements(SDValue Op, 8963 SmallVectorImpl<SDValue> &Args, 8964 unsigned Start, unsigned Count) { 8965 EVT VT = Op.getValueType(); 8966 if (Count == 0) 8967 Count = VT.getVectorNumElements(); 8968 8969 EVT EltVT = VT.getVectorElementType(); 8970 EVT IdxTy = TLI->getVectorIdxTy(getDataLayout()); 8971 SDLoc SL(Op); 8972 for (unsigned i = Start, e = Start + Count; i != e; ++i) { 8973 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 8974 Op, getConstant(i, SL, IdxTy))); 8975 } 8976 } 8977 8978 // getAddressSpace - Return the address space this GlobalAddress belongs to. 8979 unsigned GlobalAddressSDNode::getAddressSpace() const { 8980 return getGlobal()->getType()->getAddressSpace(); 8981 } 8982 8983 Type *ConstantPoolSDNode::getType() const { 8984 if (isMachineConstantPoolEntry()) 8985 return Val.MachineCPVal->getType(); 8986 return Val.ConstVal->getType(); 8987 } 8988 8989 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef, 8990 unsigned &SplatBitSize, 8991 bool &HasAnyUndefs, 8992 unsigned MinSplatBits, 8993 bool IsBigEndian) const { 8994 EVT VT = getValueType(0); 8995 assert(VT.isVector() && "Expected a vector type"); 8996 unsigned VecWidth = VT.getSizeInBits(); 8997 if (MinSplatBits > VecWidth) 8998 return false; 8999 9000 // FIXME: The widths are based on this node's type, but build vectors can 9001 // truncate their operands. 9002 SplatValue = APInt(VecWidth, 0); 9003 SplatUndef = APInt(VecWidth, 0); 9004 9005 // Get the bits. 
Bits with undefined values (when the corresponding element 9006 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared 9007 // in SplatValue. If any of the values are not constant, give up and return 9008 // false. 9009 unsigned int NumOps = getNumOperands(); 9010 assert(NumOps > 0 && "isConstantSplat has 0-size build vector"); 9011 unsigned EltWidth = VT.getScalarSizeInBits(); 9012 9013 for (unsigned j = 0; j < NumOps; ++j) { 9014 unsigned i = IsBigEndian ? NumOps - 1 - j : j; 9015 SDValue OpVal = getOperand(i); 9016 unsigned BitPos = j * EltWidth; 9017 9018 if (OpVal.isUndef()) 9019 SplatUndef.setBits(BitPos, BitPos + EltWidth); 9020 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal)) 9021 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos); 9022 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal)) 9023 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos); 9024 else 9025 return false; 9026 } 9027 9028 // The build_vector is all constants or undefs. Find the smallest element 9029 // size that splats the vector. 9030 HasAnyUndefs = (SplatUndef != 0); 9031 9032 // FIXME: This does not work for vectors with elements less than 8 bits. 9033 while (VecWidth > 8) { 9034 unsigned HalfSize = VecWidth / 2; 9035 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize); 9036 APInt LowValue = SplatValue.trunc(HalfSize); 9037 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize); 9038 APInt LowUndef = SplatUndef.trunc(HalfSize); 9039 9040 // If the two halves do not match (ignoring undef bits), stop here. 9041 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) || 9042 MinSplatBits > HalfSize) 9043 break; 9044 9045 SplatValue = HighValue | LowValue; 9046 SplatUndef = HighUndef & LowUndef; 9047 9048 VecWidth = HalfSize; 9049 } 9050 9051 SplatBitSize = VecWidth; 9052 return true; 9053 } 9054 9055 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { 9056 if (UndefElements) { 9057 UndefElements->clear(); 9058 UndefElements->resize(getNumOperands()); 9059 } 9060 SDValue Splatted; 9061 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 9062 SDValue Op = getOperand(i); 9063 if (Op.isUndef()) { 9064 if (UndefElements) 9065 (*UndefElements)[i] = true; 9066 } else if (!Splatted) { 9067 Splatted = Op; 9068 } else if (Splatted != Op) { 9069 return SDValue(); 9070 } 9071 } 9072 9073 if (!Splatted) { 9074 assert(getOperand(0).isUndef() && 9075 "Can only have a splat without a constant for all undefs."); 9076 return getOperand(0); 9077 } 9078 9079 return Splatted; 9080 } 9081 9082 ConstantSDNode * 9083 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const { 9084 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements)); 9085 } 9086 9087 ConstantFPSDNode * 9088 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const { 9089 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements)); 9090 } 9091 9092 int32_t 9093 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, 9094 uint32_t BitWidth) const { 9095 if (ConstantFPSDNode *CN = 9096 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) { 9097 bool IsExact; 9098 APSInt IntVal(BitWidth); 9099 const APFloat &APF = CN->getValueAPF(); 9100 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) != 9101 APFloat::opOK || 9102 !IsExact) 9103 return -1; 9104 9105 return IntVal.exactLogBase2(); 9106 } 9107 return -1; 9108 } 9109 9110 bool BuildVectorSDNode::isConstant() const { 
9111 for (const SDValue &Op : op_values()) { 9112 unsigned Opc = Op.getOpcode(); 9113 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP) 9114 return false; 9115 } 9116 return true; 9117 } 9118 9119 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { 9120 // Find the first non-undef value in the shuffle mask. 9121 unsigned i, e; 9122 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i) 9123 /* search */; 9124 9125 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!"); 9126 9127 // Make sure all remaining elements are either undef or the same as the first 9128 // non-undef value. 9129 for (int Idx = Mask[i]; i != e; ++i) 9130 if (Mask[i] >= 0 && Mask[i] != Idx) 9131 return false; 9132 return true; 9133 } 9134 9135 // Returns the SDNode if it is a constant integer BuildVector 9136 // or constant integer. 9137 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) { 9138 if (isa<ConstantSDNode>(N)) 9139 return N.getNode(); 9140 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) 9141 return N.getNode(); 9142 // Treat a GlobalAddress supporting constant offset folding as a 9143 // constant integer. 9144 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N)) 9145 if (GA->getOpcode() == ISD::GlobalAddress && 9146 TLI->isOffsetFoldingLegal(GA)) 9147 return GA; 9148 return nullptr; 9149 } 9150 9151 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) { 9152 if (isa<ConstantFPSDNode>(N)) 9153 return N.getNode(); 9154 9155 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode())) 9156 return N.getNode(); 9157 9158 return nullptr; 9159 } 9160 9161 void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) { 9162 assert(!Node->OperandList && "Node already has operands"); 9163 assert(std::numeric_limits<decltype(SDNode::NumOperands)>::max() > 9164 Vals.size() && 9165 "too many operands to fit into SDNode"); 9166 SDUse *Ops = OperandRecycler.allocate( 9167 ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator); 9168 9169 bool IsDivergent = false; 9170 for (unsigned I = 0; I != Vals.size(); ++I) { 9171 Ops[I].setUser(Node); 9172 Ops[I].setInitial(Vals[I]); 9173 if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence. 9174 IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent(); 9175 } 9176 Node->NumOperands = Vals.size(); 9177 Node->OperandList = Ops; 9178 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA); 9179 if (!TLI->isSDNodeAlwaysUniform(Node)) 9180 Node->SDNodeBits.IsDivergent = IsDivergent; 9181 checkForCycles(Node); 9182 } 9183 9184 #ifndef NDEBUG 9185 static void checkForCyclesHelper(const SDNode *N, 9186 SmallPtrSetImpl<const SDNode*> &Visited, 9187 SmallPtrSetImpl<const SDNode*> &Checked, 9188 const llvm::SelectionDAG *DAG) { 9189 // If this node has already been checked, don't check it again. 9190 if (Checked.count(N)) 9191 return; 9192 9193 // If a node has already been visited on this depth-first walk, reject it as 9194 // a cycle. 
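  // Visited holds the nodes on the current depth-first path (each entry is
  // erased again on the way back up), while Checked accumulates nodes whose
  // entire operand subgraph has already been verified to be acyclic.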
9195 if (!Visited.insert(N).second) { 9196 errs() << "Detected cycle in SelectionDAG\n"; 9197 dbgs() << "Offending node:\n"; 9198 N->dumprFull(DAG); dbgs() << "\n"; 9199 abort(); 9200 } 9201 9202 for (const SDValue &Op : N->op_values()) 9203 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG); 9204 9205 Checked.insert(N); 9206 Visited.erase(N); 9207 } 9208 #endif 9209 9210 void llvm::checkForCycles(const llvm::SDNode *N, 9211 const llvm::SelectionDAG *DAG, 9212 bool force) { 9213 #ifndef NDEBUG 9214 bool check = force; 9215 #ifdef EXPENSIVE_CHECKS 9216 check = true; 9217 #endif // EXPENSIVE_CHECKS 9218 if (check) { 9219 assert(N && "Checking nonexistent SDNode"); 9220 SmallPtrSet<const SDNode*, 32> visited; 9221 SmallPtrSet<const SDNode*, 32> checked; 9222 checkForCyclesHelper(N, visited, checked, DAG); 9223 } 9224 #endif // !NDEBUG 9225 } 9226 9227 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) { 9228 checkForCycles(DAG->getRoot().getNode(), DAG, force); 9229 } 9230
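// Usage sketch (illustrative, assuming a debug build and a SelectionDAG *DAG
// in scope): a target-specific DAG mutation can request the cycle check
// explicitly, independent of EXPENSIVE_CHECKS, with
//   checkForCycles(DAG, /*force=*/true);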