//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}
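
// Note: value type lists are interned by getVTList (see AddNodeIDValueTypes
// below), so an SDVTList is identified by its VTs pointer alone; two nodes
// with the same result types always share the same pointer.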
// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

#define DEBUG_TYPE "selectiondag"

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  DEBUG(
    dbgs() << Msg;
    V.getNode()->dump(G);
  );
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT, const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}
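
// For example, (BUILD_VECTOR i32 7, i32 7, i32 7, i32 7) sets SplatVal to 7
// and returns true. The EltSize == SplatBitSize check rejects values that are
// only a splat at some other width (e.g. a v4i32 whose bits form a splat only
// when viewed as v2i64).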
// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}
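
// For example, the all-ones/all-zeros predicates above look through bitcasts:
// (v2i64 (bitcast (BUILD_VECTOR i32 -1, -1, -1, -1))) is recognized as
// all-ones, because only enough low bits of each constant are checked to
// cover one element, so type-legalized constants still match.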
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (!Op.isUndef())
      return false;

  return true;
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match) {
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match) {
  if (LHS.getValueType() != RHS.getValueType())
    return false;

  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHS.getOperand(i));
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHS.getOperand(i));
    if (!LHSCst || !RHSCst)
      return false;
    if (LHSCst->getValueType(0) != SVT ||
        LHSCst->getValueType(0) != RHSCst->getValueType(0))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
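
// Worked example for the bit manipulation above. The ISD::CondCode encoding
// uses bit 0 for E, bit 1 for G, bit 2 for L, bit 3 for U, and bit 4 (N) to
// mark integer-only codes. Swapping the operands of SETUGT (U|G) exchanges
// the L and G bits, giving SETULT (U|L); inverting the integer code
// SETLT (N|L) flips the L, G, and E bits, giving SETGE (N|G|E).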
/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if it is an unsigned comparison. Return zero if the operation does
/// not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the N bit; the U bit already records unorderedness.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent
/// them solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID
/// data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID
/// data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}
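
// These NodeID routines drive the DAG's folding-set CSE: a node's identity is
// its opcode, its interned value-type list pointer, and its (operand,
// result-number) pairs, plus any custom fields added below. For example, two
// calls to getConstant(42, DL, MVT::i32) build identical FoldingSetNodeIDs
// and therefore return the same node.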
/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to the next node if we've already managed to delete this one. This
    // could happen if replacing another node caused a node previously added
    // to the worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
        std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                             ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}
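
// A non-null result from these FindModifiedNodeSlot overloads means an
// identical node already exists; callers such as UpdateNodeOperands then CSE
// into that node instead of mutating N in place.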
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        DivergenceAnalysis *Divergence) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase the debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses, as that
      // would degrade the single-stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType().getScalarType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                              EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}
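
// For example, (v2i64 any_extend_vector_inreg (v4i32 X)) keeps the total
// vector width at 128 bits but produces only two lanes, each any-extended
// from one of the two low i32 lanes of X. The sign- and zero-extending
// variants below behave the same way, differing only in how the high bits of
// each widened lane are filled.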
SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
      getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}
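
// The assert above accepts Val iff it fits the element type as either a
// sign- or zero-extended value: arithmetically shifting Val right by the
// type width must leave 0 or -1, which is exactly what
// ((int64_t)Val >> Bits) + 1 < 2 tests. For an i8 constant, 255 and
// 0xFFFFFFFFFFFFFFFF both pass; 256 does not.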
SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check that the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different from
    // the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V =
        getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0,
  // and we don't have issues with SNaNs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent constant pools");
  if (Alignment == 0)
    Alignment = MF->getFunction().optForSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent constant pools");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
                           ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
      ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                                 TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
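/// For example, with 4-element vectors, mask <0,5,2,7> selecting from
/// (N1, N2) becomes mask <4,1,6,3> selecting from (N2, N1).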
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  if (TLI->hasVectorBlend()) {
    // If shuffling a splat, try to blend the splat instead. We do this here so
    // that even when this arises during lowering we don't have to re-handle it.
    auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      if (!Splat)
        return;

      for (int i = 0; i < NElts; ++i) {
        if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
          continue;

        // If this input comes from undef, mark it as such.
        if (UndefElements[MaskVec[i] - Offset]) {
          MaskVec[i] = -1;
          continue;
        }

        // If we can blend a non-undef lane, use that instead.
        if (!UndefElements[i])
          MaskVec[i] = i + Offset;
      }
    };
    if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
      BlendSplat(N1BV, 0);
    if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
      BlendSplat(N2BV, NElts);
  }

  // Canonicalize all indices into lhs -> shuffle lhs, undef
  // Canonicalize all indices into rhs -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If this is an identity shuffle, return the first operand.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
1628 if (N2Undef) { 1629 SDValue V = N1; 1630 1631 // Look through any bitcasts. We check that these don't change the number 1632 // (and size) of elements and just change their types. 1633 while (V.getOpcode() == ISD::BITCAST) 1634 V = V->getOperand(0); 1635 1636 // A splat should always show up as a build vector node. 1637 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 1638 BitVector UndefElements; 1639 SDValue Splat = BV->getSplatValue(&UndefElements); 1640 // If this is a splat of an undef, shuffling it is also undef. 1641 if (Splat && Splat.isUndef()) 1642 return getUNDEF(VT); 1643 1644 bool SameNumElts = 1645 V.getValueType().getVectorNumElements() == VT.getVectorNumElements(); 1646 1647 // We can only skip the shuffle if there is a splatted value and no undef 1648 // lanes are rearranged by the shuffle. 1649 if (Splat && UndefElements.none()) { 1650 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the 1651 // number of elements matches or the splatted value is a zero constant. 1652 if (SameNumElts) 1653 return N1; 1654 if (auto *C = dyn_cast<ConstantSDNode>(Splat)) 1655 if (C->isNullValue()) 1656 return N1; 1657 } 1658 1659 // If the shuffle itself creates a splat, build the vector directly. 1660 if (AllSame && SameNumElts) { 1661 EVT BuildVT = BV->getValueType(0); 1662 const SDValue &Splatted = BV->getOperand(MaskVec[0]); 1663 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted); 1664 1665 // We may have jumped through bitcasts, so the type of the 1666 // BUILD_VECTOR may not match the type of the shuffle. 1667 if (BuildVT != VT) 1668 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV); 1669 return NewBV; 1670 } 1671 } 1672 } 1673 1674 FoldingSetNodeID ID; 1675 SDValue Ops[2] = { N1, N2 }; 1676 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops); 1677 for (int i = 0; i != NElts; ++i) 1678 ID.AddInteger(MaskVec[i]); 1679 1680 void* IP = nullptr; 1681 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1682 return SDValue(E, 0); 1683 1684 // Allocate the mask array for the node out of the BumpPtrAllocator, since 1685 // SDNode doesn't have access to it. This memory will be "leaked" when 1686 // the node is deallocated, but recovered when the NodeAllocator is released.
1687 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1688 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc); 1689 1690 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1691 dl.getDebugLoc(), MaskAlloc); 1692 createOperands(N, Ops); 1693 1694 CSEMap.InsertNode(N, IP); 1695 InsertNode(N); 1696 SDValue V = SDValue(N, 0); 1697 NewSDValueDbgMsg(V, "Creating new node: ", this); 1698 return V; 1699 } 1700 1701 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1702 EVT VT = SV.getValueType(0); 1703 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1704 ShuffleVectorSDNode::commuteMask(MaskVec); 1705 1706 SDValue Op0 = SV.getOperand(0); 1707 SDValue Op1 = SV.getOperand(1); 1708 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1709 } 1710 1711 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1712 FoldingSetNodeID ID; 1713 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1714 ID.AddInteger(RegNo); 1715 void *IP = nullptr; 1716 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1717 return SDValue(E, 0); 1718 1719 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1720 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 1721 CSEMap.InsertNode(N, IP); 1722 InsertNode(N); 1723 return SDValue(N, 0); 1724 } 1725 1726 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1727 FoldingSetNodeID ID; 1728 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1729 ID.AddPointer(RegMask); 1730 void *IP = nullptr; 1731 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1732 return SDValue(E, 0); 1733 1734 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1735 CSEMap.InsertNode(N, IP); 1736 InsertNode(N); 1737 return SDValue(N, 0); 1738 } 1739 1740 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1741 MCSymbol *Label) { 1742 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1743 } 1744 1745 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1746 SDValue Root, MCSymbol *Label) { 1747 FoldingSetNodeID ID; 1748 SDValue Ops[] = { Root }; 1749 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1750 ID.AddPointer(Label); 1751 void *IP = nullptr; 1752 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1753 return SDValue(E, 0); 1754 1755 auto *N = newSDNode<LabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label); 1756 createOperands(N, Ops); 1757 1758 CSEMap.InsertNode(N, IP); 1759 InsertNode(N); 1760 return SDValue(N, 0); 1761 } 1762 1763 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1764 int64_t Offset, 1765 bool isTarget, 1766 unsigned char TargetFlags) { 1767 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1768 1769 FoldingSetNodeID ID; 1770 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1771 ID.AddPointer(BA); 1772 ID.AddInteger(Offset); 1773 ID.AddInteger(TargetFlags); 1774 void *IP = nullptr; 1775 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1776 return SDValue(E, 0); 1777 1778 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1779 CSEMap.InsertNode(N, IP); 1780 InsertNode(N); 1781 return SDValue(N, 0); 1782 } 1783 1784 SDValue SelectionDAG::getSrcValue(const Value *V) { 1785 assert((!V || V->getType()->isPointerTy()) && 1786 "SrcValue is not a pointer?"); 1787 1788 FoldingSetNodeID ID; 1789 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1790 ID.AddPointer(V); 1791 1792 void *IP = nullptr; 1793 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1794 return SDValue(E, 0); 1795 1796 auto *N = newSDNode<SrcValueSDNode>(V); 1797 CSEMap.InsertNode(N, IP); 1798 InsertNode(N); 1799 return SDValue(N, 0); 1800 } 1801 1802 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1803 FoldingSetNodeID ID; 1804 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1805 ID.AddPointer(MD); 1806 1807 void *IP = nullptr; 1808 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1809 return SDValue(E, 0); 1810 1811 auto *N = newSDNode<MDNodeSDNode>(MD); 1812 CSEMap.InsertNode(N, IP); 1813 InsertNode(N); 1814 return SDValue(N, 0); 1815 } 1816 1817 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1818 if (VT == V.getValueType()) 1819 return V; 1820 1821 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1822 } 1823 1824 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1825 unsigned SrcAS, unsigned DestAS) { 1826 SDValue Ops[] = {Ptr}; 1827 FoldingSetNodeID ID; 1828 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1829 ID.AddInteger(SrcAS); 1830 ID.AddInteger(DestAS); 1831 1832 void *IP = nullptr; 1833 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1834 return SDValue(E, 0); 1835 1836 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1837 VT, SrcAS, DestAS); 1838 createOperands(N, Ops); 1839 1840 CSEMap.InsertNode(N, IP); 1841 InsertNode(N); 1842 return SDValue(N, 0); 1843 } 1844 1845 /// getShiftAmountOperand - Return the specified value casted to 1846 /// the target's desired shift amount type. 
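/// For example (illustrative): if the target's shift amount type for an i64
/// shift is i32, an i8 shift amount is zero-extended to i32 here, while vector
/// shift amounts are returned unmodified.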
1847 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1848 EVT OpTy = Op.getValueType(); 1849 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1850 if (OpTy == ShTy || OpTy.isVector()) return Op; 1851 1852 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1853 } 1854 1855 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1856 SDLoc dl(Node); 1857 const TargetLowering &TLI = getTargetLoweringInfo(); 1858 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1859 EVT VT = Node->getValueType(0); 1860 SDValue Tmp1 = Node->getOperand(0); 1861 SDValue Tmp2 = Node->getOperand(1); 1862 unsigned Align = Node->getConstantOperandVal(3); 1863 1864 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1865 Tmp2, MachinePointerInfo(V)); 1866 SDValue VAList = VAListLoad; 1867 1868 if (Align > TLI.getMinStackArgumentAlignment()) { 1869 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 1870 1871 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1872 getConstant(Align - 1, dl, VAList.getValueType())); 1873 1874 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1875 getConstant(-(int64_t)Align, dl, VAList.getValueType())); 1876 } 1877 1878 // Increment the pointer, VAList, to the next vaarg 1879 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1880 getConstant(getDataLayout().getTypeAllocSize( 1881 VT.getTypeForEVT(*getContext())), 1882 dl, VAList.getValueType())); 1883 // Store the incremented VAList to the legalized pointer 1884 Tmp1 = 1885 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1886 // Load the actual argument out of the pointer VAList 1887 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1888 } 1889 1890 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1891 SDLoc dl(Node); 1892 const TargetLowering &TLI = getTargetLoweringInfo(); 1893 // This defaults to loading a pointer from the input and storing it to the 1894 // output, returning the chain. 
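// In effect (illustrative), on targets whose va_list is a single pointer,
// va_copy(dst, src) becomes *(i8 **)dst = *(i8 **)src, with the load and
// store sequenced on this node's chain.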
1895 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1896 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1897 SDValue Tmp1 = 1898 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1899 Node->getOperand(2), MachinePointerInfo(VS)); 1900 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1901 MachinePointerInfo(VD)); 1902 } 1903 1904 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1905 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1906 unsigned ByteSize = VT.getStoreSize(); 1907 Type *Ty = VT.getTypeForEVT(*getContext()); 1908 unsigned StackAlign = 1909 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1910 1911 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1912 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1913 } 1914 1915 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1916 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 1917 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1918 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1919 const DataLayout &DL = getDataLayout(); 1920 unsigned Align = 1921 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1922 1923 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1924 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 1925 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1926 } 1927 1928 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 1929 ISD::CondCode Cond, const SDLoc &dl) { 1930 EVT OpVT = N1.getValueType(); 1931 1932 // These setcc operations always fold. 1933 switch (Cond) { 1934 default: break; 1935 case ISD::SETFALSE: 1936 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); 1937 case ISD::SETTRUE: 1938 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); 1939 1940 case ISD::SETOEQ: 1941 case ISD::SETOGT: 1942 case ISD::SETOGE: 1943 case ISD::SETOLT: 1944 case ISD::SETOLE: 1945 case ISD::SETONE: 1946 case ISD::SETO: 1947 case ISD::SETUO: 1948 case ISD::SETUEQ: 1949 case ISD::SETUNE: 1950 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!"); 1951 break; 1952 } 1953 1954 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 1955 const APInt &C2 = N2C->getAPIntValue(); 1956 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 1957 const APInt &C1 = N1C->getAPIntValue(); 1958 1959 switch (Cond) { 1960 default: llvm_unreachable("Unknown integer setcc!"); 1961 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT); 1962 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT); 1963 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT); 1964 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT); 1965 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT); 1966 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT); 1967 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT); 1968 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT); 1969 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT); 1970 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT); 1971 } 1972 } 1973 } 1974 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) { 1975 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) { 1976 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); 1977 switch (Cond) 
{ 1978 default: break; 1979 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 1980 return getUNDEF(VT); 1981 LLVM_FALLTHROUGH; 1982 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, 1983 OpVT); 1984 case ISD::SETNE: if (R==APFloat::cmpUnordered) 1985 return getUNDEF(VT); 1986 LLVM_FALLTHROUGH; 1987 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || 1988 R==APFloat::cmpLessThan, dl, VT, 1989 OpVT); 1990 case ISD::SETLT: if (R==APFloat::cmpUnordered) 1991 return getUNDEF(VT); 1992 LLVM_FALLTHROUGH; 1993 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, 1994 OpVT); 1995 case ISD::SETGT: if (R==APFloat::cmpUnordered) 1996 return getUNDEF(VT); 1997 LLVM_FALLTHROUGH; 1998 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, 1999 VT, OpVT); 2000 case ISD::SETLE: if (R==APFloat::cmpUnordered) 2001 return getUNDEF(VT); 2002 LLVM_FALLTHROUGH; 2003 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || 2004 R==APFloat::cmpEqual, dl, VT, 2005 OpVT); 2006 case ISD::SETGE: if (R==APFloat::cmpUnordered) 2007 return getUNDEF(VT); 2008 LLVM_FALLTHROUGH; 2009 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2010 R==APFloat::cmpEqual, dl, VT, OpVT); 2011 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, 2012 OpVT); 2013 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, 2014 OpVT); 2015 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || 2016 R==APFloat::cmpEqual, dl, VT, 2017 OpVT); 2018 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, 2019 OpVT); 2020 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || 2021 R==APFloat::cmpLessThan, dl, VT, 2022 OpVT); 2023 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || 2024 R==APFloat::cmpUnordered, dl, VT, 2025 OpVT); 2026 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, 2027 VT, OpVT); 2028 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, 2029 OpVT); 2030 } 2031 } else { 2032 // Ensure that the constant occurs on the RHS. 2033 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 2034 MVT CompVT = N1.getValueType().getSimpleVT(); 2035 if (!TLI->isCondCodeLegal(SwappedCond, CompVT)) 2036 return SDValue(); 2037 2038 return getSetCC(dl, VT, N2, N1, SwappedCond); 2039 } 2040 } 2041 2042 // Could not fold it. 2043 return SDValue(); 2044 } 2045 2046 /// See if the specified operand can be simplified with the knowledge that only 2047 /// the bits specified by Mask are used. 2048 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &Mask) { 2049 switch (V.getOpcode()) { 2050 default: 2051 break; 2052 case ISD::Constant: { 2053 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode()); 2054 assert(CV && "Const value should be ConstSDNode."); 2055 const APInt &CVal = CV->getAPIntValue(); 2056 APInt NewVal = CVal & Mask; 2057 if (NewVal != CVal) 2058 return getConstant(NewVal, SDLoc(V), V.getValueType()); 2059 break; 2060 } 2061 case ISD::OR: 2062 case ISD::XOR: 2063 // If the LHS or RHS don't contribute bits to the or, drop them. 2064 if (MaskedValueIsZero(V.getOperand(0), Mask)) 2065 return V.getOperand(1); 2066 if (MaskedValueIsZero(V.getOperand(1), Mask)) 2067 return V.getOperand(0); 2068 break; 2069 case ISD::SRL: 2070 // Only look at single-use SRLs. 
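// (If the SRL had other uses it would stay live anyway, so building a
// second, simplified shift here would only grow the DAG.)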
2071 if (!V.getNode()->hasOneUse()) 2072 break; 2073 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 2074 // See if we can recursively simplify the LHS. 2075 unsigned Amt = RHSC->getZExtValue(); 2076 2077 // Watch out for shift count overflow though. 2078 if (Amt >= Mask.getBitWidth()) 2079 break; 2080 APInt NewMask = Mask << Amt; 2081 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask)) 2082 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS, 2083 V.getOperand(1)); 2084 } 2085 break; 2086 case ISD::AND: { 2087 // X & -1 -> X (ignoring bits which aren't demanded). 2088 ConstantSDNode *AndVal = isConstOrConstSplat(V.getOperand(1)); 2089 if (AndVal && Mask.isSubsetOf(AndVal->getAPIntValue())) 2090 return V.getOperand(0); 2091 break; 2092 } 2093 case ISD::ANY_EXTEND: { 2094 SDValue Src = V.getOperand(0); 2095 unsigned SrcBitWidth = Src.getScalarValueSizeInBits(); 2096 // Being conservative here - only peek through if we only demand bits in the 2097 // non-extended source (even though the extended bits are technically undef). 2098 if (Mask.getActiveBits() > SrcBitWidth) 2099 break; 2100 APInt SrcMask = Mask.trunc(SrcBitWidth); 2101 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcMask)) 2102 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc); 2103 break; 2104 } 2105 } 2106 return SDValue(); 2107 } 2108 2109 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2110 /// use this predicate to simplify operations downstream. 2111 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2112 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2113 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2114 } 2115 2116 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2117 /// this predicate to simplify operations downstream. Mask is known to be zero 2118 /// for bits that V cannot have. 2119 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 2120 unsigned Depth) const { 2121 KnownBits Known; 2122 computeKnownBits(Op, Known, Depth); 2123 return Mask.isSubsetOf(Known.Zero); 2124 } 2125 2126 /// Helper function that checks to see if a node is a constant or a 2127 /// build vector of splat constants at least within the demanded elts. 2128 static ConstantSDNode *isConstOrDemandedConstSplat(SDValue N, 2129 const APInt &DemandedElts) { 2130 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 2131 return CN; 2132 if (N.getOpcode() != ISD::BUILD_VECTOR) 2133 return nullptr; 2134 EVT VT = N.getValueType(); 2135 ConstantSDNode *Cst = nullptr; 2136 unsigned NumElts = VT.getVectorNumElements(); 2137 assert(DemandedElts.getBitWidth() == NumElts && "Unexpected vector size"); 2138 for (unsigned i = 0; i != NumElts; ++i) { 2139 if (!DemandedElts[i]) 2140 continue; 2141 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(i)); 2142 if (!C || (Cst && Cst->getAPIntValue() != C->getAPIntValue()) || 2143 C->getValueType(0) != VT.getScalarType()) 2144 return nullptr; 2145 Cst = C; 2146 } 2147 return Cst; 2148 } 2149 2150 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 2151 /// is less than the element bit-width of the shift node, return it. 2152 static const APInt *getValidShiftAmountConstant(SDValue V) { 2153 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 2154 // Shifting more than the bitwidth is not valid. 
2155 const APInt &ShAmt = SA->getAPIntValue(); 2156 if (ShAmt.ult(V.getScalarValueSizeInBits())) 2157 return &ShAmt; 2158 } 2159 return nullptr; 2160 } 2161 2162 /// Determine which bits of Op are known to be either zero or one and return 2163 /// them in Known. For vectors, the known bits are those that are shared by 2164 /// every vector element. 2165 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 2166 unsigned Depth) const { 2167 EVT VT = Op.getValueType(); 2168 APInt DemandedElts = VT.isVector() 2169 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2170 : APInt(1, 1); 2171 computeKnownBits(Op, Known, DemandedElts, Depth); 2172 } 2173 2174 /// Determine which bits of Op are known to be either zero or one and return 2175 /// them in Known. The DemandedElts argument allows us to only collect the known 2176 /// bits that are shared by the requested vector elements. 2177 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 2178 const APInt &DemandedElts, 2179 unsigned Depth) const { 2180 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2181 2182 Known = KnownBits(BitWidth); // Don't know anything. 2183 2184 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2185 // We know all of the bits for a constant! 2186 Known.One = C->getAPIntValue(); 2187 Known.Zero = ~Known.One; 2188 return; 2189 } 2190 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { 2191 // We know all of the bits for a constant fp! 2192 Known.One = C->getValueAPF().bitcastToAPInt(); 2193 Known.Zero = ~Known.One; 2194 return; 2195 } 2196 2197 if (Depth == 6) 2198 return; // Limit search depth. 2199 2200 KnownBits Known2; 2201 unsigned NumElts = DemandedElts.getBitWidth(); 2202 2203 if (!DemandedElts) 2204 return; // No demanded elts, better to assume we don't know anything. 2205 2206 unsigned Opcode = Op.getOpcode(); 2207 switch (Opcode) { 2208 case ISD::BUILD_VECTOR: 2209 // Collect the known bits that are shared by every demanded vector element. 2210 assert(NumElts == Op.getValueType().getVectorNumElements() && 2211 "Unexpected vector size"); 2212 Known.Zero.setAllBits(); Known.One.setAllBits(); 2213 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2214 if (!DemandedElts[i]) 2215 continue; 2216 2217 SDValue SrcOp = Op.getOperand(i); 2218 computeKnownBits(SrcOp, Known2, Depth + 1); 2219 2220 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2221 if (SrcOp.getValueSizeInBits() != BitWidth) { 2222 assert(SrcOp.getValueSizeInBits() > BitWidth && 2223 "Expected BUILD_VECTOR implicit truncation"); 2224 Known2 = Known2.trunc(BitWidth); 2225 } 2226 2227 // Known bits are the values that are shared by every demanded element. 2228 Known.One &= Known2.One; 2229 Known.Zero &= Known2.Zero; 2230 2231 // If we don't know any bits, early out. 2232 if (Known.isUnknown()) 2233 break; 2234 } 2235 break; 2236 case ISD::VECTOR_SHUFFLE: { 2237 // Collect the known bits that are shared by every vector element referenced 2238 // by the shuffle. 2239 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2240 Known.Zero.setAllBits(); Known.One.setAllBits(); 2241 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2242 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2243 for (unsigned i = 0; i != NumElts; ++i) { 2244 if (!DemandedElts[i]) 2245 continue; 2246 2247 int M = SVN->getMaskElt(i); 2248 if (M < 0) { 2249 // For UNDEF elements, we don't know anything about the common state of 2250 // the shuffle result. 
2251 Known.resetAll(); 2252 DemandedLHS.clearAllBits(); 2253 DemandedRHS.clearAllBits(); 2254 break; 2255 } 2256 2257 if ((unsigned)M < NumElts) 2258 DemandedLHS.setBit((unsigned)M % NumElts); 2259 else 2260 DemandedRHS.setBit((unsigned)M % NumElts); 2261 } 2262 // Known bits are the values that are shared by every demanded element. 2263 if (!!DemandedLHS) { 2264 SDValue LHS = Op.getOperand(0); 2265 computeKnownBits(LHS, Known2, DemandedLHS, Depth + 1); 2266 Known.One &= Known2.One; 2267 Known.Zero &= Known2.Zero; 2268 } 2269 // If we don't know any bits, early out. 2270 if (Known.isUnknown()) 2271 break; 2272 if (!!DemandedRHS) { 2273 SDValue RHS = Op.getOperand(1); 2274 computeKnownBits(RHS, Known2, DemandedRHS, Depth + 1); 2275 Known.One &= Known2.One; 2276 Known.Zero &= Known2.Zero; 2277 } 2278 break; 2279 } 2280 case ISD::CONCAT_VECTORS: { 2281 // Split DemandedElts and test each of the demanded subvectors. 2282 Known.Zero.setAllBits(); Known.One.setAllBits(); 2283 EVT SubVectorVT = Op.getOperand(0).getValueType(); 2284 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 2285 unsigned NumSubVectors = Op.getNumOperands(); 2286 for (unsigned i = 0; i != NumSubVectors; ++i) { 2287 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 2288 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 2289 if (!!DemandedSub) { 2290 SDValue Sub = Op.getOperand(i); 2291 computeKnownBits(Sub, Known2, DemandedSub, Depth + 1); 2292 Known.One &= Known2.One; 2293 Known.Zero &= Known2.Zero; 2294 } 2295 // If we don't know any bits, early out. 2296 if (Known.isUnknown()) 2297 break; 2298 } 2299 break; 2300 } 2301 case ISD::INSERT_SUBVECTOR: { 2302 // If we know the element index, demand any elements from the subvector and 2303 // the remainder from the src its inserted into, otherwise demand them all. 2304 SDValue Src = Op.getOperand(0); 2305 SDValue Sub = Op.getOperand(1); 2306 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2307 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 2308 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) { 2309 Known.One.setAllBits(); 2310 Known.Zero.setAllBits(); 2311 uint64_t Idx = SubIdx->getZExtValue(); 2312 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 2313 if (!!DemandedSubElts) { 2314 computeKnownBits(Sub, Known, DemandedSubElts, Depth + 1); 2315 if (Known.isUnknown()) 2316 break; // early-out. 2317 } 2318 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts); 2319 APInt DemandedSrcElts = DemandedElts & ~SubMask; 2320 if (!!DemandedSrcElts) { 2321 computeKnownBits(Src, Known2, DemandedSrcElts, Depth + 1); 2322 Known.One &= Known2.One; 2323 Known.Zero &= Known2.Zero; 2324 } 2325 } else { 2326 computeKnownBits(Sub, Known, Depth + 1); 2327 if (Known.isUnknown()) 2328 break; // early-out. 2329 computeKnownBits(Src, Known2, Depth + 1); 2330 Known.One &= Known2.One; 2331 Known.Zero &= Known2.Zero; 2332 } 2333 break; 2334 } 2335 case ISD::EXTRACT_SUBVECTOR: { 2336 // If we know the element index, just demand that subvector elements, 2337 // otherwise demand them all. 2338 SDValue Src = Op.getOperand(0); 2339 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2340 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2341 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2342 // Offset the demanded elts by the subvector index. 
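// E.g. (illustrative): extracting a v4i32 at index 2 from a v8i32 source
// with DemandedElts = 0b1011 demands source elements 0b00101100.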
2343 uint64_t Idx = SubIdx->getZExtValue(); 2344 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx); 2345 computeKnownBits(Src, Known, DemandedSrc, Depth + 1); 2346 } else { 2347 computeKnownBits(Src, Known, Depth + 1); 2348 } 2349 break; 2350 } 2351 case ISD::BITCAST: { 2352 SDValue N0 = Op.getOperand(0); 2353 EVT SubVT = N0.getValueType(); 2354 unsigned SubBitWidth = SubVT.getScalarSizeInBits(); 2355 2356 // Ignore bitcasts from unsupported types. 2357 if (!(SubVT.isInteger() || SubVT.isFloatingPoint())) 2358 break; 2359 2360 // Fast handling of 'identity' bitcasts. 2361 if (BitWidth == SubBitWidth) { 2362 computeKnownBits(N0, Known, DemandedElts, Depth + 1); 2363 break; 2364 } 2365 2366 bool IsLE = getDataLayout().isLittleEndian(); 2367 2368 // Bitcast 'small element' vector to 'large element' scalar/vector. 2369 if ((BitWidth % SubBitWidth) == 0) { 2370 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2371 2372 // Collect known bits for the (larger) output by collecting the known 2373 // bits from each set of sub elements and shift these into place. 2374 // We need to separately call computeKnownBits for each set of 2375 // sub elements as the knownbits for each is likely to be different. 2376 unsigned SubScale = BitWidth / SubBitWidth; 2377 APInt SubDemandedElts(NumElts * SubScale, 0); 2378 for (unsigned i = 0; i != NumElts; ++i) 2379 if (DemandedElts[i]) 2380 SubDemandedElts.setBit(i * SubScale); 2381 2382 for (unsigned i = 0; i != SubScale; ++i) { 2383 computeKnownBits(N0, Known2, SubDemandedElts.shl(i), 2384 Depth + 1); 2385 unsigned Shifts = IsLE ? i : SubScale - 1 - i; 2386 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts); 2387 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts); 2388 } 2389 } 2390 2391 // Bitcast 'large element' scalar/vector to 'small element' vector. 2392 if ((SubBitWidth % BitWidth) == 0) { 2393 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 2394 2395 // Collect known bits for the (smaller) output by collecting the known 2396 // bits from the overlapping larger input elements and extracting the 2397 // sub sections we actually care about. 2398 unsigned SubScale = SubBitWidth / BitWidth; 2399 APInt SubDemandedElts(NumElts / SubScale, 0); 2400 for (unsigned i = 0; i != NumElts; ++i) 2401 if (DemandedElts[i]) 2402 SubDemandedElts.setBit(i / SubScale); 2403 2404 computeKnownBits(N0, Known2, SubDemandedElts, Depth + 1); 2405 2406 Known.Zero.setAllBits(); Known.One.setAllBits(); 2407 for (unsigned i = 0; i != NumElts; ++i) 2408 if (DemandedElts[i]) { 2409 unsigned Shifts = IsLE ? i : NumElts - 1 - i; 2410 unsigned Offset = (Shifts % SubScale) * BitWidth; 2411 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth); 2412 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth); 2413 // If we don't know any bits, early out. 2414 if (Known.isUnknown()) 2415 break; 2416 } 2417 } 2418 break; 2419 } 2420 case ISD::AND: 2421 // If either the LHS or the RHS are Zero, the result is zero. 2422 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2423 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2424 2425 // Output known-1 bits are only known if set in both the LHS & RHS. 2426 Known.One &= Known2.One; 2427 // Output known-0 are known to be clear if zero in either the LHS | RHS. 
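// E.g. (illustrative): for (x & 0x0f) on an i8 value, the constant operand
// alone makes bits 7..4 of the result known zero.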
2428 Known.Zero |= Known2.Zero; 2429 break; 2430 case ISD::OR: 2431 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2432 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2433 2434 // Output known-0 bits are only known if clear in both the LHS & RHS. 2435 Known.Zero &= Known2.Zero; 2436 // Output known-1 are known to be set if set in either the LHS | RHS. 2437 Known.One |= Known2.One; 2438 break; 2439 case ISD::XOR: { 2440 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2441 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2442 2443 // Output known-0 bits are known if clear or set in both the LHS & RHS. 2444 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One); 2445 // Output known-1 are known to be set if set in only one of the LHS, RHS. 2446 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero); 2447 Known.Zero = KnownZeroOut; 2448 break; 2449 } 2450 case ISD::MUL: { 2451 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2452 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2453 2454 // If low bits are zero in either operand, output low known-0 bits. 2455 // Also compute a conservative estimate for high known-0 bits. 2456 // More trickiness is possible, but this is sufficient for the 2457 // interesting case of alignment computation. 2458 unsigned TrailZ = Known.countMinTrailingZeros() + 2459 Known2.countMinTrailingZeros(); 2460 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 2461 Known2.countMinLeadingZeros(), 2462 BitWidth) - BitWidth; 2463 2464 Known.resetAll(); 2465 Known.Zero.setLowBits(std::min(TrailZ, BitWidth)); 2466 Known.Zero.setHighBits(std::min(LeadZ, BitWidth)); 2467 break; 2468 } 2469 case ISD::UDIV: { 2470 // For the purposes of computing leading zeros we can conservatively 2471 // treat a udiv as a logical right shift by the power of 2 known to 2472 // be less than the denominator. 2473 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2474 unsigned LeadZ = Known2.countMinLeadingZeros(); 2475 2476 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2477 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2478 if (RHSMaxLeadingZeros != BitWidth) 2479 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2480 2481 Known.Zero.setHighBits(LeadZ); 2482 break; 2483 } 2484 case ISD::SELECT: 2485 case ISD::VSELECT: 2486 computeKnownBits(Op.getOperand(2), Known, DemandedElts, Depth+1); 2487 // If we don't know any bits, early out. 2488 if (Known.isUnknown()) 2489 break; 2490 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth+1); 2491 2492 // Only known if known in both the LHS and RHS. 2493 Known.One &= Known2.One; 2494 Known.Zero &= Known2.Zero; 2495 break; 2496 case ISD::SELECT_CC: 2497 computeKnownBits(Op.getOperand(3), Known, DemandedElts, Depth+1); 2498 // If we don't know any bits, early out. 2499 if (Known.isUnknown()) 2500 break; 2501 computeKnownBits(Op.getOperand(2), Known2, DemandedElts, Depth+1); 2502 2503 // Only known if known in both the LHS and RHS. 2504 Known.One &= Known2.One; 2505 Known.Zero &= Known2.Zero; 2506 break; 2507 case ISD::SMULO: 2508 case ISD::UMULO: 2509 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 2510 if (Op.getResNo() != 1) 2511 break; 2512 // The boolean result conforms to getBooleanContents. 2513 // If we know the result of a setcc has the top bits zero, use this info. 
2514 // We know that we have an integer-based boolean since these operations 2515 // are only available for integer. 2516 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2517 TargetLowering::ZeroOrOneBooleanContent && 2518 BitWidth > 1) 2519 Known.Zero.setBitsFrom(1); 2520 break; 2521 case ISD::SETCC: 2522 // If we know the result of a setcc has the top bits zero, use this info. 2523 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2524 TargetLowering::ZeroOrOneBooleanContent && 2525 BitWidth > 1) 2526 Known.Zero.setBitsFrom(1); 2527 break; 2528 case ISD::SHL: 2529 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2530 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2531 unsigned Shift = ShAmt->getZExtValue(); 2532 Known.Zero <<= Shift; 2533 Known.One <<= Shift; 2534 // Low bits are known zero. 2535 Known.Zero.setLowBits(Shift); 2536 } 2537 break; 2538 case ISD::SRL: 2539 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2540 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2541 unsigned Shift = ShAmt->getZExtValue(); 2542 Known.Zero.lshrInPlace(Shift); 2543 Known.One.lshrInPlace(Shift); 2544 // High bits are known zero. 2545 Known.Zero.setHighBits(Shift); 2546 } else if (auto *BV = dyn_cast<BuildVectorSDNode>(Op.getOperand(1))) { 2547 // If the shift amount is a vector of constants see if we can bound 2548 // the number of upper zero bits. 2549 unsigned ShiftAmountMin = BitWidth; 2550 for (unsigned i = 0; i != BV->getNumOperands(); ++i) { 2551 if (auto *C = dyn_cast<ConstantSDNode>(BV->getOperand(i))) { 2552 const APInt &ShAmt = C->getAPIntValue(); 2553 if (ShAmt.ult(BitWidth)) { 2554 ShiftAmountMin = std::min<unsigned>(ShiftAmountMin, 2555 ShAmt.getZExtValue()); 2556 continue; 2557 } 2558 } 2559 // Don't know anything. 2560 ShiftAmountMin = 0; 2561 break; 2562 } 2563 2564 Known.Zero.setHighBits(ShiftAmountMin); 2565 } 2566 break; 2567 case ISD::SRA: 2568 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2569 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2570 unsigned Shift = ShAmt->getZExtValue(); 2571 // Sign extend known zero/one bit (else is unknown). 2572 Known.Zero.ashrInPlace(Shift); 2573 Known.One.ashrInPlace(Shift); 2574 } 2575 break; 2576 case ISD::SIGN_EXTEND_INREG: { 2577 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2578 unsigned EBits = EVT.getScalarSizeInBits(); 2579 2580 // Sign extension. Compute the demanded bits in the result that are not 2581 // present in the input. 2582 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2583 2584 APInt InSignMask = APInt::getSignMask(EBits); 2585 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2586 2587 // If the sign extended bits are demanded, we know that the sign 2588 // bit is demanded. 2589 InSignMask = InSignMask.zext(BitWidth); 2590 if (NewBits.getBoolValue()) 2591 InputDemandedBits |= InSignMask; 2592 2593 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2594 Known.One &= InputDemandedBits; 2595 Known.Zero &= InputDemandedBits; 2596 2597 // If the sign bit of the input is known set or clear, then we know the 2598 // top bits of the result. 
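// E.g. (illustrative): for a sign_extend_inreg from i8 within an i32, a
// known-zero bit 7 makes bits 31..8 known zero, and a known-one bit 7 makes
// them known one.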
2599 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear 2600 Known.Zero |= NewBits; 2601 Known.One &= ~NewBits; 2602 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set 2603 Known.One |= NewBits; 2604 Known.Zero &= ~NewBits; 2605 } else { // Input sign bit unknown 2606 Known.Zero &= ~NewBits; 2607 Known.One &= ~NewBits; 2608 } 2609 break; 2610 } 2611 case ISD::CTTZ: 2612 case ISD::CTTZ_ZERO_UNDEF: { 2613 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2614 // If we have a known 1, its position is our upper bound. 2615 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 2616 unsigned LowBits = Log2_32(PossibleTZ) + 1; 2617 Known.Zero.setBitsFrom(LowBits); 2618 break; 2619 } 2620 case ISD::CTLZ: 2621 case ISD::CTLZ_ZERO_UNDEF: { 2622 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2623 // If we have a known 1, its position is our upper bound. 2624 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 2625 unsigned LowBits = Log2_32(PossibleLZ) + 1; 2626 Known.Zero.setBitsFrom(LowBits); 2627 break; 2628 } 2629 case ISD::CTPOP: { 2630 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2631 // If we know some of the bits are zero, they can't be one. 2632 unsigned PossibleOnes = Known2.countMaxPopulation(); 2633 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 2634 break; 2635 } 2636 case ISD::LOAD: { 2637 LoadSDNode *LD = cast<LoadSDNode>(Op); 2638 // If this is a ZEXTLoad and we are looking at the loaded value. 2639 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 2640 EVT VT = LD->getMemoryVT(); 2641 unsigned MemBits = VT.getScalarSizeInBits(); 2642 Known.Zero.setBitsFrom(MemBits); 2643 } else if (const MDNode *Ranges = LD->getRanges()) { 2644 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 2645 computeKnownBitsFromRangeMetadata(*Ranges, Known); 2646 } 2647 break; 2648 } 2649 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2650 EVT InVT = Op.getOperand(0).getValueType(); 2651 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements()); 2652 computeKnownBits(Op.getOperand(0), Known, InDemandedElts, Depth + 1); 2653 Known = Known.zext(BitWidth); 2654 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits()); 2655 break; 2656 } 2657 case ISD::ZERO_EXTEND: { 2658 EVT InVT = Op.getOperand(0).getValueType(); 2659 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2660 Known = Known.zext(BitWidth); 2661 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits()); 2662 break; 2663 } 2664 // TODO ISD::SIGN_EXTEND_VECTOR_INREG 2665 case ISD::SIGN_EXTEND: { 2666 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2667 // If the sign bit is known to be zero or one, then sext will extend 2668 // it to the top bits, else it will just zext. 2669 Known = Known.sext(BitWidth); 2670 break; 2671 } 2672 case ISD::ANY_EXTEND: { 2673 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2674 Known = Known.zext(BitWidth); 2675 break; 2676 } 2677 case ISD::TRUNCATE: { 2678 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2679 Known = Known.trunc(BitWidth); 2680 break; 2681 } 2682 case ISD::AssertZext: { 2683 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2684 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 2685 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2686 Known.Zero |= (~InMask); 2687 Known.One &= (~Known.Zero); 2688 break; 2689 } 2690 case ISD::FGETSIGN: 2691 // All bits are zero except the low bit. 
2692 Known.Zero.setBitsFrom(1); 2693 break; 2694 case ISD::USUBO: 2695 case ISD::SSUBO: 2696 if (Op.getResNo() == 1) { 2697 // If we know the result of a setcc has the top bits zero, use this info. 2698 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2699 TargetLowering::ZeroOrOneBooleanContent && 2700 BitWidth > 1) 2701 Known.Zero.setBitsFrom(1); 2702 break; 2703 } 2704 LLVM_FALLTHROUGH; 2705 case ISD::SUB: 2706 case ISD::SUBC: { 2707 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) { 2708 // We know that the top bits of C-X are clear if X contains fewer bits 2709 // than C (i.e. no wrap-around can happen). For example, 20-X is 2710 // positive if we can prove that X is >= 0 and < 16. 2711 if (CLHS->getAPIntValue().isNonNegative()) { 2712 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros(); 2713 // NLZ can't be BitWidth with no sign bit 2714 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1); 2715 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, 2716 Depth + 1); 2717 2718 // If all of the MaskV bits are known to be zero, then we know the 2719 // output top bits are zero, because we now know that the output is 2720 // from [0-C]. 2721 if ((Known2.Zero & MaskV) == MaskV) { 2722 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros(); 2723 // Top bits known zero. 2724 Known.Zero.setHighBits(NLZ2); 2725 } 2726 } 2727 } 2728 2729 // If low bits are known to be zero in both operands, then we know they are 2730 // going to be 0 in the result. Both addition and complement operations 2731 // preserve the low zero bits. 2732 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2733 unsigned KnownZeroLow = Known2.countMinTrailingZeros(); 2734 if (KnownZeroLow == 0) 2735 break; 2736 2737 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2738 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros()); 2739 Known.Zero.setLowBits(KnownZeroLow); 2740 break; 2741 } 2742 case ISD::UADDO: 2743 case ISD::SADDO: 2744 case ISD::ADDCARRY: 2745 if (Op.getResNo() == 1) { 2746 // If we know the result of a setcc has the top bits zero, use this info. 2747 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2748 TargetLowering::ZeroOrOneBooleanContent && 2749 BitWidth > 1) 2750 Known.Zero.setBitsFrom(1); 2751 break; 2752 } 2753 LLVM_FALLTHROUGH; 2754 case ISD::ADD: 2755 case ISD::ADDC: 2756 case ISD::ADDE: { 2757 // Output low known-0 bits are known if they are clear in both the LHS and 2758 // the RHS. For example, 8+(X<<3) is known to have the 2759 // low 3 bits clear. 2760 // Output known-0 bits are also known if the top bits of each input are 2761 // known to be clear. For example, if one input has the top 10 bits clear 2762 // and the other has the top 8 bits clear, we know the top 7 bits of the 2763 // output must be clear. 2764 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2765 unsigned KnownZeroHigh = Known2.countMinLeadingZeros(); 2766 unsigned KnownZeroLow = Known2.countMinTrailingZeros(); 2767 2768 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, 2769 Depth + 1); 2770 KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros()); 2771 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros()); 2772 2773 if (Opcode == ISD::ADDE || Opcode == ISD::ADDCARRY) { 2774 // With ADDE and ADDCARRY, a carry bit may be added in, so we can only 2775 // use this information if we know (at least) that the low two bits are 2776 // clear. We then return to the caller that the low bit is unknown but
// that other bits are known zero. 2777 2778 if (KnownZeroLow >= 2) 2779 Known.Zero.setBits(1, KnownZeroLow); 2780 break; 2781 } 2782 2783 Known.Zero.setLowBits(KnownZeroLow); 2784 if (KnownZeroHigh > 1) 2785 Known.Zero.setHighBits(KnownZeroHigh - 1); 2786 break; 2787 } 2788 case ISD::SREM: 2789 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 2790 const APInt &RA = Rem->getAPIntValue().abs(); 2791 if (RA.isPowerOf2()) { 2792 APInt LowBits = RA - 1; 2793 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2794 2795 // The low bits of the first operand are unchanged by the srem. 2796 Known.Zero = Known2.Zero & LowBits; 2797 Known.One = Known2.One & LowBits; 2798 2799 // If the first operand is non-negative or has all low bits zero, then 2800 // the upper bits are all zero. 2801 if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits)) 2802 Known.Zero |= ~LowBits; 2803 2804 // If the first operand is negative and not all low bits are zero, then 2805 // the upper bits are all one. 2806 if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0)) 2807 Known.One |= ~LowBits; 2808 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); 2809 } 2810 } 2811 break; 2812 case ISD::UREM: { 2813 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 2814 const APInt &RA = Rem->getAPIntValue(); 2815 if (RA.isPowerOf2()) { 2816 APInt LowBits = (RA - 1); 2817 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2818 2819 // The upper bits are all zero, the lower ones are unchanged. 2820 Known.Zero = Known2.Zero | ~LowBits; 2821 Known.One = Known2.One & LowBits; 2822 break; 2823 } 2824 } 2825 2826 // Since the result is less than or equal to either operand, any leading 2827 // zero bits in either operand must also exist in the result. 2828 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2829 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2830 2831 uint32_t Leaders = 2832 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 2833 Known.resetAll(); 2834 Known.Zero.setHighBits(Leaders); 2835 break; 2836 } 2837 case ISD::EXTRACT_ELEMENT: { 2838 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2839 const unsigned Index = Op.getConstantOperandVal(1); 2840 const unsigned BitWidth = Op.getValueSizeInBits(); 2841 2842 // Remove low part of known bits mask 2843 Known.Zero = Known.Zero.getHiBits(Known.Zero.getBitWidth() - Index * BitWidth); 2844 Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth); 2845 2846 // Remove high part of known bits mask 2847 Known = Known.trunc(BitWidth); 2848 break; 2849 } 2850 case ISD::EXTRACT_VECTOR_ELT: { 2851 SDValue InVec = Op.getOperand(0); 2852 SDValue EltNo = Op.getOperand(1); 2853 EVT VecVT = InVec.getValueType(); 2854 const unsigned BitWidth = Op.getValueSizeInBits(); 2855 const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); 2856 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 2857 // If BitWidth > EltBitWidth the value is any-extended. So we do not know 2858 // anything about the extended bits. 2859 if (BitWidth > EltBitWidth) 2860 Known = Known.trunc(EltBitWidth); 2861 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 2862 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) { 2863 // If we know the element index, just demand that vector element.
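// E.g. (illustrative): extracting lane 2 of a v4i32 sets DemandedElt to
// 0b0100, so only the known bits of that one lane are computed.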
2864 unsigned Idx = ConstEltNo->getZExtValue(); 2865 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); 2866 computeKnownBits(InVec, Known, DemandedElt, Depth + 1); 2867 } else { 2868 // Unknown element index, so ignore DemandedElts and demand them all. 2869 computeKnownBits(InVec, Known, Depth + 1); 2870 } 2871 if (BitWidth > EltBitWidth) 2872 Known = Known.zext(BitWidth); 2873 break; 2874 } 2875 case ISD::INSERT_VECTOR_ELT: { 2876 SDValue InVec = Op.getOperand(0); 2877 SDValue InVal = Op.getOperand(1); 2878 SDValue EltNo = Op.getOperand(2); 2879 2880 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 2881 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 2882 // If we know the element index, split the demand between the 2883 // source vector and the inserted element. 2884 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth); 2885 unsigned EltIdx = CEltNo->getZExtValue(); 2886 2887 // If we demand the inserted element then add its common known bits. 2888 if (DemandedElts[EltIdx]) { 2889 computeKnownBits(InVal, Known2, Depth + 1); 2890 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 2891 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth()); 2892 } 2893 2894 // If we demand the source vector then add its common known bits, ensuring 2895 // that we don't demand the inserted element. 2896 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx)); 2897 if (!!VectorElts) { 2898 computeKnownBits(InVec, Known2, VectorElts, Depth + 1); 2899 Known.One &= Known2.One; 2900 Known.Zero &= Known2.Zero; 2901 } 2902 } else { 2903 // Unknown element index, so ignore DemandedElts and demand them all. 2904 computeKnownBits(InVec, Known, Depth + 1); 2905 computeKnownBits(InVal, Known2, Depth + 1); 2906 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 2907 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth()); 2908 } 2909 break; 2910 } 2911 case ISD::BITREVERSE: { 2912 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2913 Known.Zero = Known2.Zero.reverseBits(); 2914 Known.One = Known2.One.reverseBits(); 2915 break; 2916 } 2917 case ISD::BSWAP: { 2918 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2919 Known.Zero = Known2.Zero.byteSwap(); 2920 Known.One = Known2.One.byteSwap(); 2921 break; 2922 } 2923 case ISD::ABS: { 2924 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2925 2926 // If the source's MSB is zero then we know the rest of the bits already. 2927 if (Known2.isNonNegative()) { 2928 Known.Zero = Known2.Zero; 2929 Known.One = Known2.One; 2930 break; 2931 } 2932 2933 // We only know that the absolute values's MSB will be zero iff there is 2934 // a set bit that isn't the sign bit (otherwise it could be INT_MIN). 2935 Known2.One.clearSignBit(); 2936 if (Known2.One.getBoolValue()) { 2937 Known.Zero = APInt::getSignMask(BitWidth); 2938 break; 2939 } 2940 break; 2941 } 2942 case ISD::UMIN: { 2943 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2944 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2945 2946 // UMIN - we know that the result will have the maximum of the 2947 // known zero leading bits of the inputs. 
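// E.g. (illustrative): the umin of one operand with at least 8 known leading
// zero bits and another with at least 4 has at least 8, because the result
// never exceeds either input.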
2948 unsigned LeadZero = Known.countMinLeadingZeros(); 2949 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros()); 2950 2951 Known.Zero &= Known2.Zero; 2952 Known.One &= Known2.One; 2953 Known.Zero.setHighBits(LeadZero); 2954 break; 2955 } 2956 case ISD::UMAX: { 2957 computeKnownBits(Op.getOperand(0), Known, DemandedElts, 2958 Depth + 1); 2959 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2960 2961 // UMAX - we know that the result will have the maximum of the 2962 // known one leading bits of the inputs. 2963 unsigned LeadOne = Known.countMinLeadingOnes(); 2964 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes()); 2965 2966 Known.Zero &= Known2.Zero; 2967 Known.One &= Known2.One; 2968 Known.One.setHighBits(LeadOne); 2969 break; 2970 } 2971 case ISD::SMIN: 2972 case ISD::SMAX: { 2973 // If we have a clamp pattern, we know that the number of sign bits will be 2974 // the minimum of the clamp min/max range. 2975 bool IsMax = (Opcode == ISD::SMAX); 2976 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 2977 if ((CstLow = isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts))) 2978 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 2979 CstHigh = isConstOrDemandedConstSplat(Op.getOperand(0).getOperand(1), 2980 DemandedElts); 2981 if (CstLow && CstHigh) { 2982 if (!IsMax) 2983 std::swap(CstLow, CstHigh); 2984 2985 const APInt &ValueLow = CstLow->getAPIntValue(); 2986 const APInt &ValueHigh = CstHigh->getAPIntValue(); 2987 if (ValueLow.sle(ValueHigh)) { 2988 unsigned LowSignBits = ValueLow.getNumSignBits(); 2989 unsigned HighSignBits = ValueHigh.getNumSignBits(); 2990 unsigned MinSignBits = std::min(LowSignBits, HighSignBits); 2991 if (ValueLow.isNegative() && ValueHigh.isNegative()) { 2992 Known.One.setHighBits(MinSignBits); 2993 break; 2994 } 2995 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) { 2996 Known.Zero.setHighBits(MinSignBits); 2997 break; 2998 } 2999 } 3000 } 3001 3002 // Fallback - just get the shared known bits of the operands. 3003 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 3004 if (Known.isUnknown()) break; // Early-out 3005 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 3006 Known.Zero &= Known2.Zero; 3007 Known.One &= Known2.One; 3008 break; 3009 } 3010 case ISD::FrameIndex: 3011 case ISD::TargetFrameIndex: 3012 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth); 3013 break; 3014 3015 default: 3016 if (Opcode < ISD::BUILTIN_OP_END) 3017 break; 3018 LLVM_FALLTHROUGH; 3019 case ISD::INTRINSIC_WO_CHAIN: 3020 case ISD::INTRINSIC_W_CHAIN: 3021 case ISD::INTRINSIC_VOID: 3022 // Allow the target to implement this method for its nodes. 
3023 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); 3024 break; 3025 } 3026 3027 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 3028 } 3029 3030 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0, 3031 SDValue N1) const { 3032 // X + 0 never overflow 3033 if (isNullConstant(N1)) 3034 return OFK_Never; 3035 3036 KnownBits N1Known; 3037 computeKnownBits(N1, N1Known); 3038 if (N1Known.Zero.getBoolValue()) { 3039 KnownBits N0Known; 3040 computeKnownBits(N0, N0Known); 3041 3042 bool overflow; 3043 (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow); 3044 if (!overflow) 3045 return OFK_Never; 3046 } 3047 3048 // mulhi + 1 never overflow 3049 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 && 3050 (~N1Known.Zero & 0x01) == ~N1Known.Zero) 3051 return OFK_Never; 3052 3053 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) { 3054 KnownBits N0Known; 3055 computeKnownBits(N0, N0Known); 3056 3057 if ((~N0Known.Zero & 0x01) == ~N0Known.Zero) 3058 return OFK_Never; 3059 } 3060 3061 return OFK_Sometime; 3062 } 3063 3064 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const { 3065 EVT OpVT = Val.getValueType(); 3066 unsigned BitWidth = OpVT.getScalarSizeInBits(); 3067 3068 // Is the constant a known power of 2? 3069 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val)) 3070 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3071 3072 // A left-shift of a constant one will have exactly one bit set because 3073 // shifting the bit off the end is undefined. 3074 if (Val.getOpcode() == ISD::SHL) { 3075 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3076 if (C && C->getAPIntValue() == 1) 3077 return true; 3078 } 3079 3080 // Similarly, a logical right-shift of a constant sign-bit will have exactly 3081 // one bit set. 3082 if (Val.getOpcode() == ISD::SRL) { 3083 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3084 if (C && C->getAPIntValue().isSignMask()) 3085 return true; 3086 } 3087 3088 // Are all operands of a build vector constant powers of two? 3089 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3090 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3091 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3092 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3093 return false; 3094 })) 3095 return true; 3096 3097 // More could be done here, though the above checks are enough 3098 // to handle some common cases. 3099 3100 // Fall back to computeKnownBits to catch other known cases. 3101 KnownBits Known; 3102 computeKnownBits(Val, Known); 3103 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3104 } 3105 3106 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3107 EVT VT = Op.getValueType(); 3108 APInt DemandedElts = VT.isVector() 3109 ? 
APInt::getAllOnesValue(VT.getVectorNumElements()) 3110 : APInt(1, 1); 3111 return ComputeNumSignBits(Op, DemandedElts, Depth); 3112 } 3113 3114 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3115 unsigned Depth) const { 3116 EVT VT = Op.getValueType(); 3117 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3118 unsigned VTBits = VT.getScalarSizeInBits(); 3119 unsigned NumElts = DemandedElts.getBitWidth(); 3120 unsigned Tmp, Tmp2; 3121 unsigned FirstAnswer = 1; 3122 3123 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3124 const APInt &Val = C->getAPIntValue(); 3125 return Val.getNumSignBits(); 3126 } 3127 3128 if (Depth == 6) 3129 return 1; // Limit search depth. 3130 3131 if (!DemandedElts) 3132 return 1; // No demanded elts, better to assume we don't know anything. 3133 3134 unsigned Opcode = Op.getOpcode(); 3135 switch (Opcode) { 3136 default: break; 3137 case ISD::AssertSext: 3138 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3139 return VTBits-Tmp+1; 3140 case ISD::AssertZext: 3141 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3142 return VTBits-Tmp; 3143 3144 case ISD::BUILD_VECTOR: 3145 Tmp = VTBits; 3146 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3147 if (!DemandedElts[i]) 3148 continue; 3149 3150 SDValue SrcOp = Op.getOperand(i); 3151 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 3152 3153 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3154 if (SrcOp.getValueSizeInBits() != VTBits) { 3155 assert(SrcOp.getValueSizeInBits() > VTBits && 3156 "Expected BUILD_VECTOR implicit truncation"); 3157 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3158 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3159 } 3160 Tmp = std::min(Tmp, Tmp2); 3161 } 3162 return Tmp; 3163 3164 case ISD::VECTOR_SHUFFLE: { 3165 // Collect the minimum number of sign bits that are shared by every vector 3166 // element referenced by the shuffle. 3167 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3168 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3169 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3170 for (unsigned i = 0; i != NumElts; ++i) { 3171 int M = SVN->getMaskElt(i); 3172 if (!DemandedElts[i]) 3173 continue; 3174 // For UNDEF elements, we don't know anything about the common state of 3175 // the shuffle result. 3176 if (M < 0) 3177 return 1; 3178 if ((unsigned)M < NumElts) 3179 DemandedLHS.setBit((unsigned)M % NumElts); 3180 else 3181 DemandedRHS.setBit((unsigned)M % NumElts); 3182 } 3183 Tmp = std::numeric_limits<unsigned>::max(); 3184 if (!!DemandedLHS) 3185 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3186 if (!!DemandedRHS) { 3187 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3188 Tmp = std::min(Tmp, Tmp2); 3189 } 3190 // If we don't know anything, early out and try computeKnownBits fall-back. 3191 if (Tmp == 1) 3192 break; 3193 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3194 return Tmp; 3195 } 3196 3197 case ISD::BITCAST: { 3198 SDValue N0 = Op.getOperand(0); 3199 EVT SrcVT = N0.getValueType(); 3200 unsigned SrcBits = SrcVT.getScalarSizeInBits(); 3201 3202 // Ignore bitcasts from unsupported types.. 3203 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) 3204 break; 3205 3206 // Fast handling of 'identity' bitcasts. 
3207 if (VTBits == SrcBits) 3208 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3209 3210 // Bitcast 'large element' scalar/vector to 'small element' vector. 3211 // TODO: Handle cases other than 'sign splat' when we have a use case. 3212 // Requires handling of DemandedElts and Endianness. 3213 if ((SrcBits % VTBits) == 0) { 3214 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 3215 Tmp = ComputeNumSignBits(N0, Depth + 1); 3216 if (Tmp == SrcBits) 3217 return VTBits; 3218 } 3219 break; 3220 } 3221 3222 case ISD::SIGN_EXTEND: 3223 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3224 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3225 case ISD::SIGN_EXTEND_INREG: 3226 // Max of the input and what this extends. 3227 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3228 Tmp = VTBits-Tmp+1; 3229 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3230 return std::max(Tmp, Tmp2); 3231 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3232 SDValue Src = Op.getOperand(0); 3233 EVT SrcVT = Src.getValueType(); 3234 APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements()); 3235 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3236 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3237 } 3238 3239 case ISD::SRA: 3240 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3241 // SRA X, C -> adds C sign bits. 3242 if (ConstantSDNode *C = 3243 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) { 3244 APInt ShiftVal = C->getAPIntValue(); 3245 ShiftVal += Tmp; 3246 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); 3247 } 3248 return Tmp; 3249 case ISD::SHL: 3250 if (ConstantSDNode *C = 3251 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) { 3252 // shl destroys sign bits. 3253 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3254 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 3255 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 3256 return Tmp - C->getZExtValue(); 3257 } 3258 break; 3259 case ISD::AND: 3260 case ISD::OR: 3261 case ISD::XOR: // NOT is handled here. 3262 // Logical binary ops preserve the number of sign bits at the worst. 3263 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3264 if (Tmp != 1) { 3265 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3266 FirstAnswer = std::min(Tmp, Tmp2); 3267 // We computed what we know about the sign bits as our first 3268 // answer. Now proceed to the generic code that uses 3269 // computeKnownBits, and pick whichever answer is better. 3270 } 3271 break; 3272 3273 case ISD::SELECT: 3274 case ISD::VSELECT: 3275 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3276 if (Tmp == 1) return 1; // Early out. 3277 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3278 return std::min(Tmp, Tmp2); 3279 case ISD::SELECT_CC: 3280 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3281 if (Tmp == 1) return 1; // Early out. 3282 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3283 return std::min(Tmp, Tmp2); 3284 3285 case ISD::SMIN: 3286 case ISD::SMAX: { 3287 // If we have a clamp pattern, we know that the number of sign bits will be 3288 // the minimum of the clamp min/max range. 
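    // e.g. for i32: smax(smin(X, 127), -128) clamps X to [-128, 127]; both
    // bounds have 25 sign bits (127 = 0x0000007F, -128 = 0xFFFFFF80), so the
    // result is known to have at least 25 sign bits.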
3289     bool IsMax = (Opcode == ISD::SMAX);
3290     ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3291     if ((CstLow = isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)))
3292       if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3293         CstHigh = isConstOrDemandedConstSplat(Op.getOperand(0).getOperand(1),
3294                                               DemandedElts);
3295     if (CstLow && CstHigh) {
3296       if (!IsMax)
3297         std::swap(CstLow, CstHigh);
3298       if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3299         Tmp = CstLow->getAPIntValue().getNumSignBits();
3300         Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3301         return std::min(Tmp, Tmp2);
3302       }
3303     }
3304
3305     // Fallback - just get the minimum number of sign bits of the operands.
3306     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3307     if (Tmp == 1)
3308       return 1;  // Early out.
3309     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3310     return std::min(Tmp, Tmp2);
3311   }
3312   case ISD::UMIN:
3313   case ISD::UMAX:
3314     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3315     if (Tmp == 1)
3316       return 1;  // Early out.
3317     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3318     return std::min(Tmp, Tmp2);
3319   case ISD::SADDO:
3320   case ISD::UADDO:
3321   case ISD::SSUBO:
3322   case ISD::USUBO:
3323   case ISD::SMULO:
3324   case ISD::UMULO:
3325     if (Op.getResNo() != 1)
3326       break;
3327     // The boolean result conforms to getBooleanContents. Fall through.
3328     // If setcc returns 0/-1, all bits are sign bits.
3329     // We know that we have an integer-based boolean since these operations
3330     // are only available for integers.
3331     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3332         TargetLowering::ZeroOrNegativeOneBooleanContent)
3333       return VTBits;
3334     break;
3335   case ISD::SETCC:
3336     // If setcc returns 0/-1, all bits are sign bits.
3337     if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3338         TargetLowering::ZeroOrNegativeOneBooleanContent)
3339       return VTBits;
3340     break;
3341   case ISD::ROTL:
3342   case ISD::ROTR:
3343     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3344       unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3345
3346       // Handle rotate right by N like a rotate left by VTBits-N.
3347       if (Opcode == ISD::ROTR)
3348         RotAmt = (VTBits - RotAmt) % VTBits;
3349
3350       // If we aren't rotating out all of the known-in sign bits, return the
3351       // number that are left. This handles rotl(sext(x), 1) for example.
3352       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3353       if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3354     }
3355     break;
3356   case ISD::ADD:
3357   case ISD::ADDC:
3358     // Add can have at most one carry bit. Thus we know that the output
3359     // is, at worst, one more bit than the inputs.
3360     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3361     if (Tmp == 1) return 1;  // Early out.
3362
3363     // Special case decrementing a value (ADD X, -1):
3364     if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3365       if (CRHS->isAllOnesValue()) {
3366         KnownBits Known;
3367         computeKnownBits(Op.getOperand(0), Known, Depth+1);
3368
3369         // If the input is known to be 0 or 1, the output is 0/-1, which is all
3370         // sign bits set.
3371         if ((Known.Zero | 1).isAllOnesValue())
3372           return VTBits;
3373
3374         // If we are subtracting one from a positive number, there is no carry
3375         // out of the result.
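        // e.g. if X:i8 is known non-negative with 4 sign bits (so X <= 15),
        // then X-1 lies in [-1, 14]; -1 (0xFF) has 8 sign bits and 14 (0x0E)
        // still has 4, so no sign bits are lost.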
3376         if (Known.isNonNegative())
3377           return Tmp;
3378       }
3379
3380     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3381     if (Tmp2 == 1) return 1;
3382     return std::min(Tmp, Tmp2)-1;
3383
3384   case ISD::SUB:
3385     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3386     if (Tmp2 == 1) return 1;
3387
3388     // Handle NEG.
3389     if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3390       if (CLHS->isNullValue()) {
3391         KnownBits Known;
3392         computeKnownBits(Op.getOperand(1), Known, Depth+1);
3393         // If the input is known to be 0 or 1, the output is 0/-1, which is all
3394         // sign bits set.
3395         if ((Known.Zero | 1).isAllOnesValue())
3396           return VTBits;
3397
3398         // If the input is known to be positive (the sign bit is known clear),
3399         // the output of the NEG has the same number of sign bits as the input.
3400         if (Known.isNonNegative())
3401           return Tmp2;
3402
3403         // Otherwise, we treat this like a SUB.
3404       }
3405
3406     // Sub can have at most one carry bit. Thus we know that the output
3407     // is, at worst, one more bit than the inputs.
3408     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3409     if (Tmp == 1) return 1;  // Early out.
3410     return std::min(Tmp, Tmp2)-1;
3411   case ISD::TRUNCATE: {
3412     // Check if the sign bits of the source extend down into the truncated value.
3413     unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3414     unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3415     if (NumSrcSignBits > (NumSrcBits - VTBits))
3416       return NumSrcSignBits - (NumSrcBits - VTBits);
3417     break;
3418   }
3419   case ISD::EXTRACT_ELEMENT: {
3420     const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3421     const int BitWidth = Op.getValueSizeInBits();
3422     const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3423
3424     // Get the reverse index: operand 1 indexes elements from the little end,
3425     // while the sign bits sit at the big end.
3426     const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3427
3428     // If the sign portion ends in our element, the subtraction gives the
3429     // correct result; otherwise it goes negative or exceeds BitWidth, so clamp.
3430     return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3431   }
3432   case ISD::INSERT_VECTOR_ELT: {
3433     SDValue InVec = Op.getOperand(0);
3434     SDValue InVal = Op.getOperand(1);
3435     SDValue EltNo = Op.getOperand(2);
3436     unsigned NumElts = InVec.getValueType().getVectorNumElements();
3437
3438     ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3439     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3440       // If we know the element index, split the demand between the
3441       // source vector and the inserted element.
3442       unsigned EltIdx = CEltNo->getZExtValue();
3443
3444       // If we demand the inserted element then get its sign bits.
3445       Tmp = std::numeric_limits<unsigned>::max();
3446       if (DemandedElts[EltIdx]) {
3447         // TODO - handle implicit truncation of inserted elements.
3448         if (InVal.getScalarValueSizeInBits() != VTBits)
3449           break;
3450         Tmp = ComputeNumSignBits(InVal, Depth + 1);
3451       }
3452
3453       // If we demand the source vector then get its sign bits, and determine
3454       // the minimum.
3455       APInt VectorElts = DemandedElts;
3456       VectorElts.clearBit(EltIdx);
3457       if (!!VectorElts) {
3458         Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3459         Tmp = std::min(Tmp, Tmp2);
3460       }
3461     } else {
3462       // Unknown element index, so ignore DemandedElts and demand them all.
3463       Tmp = ComputeNumSignBits(InVec, Depth + 1);
3464       Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3465       Tmp = std::min(Tmp, Tmp2);
3466     }
3467     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3468     return Tmp;
3469   }
3470   case ISD::EXTRACT_VECTOR_ELT: {
3471     SDValue InVec = Op.getOperand(0);
3472     SDValue EltNo = Op.getOperand(1);
3473     EVT VecVT = InVec.getValueType();
3474     const unsigned BitWidth = Op.getValueSizeInBits();
3475     const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3476     const unsigned NumSrcElts = VecVT.getVectorNumElements();
3477
3478     // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3479     // anything about sign bits. But if the sizes match we can derive knowledge
3480     // about sign bits from the vector operand.
3481     if (BitWidth != EltBitWidth)
3482       break;
3483
3484     // If we know the element index, just demand that vector element; otherwise,
3485     // for an unknown element index, ignore DemandedElts and demand them all.
3486     APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3487     ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3488     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3489       DemandedSrcElts =
3490           APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3491
3492     return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3493   }
3494   case ISD::EXTRACT_SUBVECTOR: {
3495     // If we know the subvector index, just demand that subvector's elements;
3496     // otherwise demand them all.
3497     SDValue Src = Op.getOperand(0);
3498     ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3499     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3500     if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3501       // Offset the demanded elts by the subvector index.
3502       uint64_t Idx = SubIdx->getZExtValue();
3503       APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
3504       return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3505     }
3506     return ComputeNumSignBits(Src, Depth + 1);
3507   }
3508   case ISD::CONCAT_VECTORS:
3509     // Determine the minimum number of sign bits across all demanded
3510     // elts of the input vectors. Early out if the result is already 1.
3511     Tmp = std::numeric_limits<unsigned>::max();
3512     EVT SubVectorVT = Op.getOperand(0).getValueType();
3513     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3514     unsigned NumSubVectors = Op.getNumOperands();
3515     for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3516       APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3517       DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3518       if (!DemandedSub)
3519         continue;
3520       Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3521       Tmp = std::min(Tmp, Tmp2);
3522     }
3523     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3524     return Tmp;
3525   }
3526
3527   // If we are looking at the loaded value of the SDNode (result 0).
3528   if (Op.getResNo() == 0) {
3529     // Handle LOADX separately here. The EXTLOAD case falls through.
3530     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3531       unsigned ExtType = LD->getExtensionType();
3532       switch (ExtType) {
3533       default: break;
3534       case ISD::SEXTLOAD:  // e.g. i16->i32 sextload: '17' bits known.
3535         Tmp = LD->getMemoryVT().getScalarSizeInBits();
3536         return VTBits-Tmp+1;
3537       case ISD::ZEXTLOAD:  // e.g. i16->i32 zextload: '16' bits known.
3538         Tmp = LD->getMemoryVT().getScalarSizeInBits();
3539         return VTBits-Tmp;
3540       }
3541     }
3542   }
3543
3544   // Allow the target to implement this method for its nodes.
3545   if (Opcode >= ISD::BUILTIN_OP_END ||
3546       Opcode == ISD::INTRINSIC_WO_CHAIN ||
3547       Opcode == ISD::INTRINSIC_W_CHAIN ||
3548       Opcode == ISD::INTRINSIC_VOID) {
3549     unsigned NumBits =
3550         TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
3551     if (NumBits > 1)
3552       FirstAnswer = std::max(FirstAnswer, NumBits);
3553   }
3554
3555   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3556   // use this information.
3557   KnownBits Known;
3558   computeKnownBits(Op, Known, DemandedElts, Depth);
3559
3560   APInt Mask;
3561   if (Known.isNonNegative()) {         // sign bit is 0
3562     Mask = Known.Zero;
3563   } else if (Known.isNegative()) {     // sign bit is 1
3564     Mask = Known.One;
3565   } else {
3566     // Nothing known.
3567     return FirstAnswer;
3568   }
3569
3570   // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3571   // the number of identical bits in the top of the input value.
3572   Mask = ~Mask;
3573   Mask <<= Mask.getBitWidth()-VTBits;
3574   // Return # leading zeros. We use 'min' here in case Val was zero before
3575   // shifting; we don't want to return e.g. 64 for an i32 zero.
3576   return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3577 }
3578
3579 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3580   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3581       !isa<ConstantSDNode>(Op.getOperand(1)))
3582     return false;
3583
3584   if (Op.getOpcode() == ISD::OR &&
3585       !MaskedValueIsZero(Op.getOperand(0),
3586                          cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3587     return false;
3588
3589   return true;
3590 }
3591
3592 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
3593   // If we're told that NaNs won't happen, assume they won't.
3594   if (getTarget().Options.NoNaNsFPMath)
3595     return true;
3596
3597   if (Op->getFlags().hasNoNaNs())
3598     return true;
3599
3600   // If the value is a constant, we can obviously see if it is a NaN or not.
3601   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3602     return !C->getValueAPF().isNaN();
3603
3604   // TODO: Recognize more cases here.
3605
3606   return false;
3607 }
3608
3609 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3610   // If the value is a constant, we can obviously see if it is a zero or not.
3611   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3612     return !C->isZero();
3613
3614   // TODO: Recognize more cases here.
3615   switch (Op.getOpcode()) {
3616   default: break;
3617   case ISD::OR:
3618     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3619       return !C->isNullValue();
3620     break;
3621   }
3622
3623   return false;
3624 }
3625
3626 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3627   // Check the obvious case.
3628   if (A == B) return true;
3629
3630   // Check for negative and positive zero.
3631   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3632     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3633       if (CA->isZero() && CB->isZero()) return true;
3634
3635   // Otherwise they may not be equal.
3636   return false;
3637 }
3638
3639 // FIXME: unify with llvm::haveNoCommonBitsSet.
3640 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
3641 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
3642   assert(A.getValueType() == B.getValueType() &&
3643          "Values must have the same type");
3644   KnownBits AKnown, BKnown;
3645   computeKnownBits(A, AKnown);
3646   computeKnownBits(B, BKnown);
3647   return (AKnown.Zero | BKnown.Zero).isAllOnesValue();
3648 }
3649
3650 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
3651                                   ArrayRef<SDValue> Ops,
3652                                   SelectionDAG &DAG) {
3653   assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
3654   assert(llvm::all_of(Ops,
3655                       [Ops](SDValue Op) {
3656                         return Ops[0].getValueType() == Op.getValueType();
3657                       }) &&
3658          "Concatenation of vectors with inconsistent value types!");
3659   assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
3660              VT.getVectorNumElements() &&
3661          "Incorrect element count in vector concatenation!");
3662
3663   if (Ops.size() == 1)
3664     return Ops[0];
3665
3666   // Concat of UNDEFs is UNDEF.
3667   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3668     return DAG.getUNDEF(VT);
3669
3670   // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
3671   // simplified to one big BUILD_VECTOR.
3672   // FIXME: Add support for SCALAR_TO_VECTOR as well.
3673   EVT SVT = VT.getScalarType();
3674   SmallVector<SDValue, 16> Elts;
3675   for (SDValue Op : Ops) {
3676     EVT OpVT = Op.getValueType();
3677     if (Op.isUndef())
3678       Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
3679     else if (Op.getOpcode() == ISD::BUILD_VECTOR)
3680       Elts.append(Op->op_begin(), Op->op_end());
3681     else
3682       return SDValue();
3683   }
3684
3685   // BUILD_VECTOR requires all inputs to be of the same type; find the
3686   // maximum type and extend them all.
3687   for (SDValue Op : Elts)
3688     SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
3689
3690   if (SVT.bitsGT(VT.getScalarType()))
3691     for (SDValue &Op : Elts)
3692       Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
3693                ? DAG.getZExtOrTrunc(Op, DL, SVT)
3694                : DAG.getSExtOrTrunc(Op, DL, SVT);
3695
3696   SDValue V = DAG.getBuildVector(VT, DL, Elts);
3697   NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
3698   return V;
3699 }
3700
3701 /// Gets or creates the specified node.
3702 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
3703   FoldingSetNodeID ID;
3704   AddNodeIDNode(ID, Opcode, getVTList(VT), None);
3705   void *IP = nullptr;
3706   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3707     return SDValue(E, 0);
3708
3709   auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
3710                               getVTList(VT));
3711   CSEMap.InsertNode(N, IP);
3712
3713   InsertNode(N);
3714   SDValue V = SDValue(N, 0);
3715   NewSDValueDbgMsg(V, "Creating new node: ", this);
3716   return V;
3717 }
3718
3719 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3720                               SDValue Operand, const SDNodeFlags Flags) {
3721   // Constant fold unary operations with an integer constant operand. Even
3722   // opaque constants will be folded, because the folding of unary operations
3723   // doesn't create new constants with different values. Nevertheless, the
3724   // opaque flag is preserved during folding to prevent future folding with
3725   // other constants.
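  // e.g. getNode(ISD::SIGN_EXTEND, DL, MVT::i32, Constant<i8 -1>) is folded
  // directly to Constant<i32 -1> (0xFFFFFFFF) below, without ever creating a
  // SIGN_EXTEND node.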
3726 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 3727 const APInt &Val = C->getAPIntValue(); 3728 switch (Opcode) { 3729 default: break; 3730 case ISD::SIGN_EXTEND: 3731 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 3732 C->isTargetOpcode(), C->isOpaque()); 3733 case ISD::ANY_EXTEND: 3734 case ISD::ZERO_EXTEND: 3735 case ISD::TRUNCATE: 3736 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 3737 C->isTargetOpcode(), C->isOpaque()); 3738 case ISD::UINT_TO_FP: 3739 case ISD::SINT_TO_FP: { 3740 APFloat apf(EVTToAPFloatSemantics(VT), 3741 APInt::getNullValue(VT.getSizeInBits())); 3742 (void)apf.convertFromAPInt(Val, 3743 Opcode==ISD::SINT_TO_FP, 3744 APFloat::rmNearestTiesToEven); 3745 return getConstantFP(apf, DL, VT); 3746 } 3747 case ISD::BITCAST: 3748 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 3749 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 3750 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 3751 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 3752 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 3753 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 3754 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 3755 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 3756 break; 3757 case ISD::ABS: 3758 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 3759 C->isOpaque()); 3760 case ISD::BITREVERSE: 3761 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 3762 C->isOpaque()); 3763 case ISD::BSWAP: 3764 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 3765 C->isOpaque()); 3766 case ISD::CTPOP: 3767 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 3768 C->isOpaque()); 3769 case ISD::CTLZ: 3770 case ISD::CTLZ_ZERO_UNDEF: 3771 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 3772 C->isOpaque()); 3773 case ISD::CTTZ: 3774 case ISD::CTTZ_ZERO_UNDEF: 3775 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 3776 C->isOpaque()); 3777 case ISD::FP16_TO_FP: { 3778 bool Ignored; 3779 APFloat FPV(APFloat::IEEEhalf(), 3780 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 3781 3782 // This can return overflow, underflow, or inexact; we don't care. 3783 // FIXME need to be more flexible about rounding mode. 3784 (void)FPV.convert(EVTToAPFloatSemantics(VT), 3785 APFloat::rmNearestTiesToEven, &Ignored); 3786 return getConstantFP(FPV, DL, VT); 3787 } 3788 } 3789 } 3790 3791 // Constant fold unary operations with a floating point constant operand. 
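  // e.g. FNEG of ConstantFP<1.0> folds to ConstantFP<-1.0>, and FCEIL of
  // ConstantFP<2.5> folds to ConstantFP<3.0> (roundToIntegral reports
  // opInexact, which is accepted below).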
3792 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 3793 APFloat V = C->getValueAPF(); // make copy 3794 switch (Opcode) { 3795 case ISD::FNEG: 3796 V.changeSign(); 3797 return getConstantFP(V, DL, VT); 3798 case ISD::FABS: 3799 V.clearSign(); 3800 return getConstantFP(V, DL, VT); 3801 case ISD::FCEIL: { 3802 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 3803 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3804 return getConstantFP(V, DL, VT); 3805 break; 3806 } 3807 case ISD::FTRUNC: { 3808 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 3809 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3810 return getConstantFP(V, DL, VT); 3811 break; 3812 } 3813 case ISD::FFLOOR: { 3814 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 3815 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3816 return getConstantFP(V, DL, VT); 3817 break; 3818 } 3819 case ISD::FP_EXTEND: { 3820 bool ignored; 3821 // This can return overflow, underflow, or inexact; we don't care. 3822 // FIXME need to be more flexible about rounding mode. 3823 (void)V.convert(EVTToAPFloatSemantics(VT), 3824 APFloat::rmNearestTiesToEven, &ignored); 3825 return getConstantFP(V, DL, VT); 3826 } 3827 case ISD::FP_TO_SINT: 3828 case ISD::FP_TO_UINT: { 3829 bool ignored; 3830 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 3831 // FIXME need to be more flexible about rounding mode. 3832 APFloat::opStatus s = 3833 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 3834 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 3835 break; 3836 return getConstant(IntVal, DL, VT); 3837 } 3838 case ISD::BITCAST: 3839 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 3840 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3841 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 3842 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3843 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 3844 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 3845 break; 3846 case ISD::FP_TO_FP16: { 3847 bool Ignored; 3848 // This can return overflow, underflow, or inexact; we don't care. 3849 // FIXME need to be more flexible about rounding mode. 3850 (void)V.convert(APFloat::IEEEhalf(), 3851 APFloat::rmNearestTiesToEven, &Ignored); 3852 return getConstant(V.bitcastToAPInt(), DL, VT); 3853 } 3854 } 3855 } 3856 3857 // Constant fold unary operations with a vector integer or float operand. 3858 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 3859 if (BV->isConstant()) { 3860 switch (Opcode) { 3861 default: 3862 // FIXME: Entirely reasonable to perform folding of other unary 3863 // operations here as the need arises. 
3864 break; 3865 case ISD::FNEG: 3866 case ISD::FABS: 3867 case ISD::FCEIL: 3868 case ISD::FTRUNC: 3869 case ISD::FFLOOR: 3870 case ISD::FP_EXTEND: 3871 case ISD::FP_TO_SINT: 3872 case ISD::FP_TO_UINT: 3873 case ISD::TRUNCATE: 3874 case ISD::ANY_EXTEND: 3875 case ISD::ZERO_EXTEND: 3876 case ISD::SIGN_EXTEND: 3877 case ISD::UINT_TO_FP: 3878 case ISD::SINT_TO_FP: 3879 case ISD::ABS: 3880 case ISD::BITREVERSE: 3881 case ISD::BSWAP: 3882 case ISD::CTLZ: 3883 case ISD::CTLZ_ZERO_UNDEF: 3884 case ISD::CTTZ: 3885 case ISD::CTTZ_ZERO_UNDEF: 3886 case ISD::CTPOP: { 3887 SDValue Ops = { Operand }; 3888 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 3889 return Fold; 3890 } 3891 } 3892 } 3893 } 3894 3895 unsigned OpOpcode = Operand.getNode()->getOpcode(); 3896 switch (Opcode) { 3897 case ISD::TokenFactor: 3898 case ISD::MERGE_VALUES: 3899 case ISD::CONCAT_VECTORS: 3900 return Operand; // Factor, merge or concat of one node? No need. 3901 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 3902 case ISD::FP_EXTEND: 3903 assert(VT.isFloatingPoint() && 3904 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 3905 if (Operand.getValueType() == VT) return Operand; // noop conversion. 3906 assert((!VT.isVector() || 3907 VT.getVectorNumElements() == 3908 Operand.getValueType().getVectorNumElements()) && 3909 "Vector element count mismatch!"); 3910 assert(Operand.getValueType().bitsLT(VT) && 3911 "Invalid fpext node, dst < src!"); 3912 if (Operand.isUndef()) 3913 return getUNDEF(VT); 3914 break; 3915 case ISD::SIGN_EXTEND: 3916 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3917 "Invalid SIGN_EXTEND!"); 3918 if (Operand.getValueType() == VT) return Operand; // noop extension 3919 assert((!VT.isVector() || 3920 VT.getVectorNumElements() == 3921 Operand.getValueType().getVectorNumElements()) && 3922 "Vector element count mismatch!"); 3923 assert(Operand.getValueType().bitsLT(VT) && 3924 "Invalid sext node, dst < src!"); 3925 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 3926 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 3927 else if (OpOpcode == ISD::UNDEF) 3928 // sext(undef) = 0, because the top bits will all be the same. 3929 return getConstant(0, DL, VT); 3930 break; 3931 case ISD::ZERO_EXTEND: 3932 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3933 "Invalid ZERO_EXTEND!"); 3934 if (Operand.getValueType() == VT) return Operand; // noop extension 3935 assert((!VT.isVector() || 3936 VT.getVectorNumElements() == 3937 Operand.getValueType().getVectorNumElements()) && 3938 "Vector element count mismatch!"); 3939 assert(Operand.getValueType().bitsLT(VT) && 3940 "Invalid zext node, dst < src!"); 3941 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 3942 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0)); 3943 else if (OpOpcode == ISD::UNDEF) 3944 // zext(undef) = 0, because the top bits will be zero. 
3945       return getConstant(0, DL, VT);
3946     break;
3947   case ISD::ANY_EXTEND:
3948     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3949            "Invalid ANY_EXTEND!");
3950     if (Operand.getValueType() == VT) return Operand;   // noop extension
3951     assert((!VT.isVector() ||
3952             VT.getVectorNumElements() ==
3953             Operand.getValueType().getVectorNumElements()) &&
3954            "Vector element count mismatch!");
3955     assert(Operand.getValueType().bitsLT(VT) &&
3956            "Invalid anyext node, dst < src!");
3957
3958     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3959         OpOpcode == ISD::ANY_EXTEND)
3960       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
3961       return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3962     else if (OpOpcode == ISD::UNDEF)
3963       return getUNDEF(VT);
3964
3965     // (ext (trunc x)) -> x
3966     if (OpOpcode == ISD::TRUNCATE) {
3967       SDValue OpOp = Operand.getOperand(0);
3968       if (OpOp.getValueType() == VT)
3969         return OpOp;
3970     }
3971     break;
3972   case ISD::TRUNCATE:
3973     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3974            "Invalid TRUNCATE!");
3975     if (Operand.getValueType() == VT) return Operand;   // noop truncate
3976     assert((!VT.isVector() ||
3977             VT.getVectorNumElements() ==
3978             Operand.getValueType().getVectorNumElements()) &&
3979            "Vector element count mismatch!");
3980     assert(Operand.getValueType().bitsGT(VT) &&
3981            "Invalid truncate node, src < dst!");
3982     if (OpOpcode == ISD::TRUNCATE)
3983       return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3984     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3985         OpOpcode == ISD::ANY_EXTEND) {
3986       // If the source is smaller than the dest, we still need an extend.
3987       if (Operand.getOperand(0).getValueType().getScalarType()
3988               .bitsLT(VT.getScalarType()))
3989         return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3990       if (Operand.getOperand(0).getValueType().bitsGT(VT))
3991         return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3992       return Operand.getOperand(0);
3993     }
3994     if (OpOpcode == ISD::UNDEF)
3995       return getUNDEF(VT);
3996     break;
3997   case ISD::ABS:
3998     assert(VT.isInteger() && VT == Operand.getValueType() &&
3999            "Invalid ABS!");
4000     if (OpOpcode == ISD::UNDEF)
4001       return getUNDEF(VT);
4002     break;
4003   case ISD::BSWAP:
4004     assert(VT.isInteger() && VT == Operand.getValueType() &&
4005            "Invalid BSWAP!");
4006     assert((VT.getScalarSizeInBits() % 16 == 0) &&
4007            "BSWAP types must be a multiple of 16 bits!");
4008     if (OpOpcode == ISD::UNDEF)
4009       return getUNDEF(VT);
4010     break;
4011   case ISD::BITREVERSE:
4012     assert(VT.isInteger() && VT == Operand.getValueType() &&
4013            "Invalid BITREVERSE!");
4014     if (OpOpcode == ISD::UNDEF)
4015       return getUNDEF(VT);
4016     break;
4017   case ISD::BITCAST:
4018     // Basic sanity checking.
4019     assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
4020            "Cannot BITCAST between types of different sizes!");
4021     if (VT == Operand.getValueType()) return Operand;   // noop conversion.
4022 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4023 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4024 if (OpOpcode == ISD::UNDEF) 4025 return getUNDEF(VT); 4026 break; 4027 case ISD::SCALAR_TO_VECTOR: 4028 assert(VT.isVector() && !Operand.getValueType().isVector() && 4029 (VT.getVectorElementType() == Operand.getValueType() || 4030 (VT.getVectorElementType().isInteger() && 4031 Operand.getValueType().isInteger() && 4032 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4033 "Illegal SCALAR_TO_VECTOR node!"); 4034 if (OpOpcode == ISD::UNDEF) 4035 return getUNDEF(VT); 4036 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4037 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4038 isa<ConstantSDNode>(Operand.getOperand(1)) && 4039 Operand.getConstantOperandVal(1) == 0 && 4040 Operand.getOperand(0).getValueType() == VT) 4041 return Operand.getOperand(0); 4042 break; 4043 case ISD::FNEG: 4044 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 4045 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) 4046 // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags? 4047 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1), 4048 Operand.getOperand(0), Operand.getNode()->getFlags()); 4049 if (OpOpcode == ISD::FNEG) // --X -> X 4050 return Operand.getOperand(0); 4051 break; 4052 case ISD::FABS: 4053 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4054 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4055 break; 4056 } 4057 4058 SDNode *N; 4059 SDVTList VTs = getVTList(VT); 4060 SDValue Ops[] = {Operand}; 4061 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4062 FoldingSetNodeID ID; 4063 AddNodeIDNode(ID, Opcode, VTs, Ops); 4064 void *IP = nullptr; 4065 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4066 E->intersectFlagsWith(Flags); 4067 return SDValue(E, 0); 4068 } 4069 4070 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4071 N->setFlags(Flags); 4072 createOperands(N, Ops); 4073 CSEMap.InsertNode(N, IP); 4074 } else { 4075 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4076 createOperands(N, Ops); 4077 } 4078 4079 InsertNode(N); 4080 SDValue V = SDValue(N, 0); 4081 NewSDValueDbgMsg(V, "Creating new node: ", this); 4082 return V; 4083 } 4084 4085 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 4086 const APInt &C2) { 4087 switch (Opcode) { 4088 case ISD::ADD: return std::make_pair(C1 + C2, true); 4089 case ISD::SUB: return std::make_pair(C1 - C2, true); 4090 case ISD::MUL: return std::make_pair(C1 * C2, true); 4091 case ISD::AND: return std::make_pair(C1 & C2, true); 4092 case ISD::OR: return std::make_pair(C1 | C2, true); 4093 case ISD::XOR: return std::make_pair(C1 ^ C2, true); 4094 case ISD::SHL: return std::make_pair(C1 << C2, true); 4095 case ISD::SRL: return std::make_pair(C1.lshr(C2), true); 4096 case ISD::SRA: return std::make_pair(C1.ashr(C2), true); 4097 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true); 4098 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true); 4099 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true); 4100 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true); 4101 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true); 4102 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? 
C1 : C2, true); 4103 case ISD::UDIV: 4104 if (!C2.getBoolValue()) 4105 break; 4106 return std::make_pair(C1.udiv(C2), true); 4107 case ISD::UREM: 4108 if (!C2.getBoolValue()) 4109 break; 4110 return std::make_pair(C1.urem(C2), true); 4111 case ISD::SDIV: 4112 if (!C2.getBoolValue()) 4113 break; 4114 return std::make_pair(C1.sdiv(C2), true); 4115 case ISD::SREM: 4116 if (!C2.getBoolValue()) 4117 break; 4118 return std::make_pair(C1.srem(C2), true); 4119 } 4120 return std::make_pair(APInt(1, 0), false); 4121 } 4122 4123 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4124 EVT VT, const ConstantSDNode *Cst1, 4125 const ConstantSDNode *Cst2) { 4126 if (Cst1->isOpaque() || Cst2->isOpaque()) 4127 return SDValue(); 4128 4129 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(), 4130 Cst2->getAPIntValue()); 4131 if (!Folded.second) 4132 return SDValue(); 4133 return getConstant(Folded.first, DL, VT); 4134 } 4135 4136 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4137 const GlobalAddressSDNode *GA, 4138 const SDNode *N2) { 4139 if (GA->getOpcode() != ISD::GlobalAddress) 4140 return SDValue(); 4141 if (!TLI->isOffsetFoldingLegal(GA)) 4142 return SDValue(); 4143 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2); 4144 if (!Cst2) 4145 return SDValue(); 4146 int64_t Offset = Cst2->getSExtValue(); 4147 switch (Opcode) { 4148 case ISD::ADD: break; 4149 case ISD::SUB: Offset = -uint64_t(Offset); break; 4150 default: return SDValue(); 4151 } 4152 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT, 4153 GA->getOffset() + uint64_t(Offset)); 4154 } 4155 4156 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4157 switch (Opcode) { 4158 case ISD::SDIV: 4159 case ISD::UDIV: 4160 case ISD::SREM: 4161 case ISD::UREM: { 4162 // If a divisor is zero/undef or any element of a divisor vector is 4163 // zero/undef, the whole op is undef. 4164 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4165 SDValue Divisor = Ops[1]; 4166 if (Divisor.isUndef() || isNullConstant(Divisor)) 4167 return true; 4168 4169 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4170 llvm::any_of(Divisor->op_values(), 4171 [](SDValue V) { return V.isUndef() || 4172 isNullConstant(V); }); 4173 // TODO: Handle signed overflow. 4174 } 4175 // TODO: Handle oversized shifts. 4176 default: 4177 return false; 4178 } 4179 } 4180 4181 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4182 EVT VT, SDNode *Cst1, 4183 SDNode *Cst2) { 4184 // If the opcode is a target-specific ISD node, there's nothing we can 4185 // do here and the operand rules may not line up with the below, so 4186 // bail early. 4187 if (Opcode >= ISD::BUILTIN_OP_END) 4188 return SDValue(); 4189 4190 if (isUndef(Opcode, {SDValue(Cst1, 0), SDValue(Cst2, 0)})) 4191 return getUNDEF(VT); 4192 4193 // Handle the case of two scalars. 
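  // e.g. FoldConstantArithmetic(ISD::MUL, DL, MVT::i32, Constant<6>,
  // Constant<7>) folds to Constant<42> via FoldValue above.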
4194   if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
4195     if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
4196       SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
4197       assert((!Folded || !VT.isVector()) &&
4198              "Can't fold vector ops with scalar operands");
4199       return Folded;
4200     }
4201   }
4202
4203   // fold (add Sym, c) -> Sym+c
4204   if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
4205     return FoldSymbolOffset(Opcode, VT, GA, Cst2);
4206   if (TLI->isCommutativeBinOp(Opcode))
4207     if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
4208       return FoldSymbolOffset(Opcode, VT, GA, Cst1);
4209
4210   // For vectors, extract each constant element so we can constant
4211   // fold them individually.
4212   BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
4213   BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
4214   if (!BV1 || !BV2)
4215     return SDValue();
4216
4217   assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
4218
4219   EVT SVT = VT.getScalarType();
4220   EVT LegalSVT = SVT;
4221   if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4222     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4223     if (LegalSVT.bitsLT(SVT))
4224       return SDValue();
4225   }
4226   SmallVector<SDValue, 4> Outputs;
4227   for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
4228     SDValue V1 = BV1->getOperand(I);
4229     SDValue V2 = BV2->getOperand(I);
4230
4231     if (SVT.isInteger()) {
4232       if (V1->getValueType(0).bitsGT(SVT))
4233         V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4234       if (V2->getValueType(0).bitsGT(SVT))
4235         V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4236     }
4237
4238     if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4239       return SDValue();
4240
4241     // Fold one vector element.
4242     SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4243     if (LegalSVT != SVT)
4244       ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4245
4246     // Scalar folding only succeeded if the result is a constant or UNDEF.
4247     if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4248         ScalarResult.getOpcode() != ISD::ConstantFP)
4249       return SDValue();
4250     Outputs.push_back(ScalarResult);
4251   }
4252
4253   assert(VT.getVectorNumElements() == Outputs.size() &&
4254          "Vector size mismatch!");
4255
4256   // We may have a vector type but a scalar result. Create a splat.
4257   Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4258
4259   // Build a big vector out of the scalar elements we generated.
4260   return getBuildVector(VT, SDLoc(), Outputs);
4261 }
4262
4263 // TODO: Merge with FoldConstantArithmetic
4264 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
4265                                                    const SDLoc &DL, EVT VT,
4266                                                    ArrayRef<SDValue> Ops,
4267                                                    const SDNodeFlags Flags) {
4268   // If the opcode is a target-specific ISD node, there's nothing we can
4269   // do here and the operand rules may not line up with the below, so
4270   // bail early.
4271   if (Opcode >= ISD::BUILTIN_OP_END)
4272     return SDValue();
4273
4274   if (isUndef(Opcode, Ops))
4275     return getUNDEF(VT);
4276
4277   // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
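  // e.g. an ISD::ADD of two all-constant BUILD_VECTORs is folded lane by
  // lane below: <i32 1, i32 2> + <i32 10, i32 20> becomes the BUILD_VECTOR
  // <i32 11, i32 22>.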
4278   if (!VT.isVector())
4279     return SDValue();
4280
4281   unsigned NumElts = VT.getVectorNumElements();
4282
4283   auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
4284     return !Op.getValueType().isVector() ||
4285            Op.getValueType().getVectorNumElements() == NumElts;
4286   };
4287
4288   auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
4289     BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
4290     return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
4291            (BV && BV->isConstant());
4292   };
4293
4294   // All operands must be vector types with the same number of elements as
4295   // the result type and must be either UNDEF or a build vector of constant
4296   // or UNDEF scalars.
4297   if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
4298       !llvm::all_of(Ops, IsScalarOrSameVectorSize))
4299     return SDValue();
4300
4301   // If we are comparing vectors, then the result needs to be an i1 boolean
4302   // that is then sign-extended back to the legal result type.
4303   EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
4304
4305   // Find a legal integer scalar type for constant promotion and
4306   // ensure that its scalar size is at least as large as the source.
4307   EVT LegalSVT = VT.getScalarType();
4308   if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4309     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4310     if (LegalSVT.bitsLT(VT.getScalarType()))
4311       return SDValue();
4312   }
4313
4314   // Constant fold each scalar lane separately.
4315   SmallVector<SDValue, 4> ScalarResults;
4316   for (unsigned i = 0; i != NumElts; i++) {
4317     SmallVector<SDValue, 4> ScalarOps;
4318     for (SDValue Op : Ops) {
4319       EVT InSVT = Op.getValueType().getScalarType();
4320       BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
4321       if (!InBV) {
4322         // We've checked that this is UNDEF or a constant of some kind.
4323         if (Op.isUndef())
4324           ScalarOps.push_back(getUNDEF(InSVT));
4325         else
4326           ScalarOps.push_back(Op);
4327         continue;
4328       }
4329
4330       SDValue ScalarOp = InBV->getOperand(i);
4331       EVT ScalarVT = ScalarOp.getValueType();
4332
4333       // Build vector (integer) scalar operands may need implicit
4334       // truncation - do this before constant folding.
4335       if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
4336         ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
4337
4338       ScalarOps.push_back(ScalarOp);
4339     }
4340
4341     // Constant fold the scalar operands.
4342     SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
4343
4344     // Legalize the (integer) scalar constant if necessary.
4345     if (LegalSVT != SVT)
4346       ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4347
4348     // Scalar folding only succeeded if the result is a constant or UNDEF.
4349     if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4350         ScalarResult.getOpcode() != ISD::ConstantFP)
4351       return SDValue();
4352     ScalarResults.push_back(ScalarResult);
4353   }
4354
4355   SDValue V = getBuildVector(VT, DL, ScalarResults);
4356   NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
4357   return V;
4358 }
4359
4360 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4361                               SDValue N1, SDValue N2, const SDNodeFlags Flags) {
4362   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4363   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
4364   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4365   ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4366
4367   // Canonicalize constant to RHS if commutative.
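  // e.g. (add Constant<5>, X) becomes (add X, Constant<5>), so the folds
  // below only need to look for a constant on the RHS.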
4368 if (TLI->isCommutativeBinOp(Opcode)) { 4369 if (N1C && !N2C) { 4370 std::swap(N1C, N2C); 4371 std::swap(N1, N2); 4372 } else if (N1CFP && !N2CFP) { 4373 std::swap(N1CFP, N2CFP); 4374 std::swap(N1, N2); 4375 } 4376 } 4377 4378 switch (Opcode) { 4379 default: break; 4380 case ISD::TokenFactor: 4381 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 4382 N2.getValueType() == MVT::Other && "Invalid token factor!"); 4383 // Fold trivial token factors. 4384 if (N1.getOpcode() == ISD::EntryToken) return N2; 4385 if (N2.getOpcode() == ISD::EntryToken) return N1; 4386 if (N1 == N2) return N1; 4387 break; 4388 case ISD::CONCAT_VECTORS: { 4389 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 4390 SDValue Ops[] = {N1, N2}; 4391 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 4392 return V; 4393 break; 4394 } 4395 case ISD::AND: 4396 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4397 assert(N1.getValueType() == N2.getValueType() && 4398 N1.getValueType() == VT && "Binary operator types must match!"); 4399 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 4400 // worth handling here. 4401 if (N2C && N2C->isNullValue()) 4402 return N2; 4403 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 4404 return N1; 4405 break; 4406 case ISD::OR: 4407 case ISD::XOR: 4408 case ISD::ADD: 4409 case ISD::SUB: 4410 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4411 assert(N1.getValueType() == N2.getValueType() && 4412 N1.getValueType() == VT && "Binary operator types must match!"); 4413 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 4414 // it's worth handling here. 4415 if (N2C && N2C->isNullValue()) 4416 return N1; 4417 break; 4418 case ISD::UDIV: 4419 case ISD::UREM: 4420 case ISD::MULHU: 4421 case ISD::MULHS: 4422 case ISD::MUL: 4423 case ISD::SDIV: 4424 case ISD::SREM: 4425 case ISD::SMIN: 4426 case ISD::SMAX: 4427 case ISD::UMIN: 4428 case ISD::UMAX: 4429 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4430 assert(N1.getValueType() == N2.getValueType() && 4431 N1.getValueType() == VT && "Binary operator types must match!"); 4432 break; 4433 case ISD::FADD: 4434 case ISD::FSUB: 4435 case ISD::FMUL: 4436 case ISD::FDIV: 4437 case ISD::FREM: 4438 if (getTarget().Options.UnsafeFPMath) { 4439 if (Opcode == ISD::FADD) { 4440 // x+0 --> x 4441 if (N2CFP && N2CFP->getValueAPF().isZero()) 4442 return N1; 4443 } else if (Opcode == ISD::FSUB) { 4444 // x-0 --> x 4445 if (N2CFP && N2CFP->getValueAPF().isZero()) 4446 return N1; 4447 } else if (Opcode == ISD::FMUL) { 4448 // x*0 --> 0 4449 if (N2CFP && N2CFP->isZero()) 4450 return N2; 4451 // x*1 --> x 4452 if (N2CFP && N2CFP->isExactlyValue(1.0)) 4453 return N1; 4454 } 4455 } 4456 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 4457 assert(N1.getValueType() == N2.getValueType() && 4458 N1.getValueType() == VT && "Binary operator types must match!"); 4459 break; 4460 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 
4461     assert(N1.getValueType() == VT &&
4462            N1.getValueType().isFloatingPoint() &&
4463            N2.getValueType().isFloatingPoint() &&
4464            "Invalid FCOPYSIGN!");
4465     break;
4466   case ISD::SHL:
4467   case ISD::SRA:
4468   case ISD::SRL:
4469   case ISD::ROTL:
4470   case ISD::ROTR:
4471     assert(VT == N1.getValueType() &&
4472            "Shift operators' return type must be the same as their first arg");
4473     assert(VT.isInteger() && N2.getValueType().isInteger() &&
4474            "Shifts only work on integers");
4475     assert((!VT.isVector() || VT == N2.getValueType()) &&
4476            "Vector shift amounts must have the same type as their first arg");
4477     // Verify that the shift amount VT is big enough to hold valid shift
4478     // amounts. This catches things like trying to shift an i1024 value by an
4479     // i8, which is easy to fall into in generic code that uses
4480     // TLI.getShiftAmountTy().
4481     assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
4482            "Invalid use of small shift amount with oversized value!");
4483
4484     // Always fold shifts of i1 values so the code generator doesn't need to
4485     // handle them. Since we know the size of the shift has to be less than the
4486     // size of the value, the shift/rotate count is guaranteed to be zero.
4487     if (VT == MVT::i1)
4488       return N1;
4489     if (N2C && N2C->isNullValue())
4490       return N1;
4491     break;
4492   case ISD::FP_ROUND_INREG: {
4493     EVT EVT = cast<VTSDNode>(N2)->getVT();
4494     assert(VT == N1.getValueType() && "Not an inreg round!");
4495     assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
4496            "Cannot FP_ROUND_INREG integer types");
4497     assert(EVT.isVector() == VT.isVector() &&
4498            "FP_ROUND_INREG type should be vector iff the operand "
4499            "type is vector!");
4500     assert((!EVT.isVector() ||
4501             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4502            "Vector element counts must match in FP_ROUND_INREG");
4503     assert(EVT.bitsLE(VT) && "Not rounding down!");
4504     (void)EVT;
4505     if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
4506     break;
4507   }
4508   case ISD::FP_ROUND:
4509     assert(VT.isFloatingPoint() &&
4510            N1.getValueType().isFloatingPoint() &&
4511            VT.bitsLE(N1.getValueType()) &&
4512            N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
4513            "Invalid FP_ROUND!");
4514     if (N1.getValueType() == VT) return N1;  // noop conversion.
4515     break;
4516   case ISD::AssertSext:
4517   case ISD::AssertZext: {
4518     EVT EVT = cast<VTSDNode>(N2)->getVT();
4519     assert(VT == N1.getValueType() && "Not an inreg extend!");
4520     assert(VT.isInteger() && EVT.isInteger() &&
4521            "Cannot *_EXTEND_INREG FP types");
4522     assert(!EVT.isVector() &&
4523            "AssertSExt/AssertZExt type should be the vector element type "
4524            "rather than the vector type!");
4525     assert(EVT.bitsLE(VT) && "Not extending!");
4526     if (VT == EVT) return N1;  // noop assertion.
4527     break;
4528   }
4529   case ISD::SIGN_EXTEND_INREG: {
4530     EVT EVT = cast<VTSDNode>(N2)->getVT();
4531     assert(VT == N1.getValueType() && "Not an inreg extend!");
4532     assert(VT.isInteger() && EVT.isInteger() &&
4533            "Cannot *_EXTEND_INREG FP types");
4534     assert(EVT.isVector() == VT.isVector() &&
4535            "SIGN_EXTEND_INREG type should be vector iff the operand "
4536            "type is vector!");
4537     assert((!EVT.isVector() ||
4538             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4539            "Vector element counts must match in SIGN_EXTEND_INREG");
4540     assert(EVT.bitsLE(VT) && "Not extending!");
4541     if (EVT == VT) return N1;  // Not actually extending
4542
4543     auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
4544       unsigned FromBits = EVT.getScalarSizeInBits();
4545       Val <<= Val.getBitWidth() - FromBits;
4546       Val.ashrInPlace(Val.getBitWidth() - FromBits);
4547       return getConstant(Val, DL, ConstantVT);
4548     };
4549
4550     if (N1C) {
4551       const APInt &Val = N1C->getAPIntValue();
4552       return SignExtendInReg(Val, VT);
4553     }
4554     if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
4555       SmallVector<SDValue, 8> Ops;
4556       llvm::EVT OpVT = N1.getOperand(0).getValueType();
4557       for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
4558         SDValue Op = N1.getOperand(i);
4559         if (Op.isUndef()) {
4560           Ops.push_back(getUNDEF(OpVT));
4561           continue;
4562         }
4563         ConstantSDNode *C = cast<ConstantSDNode>(Op);
4564         APInt Val = C->getAPIntValue();
4565         Ops.push_back(SignExtendInReg(Val, OpVT));
4566       }
4567       return getBuildVector(VT, DL, Ops);
4568     }
4569     break;
4570   }
4571   case ISD::EXTRACT_VECTOR_ELT:
4572     assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
4573            "The result of EXTRACT_VECTOR_ELT must be at least as wide as the "
4574            "element type of the vector.");
4575
4576     // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
4577     if (N1.isUndef())
4578       return getUNDEF(VT);
4579
4580     // EXTRACT_VECTOR_ELT of an out-of-bounds element is an UNDEF.
4581     if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
4582       return getUNDEF(VT);
4583
4584     // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
4585     // expanding copies of large vectors from registers.
4586     if (N2C &&
4587         N1.getOpcode() == ISD::CONCAT_VECTORS &&
4588         N1.getNumOperands() > 0) {
4589       unsigned Factor =
4590           N1.getOperand(0).getValueType().getVectorNumElements();
4591       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
4592                      N1.getOperand(N2C->getZExtValue() / Factor),
4593                      getConstant(N2C->getZExtValue() % Factor, DL,
4594                                  N2.getValueType()));
4595     }
4596
4597     // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
4598     // expanding large vector constants.
4599     if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
4600       SDValue Elt = N1.getOperand(N2C->getZExtValue());
4601
4602       if (VT != Elt.getValueType())
4603         // If the vector element type is not legal, the BUILD_VECTOR operands
4604         // are promoted and implicitly truncated, and the result implicitly
4605         // extended. Make that explicit here.
4606         Elt = getAnyExtOrTrunc(Elt, DL, VT);
4607
4608       return Elt;
4609     }
4610
4611     // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
4612     // operations are lowered to scalars.
4613     if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
4614       // If the indices are the same, return the inserted element; if the
4615       // indices are known different, extract the element from
4616       // the original vector.
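      // e.g. extractelt(insertelt(V, X, 1), 1) yields X (suitably extended or
      // truncated if the types differ), while extractelt(insertelt(V, X, 1), 0)
      // becomes extractelt(V, 0).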
4617       SDValue N1Op2 = N1.getOperand(2);
4618       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
4619
4620       if (N1Op2C && N2C) {
4621         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
4622           if (VT == N1.getOperand(1).getValueType())
4623             return N1.getOperand(1);
4624           else
4625             return getSExtOrTrunc(N1.getOperand(1), DL, VT);
4626         }
4627
4628         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
4629       }
4630     }
4631
4632     // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
4633     // when vector types are scalarized and v1iX is legal.
4634     // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
4635     if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
4636         N1.getValueType().getVectorNumElements() == 1) {
4637       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
4638                      N1.getOperand(1));
4639     }
4640     break;
4641   case ISD::EXTRACT_ELEMENT:
4642     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
4643     assert(!N1.getValueType().isVector() && !VT.isVector() &&
4644            (N1.getValueType().isInteger() == VT.isInteger()) &&
4645            N1.getValueType() != VT &&
4646            "Wrong types for EXTRACT_ELEMENT!");
4647
4648     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
4649     // 64-bit integers into 32-bit parts. Instead of building the extract of
4650     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
4651     if (N1.getOpcode() == ISD::BUILD_PAIR)
4652       return N1.getOperand(N2C->getZExtValue());
4653
4654     // EXTRACT_ELEMENT of a constant int is also very common.
4655     if (N1C) {
4656       unsigned ElementSize = VT.getSizeInBits();
4657       unsigned Shift = ElementSize * N2C->getZExtValue();
4658       APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
4659       return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
4660     }
4661     break;
4662   case ISD::EXTRACT_SUBVECTOR:
4663     if (VT.isSimple() && N1.getValueType().isSimple()) {
4664       assert(VT.isVector() && N1.getValueType().isVector() &&
4665              "Extract subvector VTs must be vectors!");
4666       assert(VT.getVectorElementType() ==
4667              N1.getValueType().getVectorElementType() &&
4668              "Extract subvector VTs must have the same element type!");
4669       assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
4670              "Extract subvector must be from larger vector to smaller vector!");
4671
4672       if (N2C) {
4673         assert((VT.getVectorNumElements() + N2C->getZExtValue()
4674                 <= N1.getValueType().getVectorNumElements())
4675                && "Extract subvector overflow!");
4676       }
4677
4678       // Trivial extraction.
4679       if (VT.getSimpleVT() == N1.getSimpleValueType())
4680         return N1;
4681
4682       // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
4683       if (N1.isUndef())
4684         return getUNDEF(VT);
4685
4686       // EXTRACT_SUBVECTOR of CONCAT_VECTORS can be simplified if the pieces of
4687       // the concat have the same type as the extract.
4688       if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
4689           N1.getNumOperands() > 0 &&
4690           VT == N1.getOperand(0).getValueType()) {
4691         unsigned Factor = VT.getVectorNumElements();
4692         return N1.getOperand(N2C->getZExtValue() / Factor);
4693       }
4694
4695       // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
4696       // during shuffle legalization.
4697       if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
4698           VT == N1.getOperand(1).getValueType())
4699         return N1.getOperand(1);
4700     }
4701     break;
4702   }
4703
4704   // Perform trivial constant folding.
  if (SDValue SV =
          FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
    return SV;

  // Constant fold FP operations.
  bool HasFPExceptions = TLI->hasFloatingPointExceptions();
  if (N1CFP) {
    if (N2CFP) {
      APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
      APFloat::opStatus s;
      switch (Opcode) {
      case ISD::FADD:
        s = V1.add(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || s != APFloat::opInvalidOp)
          return getConstantFP(V1, DL, VT);
        break;
      case ISD::FSUB:
        s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || s != APFloat::opInvalidOp)
          return getConstantFP(V1, DL, VT);
        break;
      case ISD::FMUL:
        s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || s != APFloat::opInvalidOp)
          return getConstantFP(V1, DL, VT);
        break;
      case ISD::FDIV:
        s = V1.divide(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || (s != APFloat::opInvalidOp &&
                                 s != APFloat::opDivByZero))
          return getConstantFP(V1, DL, VT);
        break;
      case ISD::FREM:
        s = V1.mod(V2);
        if (!HasFPExceptions || (s != APFloat::opInvalidOp &&
                                 s != APFloat::opDivByZero))
          return getConstantFP(V1, DL, VT);
        break;
      case ISD::FCOPYSIGN:
        V1.copySign(V2);
        return getConstantFP(V1, DL, VT);
      default: break;
      }
    }

    if (Opcode == ISD::FP_ROUND) {
      APFloat V = N1CFP->getValueAPF();    // make copy
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME: need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, DL, VT);
    }
  }

  // Canonicalize an UNDEF to the RHS, even over a constant.
  if (N1.isUndef()) {
    if (TLI->isCommutativeBinOp(Opcode)) {
      std::swap(N1, N2);
    } else {
      switch (Opcode) {
      case ISD::FP_ROUND_INREG:
      case ISD::SIGN_EXTEND_INREG:
      case ISD::SUB:
      case ISD::FSUB:
      case ISD::FDIV:
      case ISD::FREM:
        return getUNDEF(VT);            // fold op(undef, arg2) -> undef
      case ISD::UDIV:
      case ISD::SDIV:
      case ISD::UREM:
      case ISD::SREM:
      case ISD::SRA:
      case ISD::SRL:
      case ISD::SHL:
        return getConstant(0, DL, VT);  // fold op(undef, arg2) -> 0
      }
    }
  }

  // Fold a bunch of operators when the RHS is undef.
  if (N2.isUndef()) {
    switch (Opcode) {
    case ISD::XOR:
      if (N1.isUndef())
        // Handle undef ^ undef -> 0 special case. This is a common
        // idiom (misuse).
        return getConstant(0, DL, VT);
      LLVM_FALLTHROUGH;
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::SUB:
    case ISD::UDIV:
    case ISD::SDIV:
    case ISD::UREM:
    case ISD::SREM:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::SHL:
      return getUNDEF(VT);              // fold op(arg1, undef) -> undef
    case ISD::FADD:
    case ISD::FSUB:
    case ISD::FMUL:
    case ISD::FDIV:
    case ISD::FREM:
      if (getTarget().Options.UnsafeFPMath)
        return N2;
      break;
    case ISD::MUL:
    case ISD::AND:
      return getConstant(0, DL, VT);    // fold op(arg1, undef) -> 0
    case ISD::OR:
      return getAllOnesConstant(DL, VT);
    }
  }

  // Memoize this node if possible.
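  // Nodes that do not produce glue are uniqued in the CSE map: requesting an
  // identical (Opcode, VT, N1, N2) combination returns the existing node,
  // with its flags intersected with the requested ones.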
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {N1, N2};
  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      E->intersectFlagsWith(Flags);
      return SDValue(E, 0);
    }

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    N->setFlags(Flags);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3) {
  // Perform various simplifications.
  switch (Opcode) {
  case ISD::FMA: {
    ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
    ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
    ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
    if (N1CFP && N2CFP && N3CFP) {
      APFloat V1 = N1CFP->getValueAPF();
      const APFloat &V2 = N2CFP->getValueAPF();
      const APFloat &V3 = N3CFP->getValueAPF();
      APFloat::opStatus s =
          V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
      if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
        return getConstantFP(V1, DL, VT);
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::SETCC: {
    // Use FoldSetCC to simplify SETCC's.
    if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
      return V;
    // Vector constant folding.
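    // e.g. a SETCC of two constant BUILD_VECTORs can fold to a constant
    // boolean vector without ever materializing the compare.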
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
      NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
      return V;
    }
    break;
  }
  case ISD::SELECT:
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
      if (N1C->getZExtValue())
        return N2;             // select true, X, Y -> X
      return N3;               // select false, X, Y -> Y
    }

    if (N2 == N3) return N2;   // select C, X, X -> X
    break;
  case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
  case ISD::INSERT_VECTOR_ELT: {
    ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
    // INSERT_VECTOR_ELT into an out-of-bounds element is an UNDEF.
    if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
      return getUNDEF(VT);
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue Index = N3;
    if (VT.isSimple() && N1.getValueType().isSimple() &&
        N2.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             N2.getValueType().isVector() &&
             "Insert subvector VTs must be vectors!");
      assert(VT == N1.getValueType() &&
             "Dest and insert subvector source types must match!");
      assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
             "Insert subvector must be from smaller vector to larger vector!");
      if (isa<ConstantSDNode>(Index)) {
        assert((N2.getValueType().getVectorNumElements() +
                cast<ConstantSDNode>(Index)->getZExtValue()
                <= VT.getVectorNumElements()) &&
               "Insert subvector overflow!");
      }

      // Trivial insertion.
      if (VT.getSimpleVT() == N2.getSimpleValueType())
        return N2;
    }
    break;
  }
  case ISD::BITCAST:
    // Fold bit_convert nodes from a type to themselves.
    if (N1.getValueType() == VT)
      return N1;
    break;
  }

  // Memoize node if it doesn't produce a flag.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {N1, N2, N3};
  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VT, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VT, Ops);
}

/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
/// the incoming stack arguments to be loaded from the stack.
SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
  SmallVector<SDValue, 8> ArgChains;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
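  // A TokenFactor produces no value of its own; it merely merges its operand
  // chains, so everything chained on its result is ordered after all of the
  // argument loads collected here.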
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument.
  for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
       UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0)
          ArgChains.push_back(SDValue(L, 1));

  // Build a tokenfactor for all the chains.
  return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
                              const SDLoc &dl) {
  assert(!Value.isUndef());

  unsigned NumBits = VT.getScalarSizeInBits();
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
    assert(C->getAPIntValue().getBitWidth() == 8);
    APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
    if (VT.isInteger())
      return DAG.getConstant(Val, dl, VT);
    return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
                             VT);
  }

  assert(Value.getValueType() == MVT::i8 &&
         "memset with non-byte fill value?");
  EVT IntVT = VT.getScalarType();
  if (!IntVT.isInteger())
    IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
  if (NumBits > 8) {
    // Use a multiplication with 0x010101... to extend the input to the
    // required length, e.g. 0xAB * 0x01010101 == 0xABABABAB.
    APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
    Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
                        DAG.getConstant(Magic, dl, IntVT));
  }

  if (VT != Value.getValueType() && !VT.isInteger())
    Value = DAG.getBitcast(VT.getScalarType(), Value);
  if (VT != Value.getValueType())
    Value = DAG.getSplatBuildVector(VT, dl, Value);

  return Value;
}

/// getMemsetStringVal - Similar to getMemsetValue, except this is only
/// used when a memcpy is turned into a memset because the source is a
/// constant string pointer.
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
                                  const TargetLowering &TLI,
                                  const ConstantDataArraySlice &Slice) {
  // Handle vector with all elements zero.
  if (Slice.Array == nullptr) {
    if (VT.isInteger())
      return DAG.getConstant(0, dl, VT);
    if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
      return DAG.getConstantFP(0.0, dl, VT);
    if (VT.isVector()) {
      unsigned NumElts = VT.getVectorNumElements();
      MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32
                                                          : MVT::i64;
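      // Zero-fill via an integer vector of the same total width, then
      // bitcast the result to the requested vector type.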
      return DAG.getNode(ISD::BITCAST, dl, VT,
                         DAG.getConstant(0, dl,
                                         EVT::getVectorVT(*DAG.getContext(),
                                                          EltVT, NumElts)));
    }
    llvm_unreachable("Expected type!");
  }

  assert(!VT.isVector() && "Can't handle vector type here!");
  unsigned NumVTBits = VT.getSizeInBits();
  unsigned NumVTBytes = NumVTBits / 8;
  unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));

  APInt Val(NumVTBits, 0);
  if (DAG.getDataLayout().isLittleEndian()) {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
  } else {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
  }

  // If the "cost" of materializing the integer immediate is less than the cost
  // of a load, then it is cost effective to turn the load into the immediate.
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
    return DAG.getConstant(Val, dl, VT);
  return SDValue(nullptr, 0);
}

SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
                                           const SDLoc &DL) {
  EVT VT = Base.getValueType();
  return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
}

/// Returns true if memcpy source is constant data.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
  uint64_t SrcDelta = 0;
  GlobalAddressSDNode *G = nullptr;
  if (Src.getOpcode() == ISD::GlobalAddress)
    G = cast<GlobalAddressSDNode>(Src);
  else if (Src.getOpcode() == ISD::ADD &&
           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
           Src.getOperand(1).getOpcode() == ISD::Constant) {
    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
  }
  if (!G)
    return false;

  return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
                                  SrcDelta + G->getOffset());
}

/// Determines the optimal series of memory ops to replace the memset / memcpy.
/// Return true if the number of memory ops is below the threshold (Limit).
/// It returns the types of the sequence of memory ops to perform
/// memset / memcpy by reference.
static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
                                     unsigned Limit, uint64_t Size,
                                     unsigned DstAlign, unsigned SrcAlign,
                                     bool IsMemset,
                                     bool ZeroMemset,
                                     bool MemcpyStrSrc,
                                     bool AllowOverlap,
                                     unsigned DstAS, unsigned SrcAS,
                                     SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
         "Expecting memcpy / memset source to meet alignment requirement!");
  // If 'SrcAlign' is zero, that means the memory operation does not need to
  // load the value, i.e. memset or memcpy from constant string. Otherwise,
  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
  // is the specified alignment of the memory operation. If it is zero, that
  // means it's possible to change the alignment of the destination.
  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  // not need to be loaded.
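  // The loop below greedily covers 'Size' bytes with the widest type that
  // still fits. For example (assuming i64 is chosen and remains legal),
  // 13 bytes lower to an i64 + i32 + i8 sequence, or to a pair of
  // overlapping i64 ops when fast unaligned accesses make that profitable.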
  EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
                                   IsMemset, ZeroMemset, MemcpyStrSrc,
                                   DAG.getMachineFunction());

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    while (DstAlign && DstAlign < VT.getSizeInBits() / 8 &&
           !TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign))
      VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!TLI.isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  while (Size != 0) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            TLI.isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 TLI.isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing one (or a pair of) unaligned and overlapping loads / stores.
      // FIXME: Only do this for 64-bit or more since we don't have a proper
      // cost model for unaligned loads / stores.
      bool Fast;
      if (NumMemOps && AllowOverlap &&
          VTSize >= 8 && NewVTSize < Size &&
          TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
  // On Darwin, -Os means optimize for size without hurting performance, so
  // only really optimize for size when -Oz (MinSize) is used.
  if (MF.getTarget().getTargetTriple().isOSDarwin())
    return MF.getFunction().optForMinSize();
  return MF.getFunction().optForSize();
}

static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue Chain, SDValue Dst, SDValue Src,
                                       uint64_t Size, unsigned Align,
                                       bool isVol, bool AlwaysInline,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Turn a memcpy of undef to nop.
  if (Src.isUndef())
    return Chain;

  // Expand memcpy to a series of load and store ops if the size operand falls
  // below a certain threshold.
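  // The expansion happens in two steps: FindOptimalMemOpLowering picks the
  // sequence of memory-op types, then the loop below emits one load/store
  // pair (or one store of a constant) per chosen type.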
  // TODO: In the AlwaysInline case, if the size is big then generate a loop
  // rather than maybe a humongous number of loads and stores.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  ConstantDataArraySlice Slice;
  bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
  bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);

  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align),
                                (isZeroConstant ? 0 : SrcAlign),
                                false, false, CopyFromConstant, true,
                                DstPtrInfo.getAddrSpace(),
                                SrcPtrInfo.getAddrSpace(),
                                DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);

    // Don't promote to an alignment that would require dynamic stack
    // realignment.
    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    if (!TRI->needsStackRealignment(MF))
      while (NewAlign > Align &&
             DL.exceedsNaturalStackAlignment(NewAlign))
        NewAlign /= 2;

    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  uint64_t SrcOff = 0, DstOff = 0;
  for (unsigned i = 0; i != NumMemOps; ++i) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      SrcOff -= VTSize - Size;
      DstOff -= VTSize - Size;
    }

    if (CopyFromConstant &&
        (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
      // It's unlikely a store of a vector immediate can be done in a single
      // instruction. It would require a load from a constantpool first.
      // We only handle zero vectors here.
      // FIXME: Handle other cases where store of vector immediate is done in
      // a single instruction.
      ConstantDataArraySlice SubSlice;
      if (SrcOff < Slice.Length) {
        SubSlice = Slice;
        SubSlice.move(SrcOff);
      } else {
        // This is an out-of-bounds access and hence UB. Pretend we read zero.
        SubSlice.Array = nullptr;
        SubSlice.Offset = 0;
        SubSlice.Length = VTSize;
      }
      Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
      if (Value.getNode())
        Store = DAG.getStore(Chain, dl, Value,
                             DAG.getMemBasePlusOffset(Dst, DstOff, dl),
                             DstPtrInfo.getWithOffset(DstOff), Align,
                             MMOFlags);
    }

    if (!Store.getNode()) {
      // The type might not be legal for the target. This should only happen
      // if the type is smaller than a legal type, as on PPC, so the right
      // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
      // to Load/Store if NVT==VT.
      // FIXME: does the case above also need this?
      EVT NVT = TLI.getTypeToTransformTo(C, VT);
      assert(NVT.bitsGE(VT));

      bool isDereferenceable =
          SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
      MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
      if (isDereferenceable)
        SrcMMOFlags |= MachineMemOperand::MODereferenceable;

      Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
                             DAG.getMemBasePlusOffset(Src, SrcOff, dl),
                             SrcPtrInfo.getWithOffset(SrcOff), VT,
                             MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
      OutChains.push_back(Value.getValue(1));
      Store = DAG.getTruncStore(
          Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
          DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
    }
    OutChains.push_back(Store);
    SrcOff += VTSize;
    DstOff += VTSize;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                        SDValue Chain, SDValue Dst, SDValue Src,
                                        uint64_t Size, unsigned Align,
                                        bool isVol, bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
  // Turn a memmove of undef to nop.
  if (Src.isUndef())
    return Chain;

  // Expand memmove to a series of load and store ops if the size operand falls
  // below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);

  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align), SrcAlign,
                                false, false, false, false,
                                DstPtrInfo.getAddrSpace(),
                                SrcPtrInfo.getAddrSpace(),
                                DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
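  // Unlike the memcpy expansion, all loads are emitted before any store so
  // that an overlapping source region is read in full before it is
  // clobbered.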
  uint64_t SrcOff = 0, DstOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value;

    bool isDereferenceable =
        SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
    MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
    if (isDereferenceable)
      SrcMMOFlags |= MachineMemOperand::MODereferenceable;

    Value =
        DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
                    SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
    SrcOff += VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Store;

    Store = DAG.getStore(Chain, dl, LoadValues[i],
                         DAG.getMemBasePlusOffset(Dst, DstOff, dl),
                         DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
    OutChains.push_back(Store);
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

/// \brief Lower the call to 'memset' intrinsic function into a series of store
/// operations.
///
/// \param DAG Selection DAG where lowered code is placed.
/// \param dl Link to corresponding IR location.
/// \param Chain Control flow dependency.
/// \param Dst Pointer to destination memory location.
/// \param Src Value of byte to write into the memory.
/// \param Size Number of bytes to write.
/// \param Align Alignment of the destination in bytes.
/// \param isVol True if destination is volatile.
/// \param DstPtrInfo IR information on the memory pointer.
/// \returns New head in the control flow, if lowering was successful, empty
/// SDValue otherwise.
///
/// The function tries to replace 'llvm.memset' intrinsic with several store
/// operations and value calculation code. This is usually profitable for small
/// memory size.
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
                               SDValue Chain, SDValue Dst, SDValue Src,
                               uint64_t Size, unsigned Align, bool isVol,
                               MachinePointerInfo DstPtrInfo) {
  // Turn a memset of undef to nop.
  if (Src.isUndef())
    return Chain;

  // Expand memset to a series of load/store ops if the size operand
  // falls below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  bool IsZeroVal =
      isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
  if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
                                Size, (DstAlignCanChange ? 0 : Align), 0,
                                true, IsZeroVal, false, true,
                                DstPtrInfo.getAddrSpace(), ~0u,
                                DAG, TLI))
    return SDValue();
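  // If the destination is a local stack object, its alignment may be raised
  // below to the ABI alignment of the first chosen store type.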
  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  uint64_t DstOff = 0;
  unsigned NumMemOps = MemOps.size();

  // Find the largest store and generate the bit pattern for it.
  EVT LargestVT = MemOps[0];
  for (unsigned i = 1; i < NumMemOps; i++)
    if (MemOps[i].bitsGT(LargestVT))
      LargestVT = MemOps[i];
  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);

  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      DstOff -= VTSize - Size;
    }

    // If this store is smaller than the largest store, see whether we can get
    // the smaller value for free with a truncate.
    SDValue Value = MemSetValue;
    if (VT.bitsLT(LargestVT)) {
      if (!LargestVT.isVector() && !VT.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
      else
        Value = getMemsetValue(Src, VT, DAG, dl);
    }
    assert(Value.getValueType() == VT && "Value with wrong type.");
    SDValue Store = DAG.getStore(
        Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
        DstPtrInfo.getWithOffset(DstOff), Align,
        isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
    OutChains.push_back(Store);
    DstOff += VTSize;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
                                            unsigned AS) {
  // Lowering memcpy / memset / memmove intrinsics to calls is only valid if
  // all pointer operands can be losslessly bitcasted to pointers of address
  // space 0.
  if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
    report_fatal_error("cannot lower memory intrinsic in address space " +
                       Twine(AS));
  }
}

SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, unsigned Align,
                                bool isVol, bool AlwaysInline, bool isTailCall,
                                MachinePointerInfo DstPtrInfo,
                                MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memcpy to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memcpy with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                             ConstantSize->getZExtValue(),
                                             Align, isVol, false, DstPtrInfo,
                                             SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memcpy with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemcpy(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
        DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // If we really need inline code and the target declined to provide it,
  // use a (potentially long) sequence of loads and stores.
  if (AlwaysInline) {
    assert(ConstantSize && "AlwaysInline requires a constant size!");
    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                   ConstantSize->getZExtValue(), Align, isVol,
                                   true, DstPtrInfo, SrcPtrInfo);
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());

  // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
  // memcpy is not guaranteed to be safe. libc memcpys aren't required to
  // respect volatile, so they may do things like read or write memory
  // beyond the given memory regions. But fixing this isn't easy, and most
  // people don't care.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                 SDValue Src, SDValue Size, unsigned Align,
                                 bool isVol, bool isTailCall,
                                 MachinePointerInfo DstPtrInfo,
                                 MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memmove to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memmove with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
        getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
                                 ConstantSize->getZExtValue(), Align, isVol,
                                 false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memmove with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemmove(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());

  // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
  // not be safe. See memcpy above for more details.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, unsigned Align,
                                bool isVol, bool isTailCall,
                                MachinePointerInfo DstPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memset to stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memset with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
        getMemsetStores(*this, dl, Chain, Dst, Src,
                        ConstantSize->getZExtValue(), Align, isVol, DstPtrInfo);

    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memset with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemset(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());

  // Emit a library call.
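  // Note the C signature of memset: the fill value is an 'int' while the
  // other operands are pointer-sized, hence the per-argument types below.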
  Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Dst; Entry.Ty = IntPtrTy;
  Args.push_back(Entry);
  Entry.Node = Src;
  Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
  Args.push_back(Entry);
  Entry.Node = Size;
  Entry.Ty = IntPtrTy;
  Args.push_back(Entry);

  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO) {
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  AddNodeIDNode(ID, Opcode, VTList, Ops);
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                    VTList, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getAtomicCmpSwap(
    unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain,
    SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
    unsigned Alignment, AtomicOrdering SuccessOrdering,
    AtomicOrdering FailureOrdering, SyncScope::ID SSID) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();

  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
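  // A cmpxchg both reads and writes memory, so the memoperand carries both
  // MOLoad and MOStore.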
  auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad |
               MachineMemOperand::MOStore;
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
                              AAMDNodes(), nullptr, SSID, SuccessOrdering,
                              FailureOrdering);

  return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO);
}

SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
                                       EVT MemVT, SDVTList VTs, SDValue Chain,
                                       SDValue Ptr, SDValue Cmp, SDValue Swp,
                                       MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDValue Chain, SDValue Ptr, SDValue Val,
                                const Value *PtrVal, unsigned Alignment,
                                AtomicOrdering Ordering,
                                SyncScope::ID SSID) {
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  // An atomic store does not load. An atomic load does not store.
  // (An atomicrmw obviously both loads and stores.)
  // For now, atomics are considered to be volatile always, and they are
  // chained as such.
  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  auto Flags = MachineMemOperand::MOVolatile;
  if (Opcode != ISD::ATOMIC_STORE)
    Flags |= MachineMemOperand::MOLoad;
  if (Opcode != ISD::ATOMIC_LOAD)
    Flags |= MachineMemOperand::MOStore;

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
                              MemVT.getStoreSize(), Alignment, AAMDNodes(),
                              nullptr, SSID, Ordering);

  return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDValue Chain, SDValue Ptr, SDValue Val,
                                MachineMemOperand *MMO) {
  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
          Opcode == ISD::ATOMIC_LOAD_SUB ||
          Opcode == ISD::ATOMIC_LOAD_AND ||
          Opcode == ISD::ATOMIC_LOAD_CLR ||
          Opcode == ISD::ATOMIC_LOAD_OR ||
          Opcode == ISD::ATOMIC_LOAD_XOR ||
          Opcode == ISD::ATOMIC_LOAD_NAND ||
          Opcode == ISD::ATOMIC_LOAD_MIN ||
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");

  EVT VT = Val.getValueType();

  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other)
                                             : getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Val};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                EVT VT, SDValue Chain, SDValue Ptr,
                                MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
  if (Ops.size() == 1)
    return Ops[0];

  SmallVector<EVT, 4> VTs;
  VTs.reserve(Ops.size());
  for (unsigned i = 0; i < Ops.size(); ++i)
    VTs.push_back(Ops[i].getValueType());
  return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
}

SDValue SelectionDAG::getMemIntrinsicNode(
    unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
    EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
    MachineMemOperand::Flags Flags, unsigned Size) {
  if (Align == 0)  // Ensure that codegen never sees alignment 0
    Align = getEVTAlignment(MemVT);

  if (!Size)
    Size = MemVT.getStoreSize();

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);

  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}

SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
                                          SDVTList VTList,
                                          ArrayRef<SDValue> Ops, EVT MemVT,
                                          MachineMemOperand *MMO) {
  assert((Opcode == ISD::INTRINSIC_VOID ||
          Opcode == ISD::INTRINSIC_W_CHAIN ||
          Opcode == ISD::PREFETCH ||
          Opcode == ISD::LIFETIME_START ||
          Opcode == ISD::LIFETIME_END ||
          ((int)Opcode <= std::numeric_limits<int>::max() &&
           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
         "Opcode is not a memory-accessing opcode!");

  // Memoize the node unless it returns a flag.
  MemIntrinsicSDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
        Opcode, dl.getIROrder(), VTList, MemVT, MMO));
    ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      return SDValue(E, 0);
    }

    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(),
                                      dl.getDebugLoc(), VTList, MemVT, MMO);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(),
                                      dl.getDebugLoc(), VTList, MemVT, MMO);
    createOperands(N, Ops);
  }
  InsertNode(N);
  return SDValue(N, 0);
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
                                           SelectionDAG &DAG, SDValue Ptr,
                                           int64_t Offset = 0) {
  // If this is FI+Offset, we can model it.
  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
    return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                             FI->getIndex(), Offset);

  // If this is (FI+Offset1)+Offset2, we can model it.
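  // e.g. a pointer of the form (add (FrameIndex 3), 16) with Offset == 4
  // maps to fixed stack slot 3 at offset 20.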
  if (Ptr.getOpcode() != ISD::ADD ||
      !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
      !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
    return Info;

  int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
  return MachinePointerInfo::getFixedStack(
      DAG.getMachineFunction(), FI,
      Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
                                           SelectionDAG &DAG, SDValue Ptr,
                                           SDValue OffsetOp) {
  // If the 'Offset' value is a constant, delegate to the integer-offset form;
  // an undef offset is treated as zero. Anything else we can't model.
  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
    return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
  if (OffsetOp.isUndef())
    return InferPointerInfo(Info, DAG, Ptr);
  return Info;
}

SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset,
                              MachinePointerInfo PtrInfo, EVT MemVT,
                              unsigned Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);
  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
  // clients.
  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
  return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}

SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset, EVT MemVT,
                              MachineMemOperand *MMO) {
  if (VT == MemVT) {
    ExtType = ISD::NON_EXTLOAD;
  } else if (ExtType == ISD::NON_EXTLOAD) {
    assert(VT == MemVT && "Non-extending load from different memory type!");
  } else {
    // Extending load.
    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
           "Should only be an extending load, not truncating!");
    assert(VT.isInteger() == MemVT.isInteger() &&
           "Cannot convert from FP to Int or Int -> FP!");
    assert(VT.isVector() == MemVT.isVector() &&
           "Cannot use an ext load to convert to or from a vector!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
           "Cannot use an ext load to change the number of vector elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");

  SDVTList VTs = Indexed ?
      getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
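  // Pre/post-indexed loads produce a second result: the incremented or
  // decremented base pointer.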
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                  ExtType, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachinePointerInfo PtrInfo,
                              unsigned Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 unsigned Alignment,
                                 MachineMemOperand::Flags MMOFlags,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
                 MemVT, Alignment, MMOFlags, AAInfo);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
                                     SDValue Base, SDValue Offset,
                                     ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
  // Don't propagate the invariant or dereferenceable flags.
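  // The indexed form addresses memory through a new base/offset pair, and
  // those properties are not known to hold for the new address.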
  auto MMOFlags =
      LD->getMemOperand()->getFlags() &
      ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
                 LD->getAAInfo());
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachinePointerInfo PtrInfo,
                               unsigned Alignment,
                               MachineMemOperand::Flags MMOFlags,
                               const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(Val.getValueType());

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
  return getStore(Chain, dl, Val, Ptr, MMO);
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachineMemOperand *MMO) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, false, VT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, MachinePointerInfo PtrInfo,
                                    EVT SVT, unsigned Alignment,
                                    MachineMemOperand::Flags MMOFlags,
                                    const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(SVT);

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, EVT SVT,
                                    MachineMemOperand *MMO) {
  EVT VT = Val.getValueType();

  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (VT == SVT)
    return getStore(Chain, dl, Val, Ptr, MMO);

  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be a truncating store, not extending!");
  assert(VT.isInteger() == SVT.isInteger() &&
         "Can't do FP-INT conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, true, SVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   ST->isTruncatingStore(), ST->getMemoryVT(),
                                   ST->getMemOperand());
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                                    SDValue Ptr, SDValue Mask, SDValue Src0,
                                    EVT MemVT, MachineMemOperand *MMO,
                                    ISD::LoadExtType ExtTy, bool isExpanding) {
  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
      dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                        ExtTy, isExpanding, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, EVT SVT,
                                    MachineMemOperand *MMO) {
  EVT VT = Val.getValueType();

  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (VT == SVT)
    return getStore(Chain, dl, Val, Ptr, MMO);

  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be a truncating store, not extending!");
  assert(VT.isInteger() == SVT.isInteger() &&
         "Can't do FP-INT conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, true, SVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   ST->isTruncatingStore(), ST->getMemoryVT(),
                                   ST->getMemOperand());
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                                    SDValue Ptr, SDValue Mask, SDValue Src0,
                                    EVT MemVT, MachineMemOperand *MMO,
                                    ISD::LoadExtType ExtTy, bool isExpanding) {
  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
      dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                        ExtTy, isExpanding, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
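
// Lane-wise semantics note (added commentary): for each vector lane i, a
// masked load yields Mask[i] ? memory at (Ptr + i) : Src0[i], i.e. Src0
// supplies the pass-through value for disabled lanes; with isExpanding, the
// enabled lanes instead read consecutive locations starting at Ptr.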

SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
                                     SDValue Val, SDValue Ptr, SDValue Mask,
                                     EVT MemVT, MachineMemOperand *MMO,
                                     bool IsTruncating, bool IsCompressing) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Mask, Val };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
      dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                         IsTruncating, IsCompressing, MemVT,
                                         MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
                                      ArrayRef<SDValue> Ops,
                                      MachineMemOperand *MMO) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
      dl.getIROrder(), VTs, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                          VTs, VT, MMO);
  createOperands(N, Ops);

  assert(N->getValue().getValueType() == N->getValueType(0) &&
         "Incompatible type of the PassThru value in MaskedGatherSDNode");
  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() ==
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
                                       ArrayRef<SDValue> Ops,
                                       MachineMemOperand *MMO) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
      dl.getIROrder(), VTs, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, VT, MMO);
  createOperands(N, Ops);

  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() ==
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
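
// Addressing sketch (added commentary): gather and scatter take their six
// operands as { Chain, PassThru-or-Value, Mask, Base, Index, Scale } and,
// for each enabled lane i, access the address Base + Index[i] * Scale. The
// asserts above encode the contract: the mask, index, and data vectors agree
// in element count, and Scale is a constant power of 2.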

SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
                               SDValue Ptr, SDValue SV, unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  unsigned NumOps = Ops.size();
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  switch (Opcode) {
  default: break;
  case ISD::CONCAT_VECTORS:
    // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
    if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::SELECT_CC:
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  case ISD::BR_CC:
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }

  // Memoize nodes.
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;

    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
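
// CSE sketch (added commentary; the opcode, types, and operands are
// arbitrary illustrations): because nodes are memoized through the
// FoldingSet above, requesting the same operation twice yields the same
// node rather than a duplicate:
//
//   SDValue A = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
//   SDValue B = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
//   // Here A == B: the second call found the first node in the CSE map.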

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList, ArrayRef<SDValue> Ops) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops);

#if 0
  switch (Opcode) {
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
  }
#endif

  // Memoize the node unless it returns a glue result.
  SDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
  }
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList) {
  return getNode(Opcode, DL, VTList, None);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList, SDValue N1) {
  SDValue Ops[] = { N1 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList, SDValue N1, SDValue N2) {
  SDValue Ops[] = { N1, N2 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList, SDValue N1, SDValue N2,
                              SDValue N3) {
  SDValue Ops[] = { N1, N2, N3 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList, SDValue N1, SDValue N2,
                              SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList, SDValue N1, SDValue N2,
                              SDValue N3, SDValue N4, SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDVTList SelectionDAG::getVTList(EVT VT) {
  return makeVTList(SDNode::getValueTypeList(VT), 1);
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
  FoldingSetNodeID ID;
  ID.AddInteger(2U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(2);
    Array[0] = VT1;
    Array[1] = VT2;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
  FoldingSetNodeID ID;
  ID.AddInteger(3U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(3);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
  FoldingSetNodeID ID;
  ID.AddInteger(4U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());
  ID.AddInteger(VT4.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(4);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Array[3] = VT4;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}
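
// Interning note (added commentary): getVTList always hands back a pointer
// into a uniqued table (the static per-type lists for a single VT, VTListMap
// for longer lists), so repeated requests for the same type combination
// share one underlying EVT array and SDVTLists can be compared cheaply.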

SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
  unsigned NumVTs = VTs.size();
  FoldingSetNodeID ID;
  ID.AddInteger(NumVTs);
  for (unsigned index = 0; index < NumVTs; index++) {
    ID.AddInteger(VTs[index].getRawBits());
  }

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(NumVTs);
    std::copy(VTs.begin(), VTs.end(), Array);
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands.  If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists.  If the resultant node does not exist in the DAG, the
/// input node is returned.  As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op == N->getOperand(0)) return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  N->OperandList[0].set(Op);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
    return N;  // No operands changed, just return the input node.

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  if (N->OperandList[0] != Op1)
    N->OperandList[0].set(Op1);
  if (N->OperandList[1] != Op2)
    N->OperandList[1].set(Op2);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}
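
// Usage sketch (added commentary; Res, NewOp0, and NewOp1 are illustrative):
// callers must use the returned node, because the update may CSE into a
// pre-existing node instead of mutating N:
//
//   SDNode *Res = DAG.UpdateNodeOperands(N, NewOp0, NewOp1);
//   if (Res != N) {
//     // N was left untouched; an equivalent node already existed.
//   }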

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
  SDValue Ops[] = { Op1, Op2, Op3 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4, SDValue Op5) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  assert(N->getNumOperands() == NumOps &&
         "Update with wrong number of operands");

  // If no operands changed just return the input node.
  if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
    return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  for (unsigned i = 0; i != NumOps; ++i)
    if (N->OperandList[i] != Ops[i])
      N->OperandList[i].set(Ops[i]);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
  // Unlike the code in MorphNodeTo that does this, we don't need to
  // watch for dead nodes here.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
    SDUse &Use = *I++;
    Use.set(SDValue());
  }
}

/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
///
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
  SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
  // Reset the NodeID to -1.
  New->setNodeId(-1);
  if (New != N) {
    ReplaceAllUsesWith(N, New);
    RemoveDeadNode(N);
  }
  return New;
}

/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that an operation is associated with multiple
/// lines. This will make the debugger work better at -O0, where there is a
/// higher probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}
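
// Encoding note (added commentary): SelectNodeTo and getMachineNode record
// machine opcodes in complemented form (~Opcode), which makes the NodeType
// negative and keeps machine opcodes disjoint from the target-independent
// ISD opcode space; SDNode::getMachineOpcode() undoes the complement.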

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node.  If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.  Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner
/// or the legalizer, which maintain worklists that would need to be updated
/// when deleting things.
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
      return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list.  Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  // For MachineNode, initialize the memory references information.
  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
    MN->setMemRefs(nullptr, nullptr);

  // Swap for an appropriately sized array from the recycler.
  removeOperands(N);
  createOperands(N, Ops);

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SDNode *N : DeadNodeSet)
      if (N->use_empty())
        DeadNodes.push_back(N);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP);  // Memoize the new node.
  return N;
}
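
// Usage sketch (added commentary; the opcode and operand list are
// illustrative): as with UpdateNodeOperands, callers must use the returned
// node, since an identical node may already exist:
//
//   SDNode *M = DAG.MorphNodeTo(N, NewOpc, DAG.getVTList(MVT::i32), Ops);
//   if (M != N) {
//     // N is now dead; SelectNodeTo handles this case by calling
//     // ReplaceAllUsesWith(N, M) followed by RemoveDeadNode(N).
//   }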

SDNode *SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
  unsigned OrigOpc = Node->getOpcode();
  unsigned NewOpc;
  bool IsUnary = false;
  bool IsTernary = false;
  switch (OrigOpc) {
  default:
    llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
  case ISD::STRICT_FADD: NewOpc = ISD::FADD; break;
  case ISD::STRICT_FSUB: NewOpc = ISD::FSUB; break;
  case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break;
  case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break;
  case ISD::STRICT_FREM: NewOpc = ISD::FREM; break;
  case ISD::STRICT_FMA: NewOpc = ISD::FMA; IsTernary = true; break;
  case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; IsUnary = true; break;
  case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break;
  case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break;
  case ISD::STRICT_FSIN: NewOpc = ISD::FSIN; IsUnary = true; break;
  case ISD::STRICT_FCOS: NewOpc = ISD::FCOS; IsUnary = true; break;
  case ISD::STRICT_FEXP: NewOpc = ISD::FEXP; IsUnary = true; break;
  case ISD::STRICT_FEXP2: NewOpc = ISD::FEXP2; IsUnary = true; break;
  case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; IsUnary = true; break;
  case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; IsUnary = true; break;
  case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; IsUnary = true; break;
  case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; IsUnary = true; break;
  case ISD::STRICT_FNEARBYINT:
    NewOpc = ISD::FNEARBYINT;
    IsUnary = true;
    break;
  }

  // We're taking this node out of the chain, so we need to re-link things.
  SDValue InputChain = Node->getOperand(0);
  SDValue OutputChain = SDValue(Node, 1);
  ReplaceAllUsesOfValueWith(OutputChain, InputChain);

  SDVTList VTs = getVTList(Node->getOperand(1).getValueType());
  SDNode *Res = nullptr;
  if (IsUnary)
    Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1) });
  else if (IsTernary)
    Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
                                           Node->getOperand(2),
                                           Node->getOperand(3)});
  else
    Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
                                           Node->getOperand(2) });

  // MorphNodeTo can operate in two ways: if an existing node with the
  // specified operands exists, it can just return it.  Otherwise, it
  // updates the node in place to have the requested operands.
  if (Res == Node) {
    // If we updated the node in place, reset the node ID.  To the isel,
    // this should be just like a newly allocated machine node.
    Res->setNodeId(-1);
  } else {
    ReplaceAllUsesWith(Node, Res);
    RemoveDeadNode(Node);
  }

  return Res;
}
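
// Shape sketch (added commentary): a strict node such as
//   {res, outchain} = STRICT_FADD(inchain, a, b)
// becomes res = FADD(a, b). The code above first reroutes every user of
// outchain to inchain, then morphs the node with the chain operand dropped.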

/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node.  If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            ArrayRef<EVT> ResultTys,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(ResultTys);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
                                            SDVTList VTs,
                                            ArrayRef<SDValue> Ops) {
  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = nullptr;

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, Ops);
    IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
    }
  }

  // Allocate a new MachineSDNode.
  N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);

  if (DoCSE)
    CSEMap.InsertNode(N, IP);

  InsertNode(N);
  return N;
}
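
// Usage sketch (added commentary; TargetOpc::ADDrr is a hypothetical
// stand-in for a real target instruction): instruction selectors build
// machine nodes directly, e.g.
//
//   MachineSDNode *MN =
//       CurDAG->getMachineNode(TargetOpc::ADDrr, DL, MVT::i32, LHS, RHS);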

/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                             SDValue Operand) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                  VT, Operand, SRIdxVal);
  return SDValue(Subreg, 0);
}

/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                            SDValue Operand, SDValue Subreg) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                  VT, Operand, Subreg, SRIdxVal);
  return SDValue(Result, 0);
}

/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
                                      ArrayRef<SDValue> Ops,
                                      const SDNodeFlags Flags) {
  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
      E->intersectFlagsWith(Flags);
      return E;
    }
  }
  return nullptr;
}
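
// Usage sketch (added commentary; the subregister index is target-specific
// and hypothetical): extracting the low 32 bits of a 64-bit value might look
// like
//
//   SDValue Lo =
//       DAG.getTargetExtractSubreg(TargetNS::sub_32, DL, MVT::i32, Reg64);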

/// getDbgValue - Creates an SDDbgValue node.
///
/// SDNode
SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
                                      SDNode *N, unsigned R, bool IsIndirect,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
}

/// Constant
SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
                                              DIExpression *Expr,
                                              const Value *C,
                                              const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
}

/// FrameIndex
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
                                                DIExpression *Expr, unsigned FI,
                                                const DebugLoc &DL,
                                                unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, DL, O);
}
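
// Orientation note (added commentary): the three factory functions above
// correspond to the three SDDbgValue kinds: a value produced by an SDNode,
// a constant, and a frame index. transferDbgValues below only moves the
// SDNode-based kind, since the other two do not refer to a DAG value.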

void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
                                     unsigned OffsetInBits, unsigned SizeInBits,
                                     bool InvalidateDbg) {
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  assert(FromNode && ToNode && "Can't modify dbg values");

  // PR35338
  // TODO: assert(From != To && "Redundant dbg value transfer");
  // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
  if (From == To || FromNode == ToNode)
    return;

  if (!FromNode->getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
    if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
      continue;

    // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");

    // Just transfer the dbg value attached to From.
    if (Dbg->getResNo() != From.getResNo())
      continue;

    DIVariable *Var = Dbg->getVariable();
    auto *Expr = Dbg->getExpression();
    // If a fragment is requested, update the expression.
    if (SizeInBits) {
      // When splitting a larger (e.g., sign-extended) value whose
      // lower bits are described with an SDDbgValue, do not attempt
      // to transfer the SDDbgValue to the upper bits.
      if (auto FI = Expr->getFragmentInfo())
        if (OffsetInBits + SizeInBits > FI->SizeInBits)
          continue;
      auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
                                                             SizeInBits);
      if (!Fragment)
        continue;
      Expr = *Fragment;
    }
    // Clone the SDDbgValue and move it to To.
    SDDbgValue *Clone =
        getDbgValue(Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(),
                    Dbg->getDebugLoc(), Dbg->getOrder());
    ClonedDVs.push_back(Clone);

    if (InvalidateDbg)
      Dbg->setIsInvalidated();
  }

  for (SDDbgValue *Dbg : ClonedDVs)
    AddDbgValue(Dbg, ToNode, false);
}

void SelectionDAG::salvageDebugInfo(SDNode &N) {
  if (!N.getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (auto DV : GetDbgValues(&N)) {
    if (DV->isInvalidated())
      continue;
    switch (N.getOpcode()) {
    default:
      break;
    case ISD::ADD:
      SDValue N0 = N.getOperand(0);
      SDValue N1 = N.getOperand(1);
      if (!isConstantIntBuildVectorOrConstantInt(N0) &&
          isConstantIntBuildVectorOrConstantInt(N1)) {
        uint64_t Offset = N.getConstantOperandVal(1);
        // Rewrite an ADD constant node into a DIExpression. Since we are
        // performing arithmetic to compute the variable's *value* in the
        // DIExpression, we need to mark the expression with a
        // DW_OP_stack_value.
        auto *DIExpr = DV->getExpression();
        DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset,
                                       DIExpression::NoDeref,
                                       DIExpression::WithStackValue);
        SDDbgValue *Clone =
            getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
                        DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
        ClonedDVs.push_back(Clone);
        DV->setIsInvalidated();
        DEBUG(dbgs() << "SALVAGE: Rewriting"; N0.getNode()->dumprFull(this);
              dbgs() << " into " << *DIExpr << '\n');
      }
    }
  }

  for (SDDbgValue *Dbg : ClonedDVs)
    AddDbgValue(Dbg, Dbg->getSDNode(), false);
}

namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
      : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

} // end anonymous namespace
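
// Pattern note (added commentary): RAUWUpdateListener keeps the use
// iterators in the functions below valid while replacement recursively
// deletes nodes; any code that holds iterators across DAG mutations needs
// an equivalent DAGUpdateListener.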

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses of with self");

  // Preserve Debug Values
  transferDbgValues(FromN, To);

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // too. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Preserve Debug Info. Only do this if there's a use.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    if (From->hasAnyUseOfValue(i)) {
      assert((i < To->getNumValues()) && "Invalid To location");
      transferDbgValues(SDValue(From, i), SDValue(To, i));
    }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}
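
// Usage sketch (added commentary; AddNode and X are illustrative): the
// single-result form is how a combine typically commits a rewrite, e.g.
// replacing (add x, 0) by x:
//
//   DAG.ReplaceAllUsesWith(SDValue(AddNode, 0), X);
//   // Users of AddNode now point at X; CSE may merge nodes recursively.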

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values.  To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1)  // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Preserve Debug Info.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    transferDbgValues(SDValue(From, i), *To);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
      if (To->getNode()->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To[getRoot().getResNo()]));
}

/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Preserve Debug Info.
  transferDbgValues(From, To);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}

namespace {

/// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
/// to record information about a use.
struct UseMemo {
  SDNode *User;
  unsigned Index;
  SDUse *Use;
};

/// operator< - Sort Memos by User.
bool operator<(const UseMemo &L, const UseMemo &R) {
  return (intptr_t)L.User < (intptr_t)R.User;
}

} // end anonymous namespace

void SelectionDAG::updateDivergence(SDNode *N) {
  if (TLI->isSDNodeAlwaysUniform(N))
    return;
  bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
  for (auto &Op : N->ops()) {
    if (Op.Val.getValueType() != MVT::Other)
      IsDivergent |= Op.getNode()->isDivergent();
  }
  if (N->SDNodeBits.IsDivergent != IsDivergent) {
    N->SDNodeBits.IsDivergent = IsDivergent;
    for (auto U : N->uses()) {
      updateDivergence(U);
    }
  }
}

void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
  DenseMap<SDNode *, unsigned> Degree;
  Order.reserve(AllNodes.size());
  for (auto &N : allnodes()) {
    unsigned NOps = N.getNumOperands();
    Degree[&N] = NOps;
    if (0 == NOps)
      Order.push_back(&N);
  }
  for (std::vector<SDNode *>::iterator I = Order.begin();
       I != Order.end(); ++I) {
    SDNode *N = *I;
    for (auto U : N->uses()) {
      unsigned &UnsortedOps = Degree[U];
      if (0 == --UnsortedOps)
        Order.push_back(U);
    }
  }
}

void SelectionDAG::VerifyDAGDiverence() {
  std::vector<SDNode *> TopoOrder;
  CreateTopologicalOrder(TopoOrder);
  const TargetLowering &TLI = getTargetLoweringInfo();
  DenseMap<const SDNode *, bool> DivergenceMap;
  for (auto &N : allnodes()) {
    DivergenceMap[&N] = false;
  }
  for (auto N : TopoOrder) {
    bool IsDivergent = DivergenceMap[N];
    bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
    for (auto &Op : N->ops()) {
      if (Op.Val.getValueType() != MVT::Other)
        IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
    }
    if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) {
      DivergenceMap[N] = true;
    }
  }
  for (auto &N : allnodes()) {
    (void)N;
    assert(DivergenceMap[&N] == N.isDivergent() &&
           "Divergence bit inconsistency detected\n");
  }
}
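
// Context note (added commentary): divergence tracking matters for targets
// such as GPUs, where a value is divergent if it may differ between threads
// executing together. updateDivergence above propagates the bit forward
// through non-chain operands; VerifyDAGDiverence recomputes the bits from
// scratch over a topological order and checks them against the incrementally
// maintained ones.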

/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.  The same value
/// may appear in both the From and To list.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  transferDbgValues(*From, *To);

  // Read up all the uses and make records of them. This helps
  // processing new uses that are introduced during the
  // replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
                              E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  llvm::sort(Uses.begin(), Uses.end());

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From.  If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order.  Returns the resulting number of nodes,
/// which is one past the maximum assigned id.
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values. After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = &*I++;
    checkForCycles(N, this);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q(N);
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (SDNode &Node : allnodes()) {
    SDNode *N = &Node;
    checkForCycles(N, this);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P->getIterator() != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (Node.getIterator() == SortedPos) {
#ifndef NDEBUG
      allnodes_iterator I(N);
      SDNode *S = &*++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull(this); dbgs() << "\n";
      dbgs() << "Checking if this is due to cycles\n";
      checkForCycles(this, true);
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  if (SD) {
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, SD, isParameter);
}
/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  if (SD) {
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, SD, isParameter);
}

SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
                                                   SDValue NewMemOp) {
  assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
  // The new memory operation must have the same position as the old load in
  // terms of memory dependency. Create a TokenFactor for the old load and new
  // memory operation and update uses of the old load's output chain to use
  // that TokenFactor.
  SDValue OldChain = SDValue(OldLoad, 1);
  SDValue NewChain = SDValue(NewMemOp.getNode(), 1);
  if (!OldLoad->hasAnyUseOfValue(1))
    return NewChain;

  SDValue TokenFactor =
      getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain);
  ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
  UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
  return TokenFactor;
}

//===----------------------------------------------------------------------===//
// SDNode Class
//===----------------------------------------------------------------------===//

bool llvm::isNullConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

bool llvm::isNullFPConstant(SDValue V) {
  ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
  return Const != nullptr && Const->isZero() && !Const->isNegative();
}

bool llvm::isAllOnesConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isAllOnesValue();
}

bool llvm::isOneConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isOne();
}

bool llvm::isBitwiseNot(SDValue V) {
  return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1));
}

ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here.
    // FIXME: We blindly ignore splats which include undef, which is overly
    // pessimistic.
    if (CN && UndefElements.none() &&
        CN->getValueType(0) == N.getValueType().getScalarType())
      return CN;
  }

  return nullptr;
}

ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);

    if (CN && UndefElements.none())
      return CN;
  }

  return nullptr;
}
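// Example (illustrative sketch): isConstOrConstSplat lets a combine treat a
// scalar constant and a splatted vector constant uniformly. Here N is a
// hypothetical binary node whose second operand may be scalar or vector:
//
//   if (ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1)))
//     if (C->getAPIntValue() == 1)
//       ...;  // Operand 1 is the constant 1 in every lane.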
HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         const DebugLoc &DL,
                                         const GlobalValue *GA, EVT VT,
                                         int64_t o, unsigned char TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
                                         EVT VT, unsigned SrcAS,
                                         unsigned DestAS)
    : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
      SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  MemSDNodeBits.IsVolatile = MMO->isVolatile();
  MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
  MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
  MemSDNodeBits.IsInvariant = MMO->isInvariant();

  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {

struct EVTArray {
  std::vector<EVT> VTs;

  EVTArray() {
    VTs.reserve(MVT::LAST_VALUETYPE);
    for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
      VTs.push_back(MVT((MVT::SimpleValueType)i));
  }
};

} // end anonymous namespace

static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true>> VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node.
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}
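// Example (illustrative sketch): Ld is a hypothetical LoadSDNode*. Result 0
// is the loaded value and result 1 is the chain, so this asks whether the
// loaded value has exactly one user, ignoring users of the chain:
//
//   if (Ld->hasNUsesOfValue(1, 0))
//     ...;  // Safe to fold the load into its single user.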
/// hasAnyUseOfValue - Return true if there are any uses of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// Return true if the only users of N are contained in Nodes.
bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (llvm::any_of(Nodes,
                     [&User](const SDNode *Node) { return User == Node; }))
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (*this == Op)
      return true;
  return false;
}

bool SDNode::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (this == Op.getNode())
      return true;
  return false;
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in; it does not do an exhaustive search.
///
/// Note that we only need to examine chains when we're searching for
/// side-effects; SelectionDAG requires that all side-effects are represented
/// by chains, even if another operand would force a specific ordering. This
/// constraint is necessary to allow transformations like splitting loads.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply; we just want to be able to see through
  // TokenFactors etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel.
  if (getOpcode() == ISD::TokenFactor) {
    // First, try a shallow search.
    if (is_contained((*this)->ops(), Dest)) {
      // We found the chain we want as an operand of this TokenFactor.
      // Essentially, we reach the chain without side-effects if we could
      // serialize the TokenFactor into a simple chain of operations with
      // Dest as the last operation. This is automatically true if the
      // chain has one use: there are no other ordering constraints.
      // If the chain has more than one use, we give up: some other
      // use of Dest might force a side-effect between Dest and the current
      // node.
      if (Dest.hasOneUse())
        return true;
    }
    // Next, try a deep search: check whether every operand of the TokenFactor
    // reaches Dest.
    return llvm::all_of((*this)->ops(), [=](SDValue Op) {
      return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
    });
  }

  // Loads don't have side effects, so look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth - 1);
  }
  return false;
}
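// Example (illustrative sketch): before merging a store St with a load Ld
// (both hypothetical), check that no side-effecting operation can be
// scheduled between them on any chain path:
//
//   if (St->getChain().reachesChainWithoutSideEffects(SDValue(Ld, 1)))
//     ...;  // The store's chain reaches the load's chain result directly.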
bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}

void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
  this->Flags.intersectWith(Flags);
}

SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
            getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                    getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}
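// Example (illustrative sketch): a target lowering hook that scalarizes an
// unsupported vector operation by unrolling it into per-element operations
// and rebuilding the vector. Op is a hypothetical vector-typed SDValue:
//
//   SDValue LowerUnsupported(SDValue Op, SelectionDAG &DAG) {
//     // ResNE == 0 unrolls to the full element count of Op's type.
//     return DAG.UnrollVectorOp(Op.getNode());
//   }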
bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
  auto LocDecomp = BaseIndexOffset::match(LD, *this);

  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType());
    KnownBits Known(IdxWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = 1 << 31;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst.
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != (1 << 31)) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}
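// Example (illustrative sketch): splitting a v8i32 value into two v4i32
// halves during legalization. Vec is a hypothetical v8i32 SDValue:
//
//   EVT LoVT, HiVT;
//   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(Vec.getValueType());
//   // LoVT == HiVT == v4i32 here; SplitVector (below) extracts the halves.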
/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
         N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements smaller than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}
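// Example (illustrative sketch): querying a build_vector node BV
// (hypothetical) for a splatted constant. For <4 x i32> <42, 42, 42, 42>
// this sets SplatBitSize to 32 and SplatValue to 42:
//
//   APInt SplatValue, SplatUndef;
//   unsigned SplatBitSize;
//   bool HasAnyUndefs;
//   if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
//                           HasAnyUndefs, /*MinSplatBits=*/32))
//     ...;  // All lanes carry the same 32-bit pattern.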
SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    assert(getOperand(0).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(0);
  }

  return Splatted;
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}
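// Worked example (illustrative): getConstantFPSplatPow2ToLog2Int lets a
// combine turn a divide by a splatted power-of-two FP constant into an
// exponent adjustment. For a v4f32 splat of 8.0 it returns 3, since 8.0
// converts exactly to the integer 8 == 1 << 3; for 6.0 (not a power of two)
// or 0.1 (not exactly convertible) it returns -1.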
// \brief Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip the chain: it does not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}
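// Example (illustrative sketch): divergence is propagated as operands are
// attached. Given a hypothetical target where READ_TID is a source of
// divergence (per isSDNodeSourceOfDivergence):
//
//   t = READ_TID       // divergent source
//   a = ADD t, c       // divergent: a non-chain operand is divergent
//   s = ADD c, c       // uniform: no divergent operands
//
// unless TLI->isSDNodeAlwaysUniform(Node) forces the node to stay uniform.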
#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}
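// Example (illustrative sketch): a debugging aid during DAG mutation. In
// builds with assertions, force a full-DAG cycle check after a suspect
// transformation (EXPENSIVE_CHECKS builds check unconditionally):
//
//   checkForCycles(&DAG, /*force=*/true);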