//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
#include <utility>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
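  // A value is "valid" for VT iff converting it to VT's semantics is exact;
  // e.g. the double 0.5 converts to f32 losslessly, while 0.1 does not, so
  // only the former is valid for MVT::f32 in this sense.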
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}


/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF)
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    SDValue Zero = N->getOperand(i);
    unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantSDNode or undef.
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantFPSDNode or undef.
bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

/// isScalarToVector - Return true if the specified node is an
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (Op.getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
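  // In the CondCode encoding, bit 1 is the G bit and bit 2 is the L bit (the
  // shifts below depend on this), so e.g. swapping the operands of SETLT
  // yields SETGT and SETULE yields SETUGE, while SETEQ and SETNE map to
  // themselves.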
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}


/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if it is an unsigned comparison. Return zero if the
/// operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns zero if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
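  // For example, SETUGT & SETULT combine to SETUO, which is meaningless for
  // integers and is folded to SETFALSE below.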
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// Add logical or fast math flag values to FoldingSetNodeID value.
static void AddNodeIDFlags(FoldingSetNodeID &ID, unsigned Opcode,
                           const SDNodeFlags *Flags) {
  if (!Flags || !isBinOpWithFlags(Opcode))
    return;

  unsigned RawFlags = Flags->getRawFlags();
  // If no flags are set, do not alter the ID. We must match the ID of nodes
  // that were created without explicitly specifying flags. This also saves time
  // and allows a gradual increase in API usage of the optional optimization
  // flags.
  if (RawFlags != 0)
    ID.AddInteger(RawFlags);
}

static void AddNodeIDFlags(FoldingSetNodeID &ID, const SDNode *N) {
  if (auto *Node = dyn_cast<BinaryWithFlagsSDNode>(N))
    AddNodeIDFlags(ID, Node->getOpcode(), &Node->Flags);
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
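  // Each case below mixes the node's distinguishing payload (constant value,
  // symbol, index, offset, target flags, ...) into the ID so that CSE only
  // unifies nodes that match in every respect, not merely in opcode, types,
  // and operands.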
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  AddNodeIDFlags(ID, N);

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
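  // Note that deleting one node can expose more dead nodes: once N's operand
  // links are dropped, any operand left with no remaining uses is pushed onto
  // the worklist as well.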
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
  return Node;
}


/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, N->getDebugLoc(), InsertPos);
  return Node;
}

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
      UpdateListeners(nullptr) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf) {
  MF = &mf;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
                                            SDVTList VTs, SDValue N1,
                                            SDValue N2,
                                            const SDNodeFlags *Flags) {
  if (isBinOpWithFlags(Opcode)) {
    // If no flags were passed in, use a default flags object.
    SDNodeFlags F;
    if (Flags == nullptr)
      Flags = &F;

    BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
        Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2, *Flags);

    return FN;
  }

  BinarySDNode *N = new (NodeAllocator)
      BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          DebugLoc DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break; // Process only regular (non-target) constant nodes.
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase the debug location from the node if the node is used at several
      // different places, so that we do not propagate one location to all
      // uses, which leads to incorrect debug info.
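      // (For example, if the same i32 0 node is reused by expressions from
      // two different source lines, keeping either line's location would
      // misattribute the shared node.)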
      if (N->getDebugLoc() != DL)
        N->setDebugLoc(DebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() ==
             Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue TrueValue;
  switch (TLI->getBooleanContents(VT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    TrueValue = getConstant(1, DL, VT);
    break;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
                            VT);
    break;
  }
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isT,
                                  bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, SDLoc DL, EVT VT, bool isT,
                                  bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
                                  bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
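  // (Worked example: with i64 expanded to i32, a v2i64 splat of the value
  // 0x0000000100000002 becomes BUILD_VECTOR <0x2, 0x1, 0x2, 0x1> of v4i32,
  // bitcast to v2i64; on a big-endian target the two i32 parts are swapped.)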
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
           TargetLowering::TypeExpandInteger) {
    APInt NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check that the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                           .trunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different from
    // the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
                             getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
                                     Ops));
    return Result;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ?
      ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, DL.getDebugLoc(),
                                           EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
  }
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat& V, SDLoc DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, SDLoc DL, EVT VT,
                                    bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, DL.getDebugLoc(),
                                             EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, SDLoc DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
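  // (For example, with 32-bit pointers an offset of 0xFFFFFFFF is stored as
  // -1.)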
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}


SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ?
      ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = new (NodeAllocator) MCSymbolSDNode(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // If shuffling a splat, try to blend the splat instead. We do this here so
  // that even when this arises during lowering we don't have to re-handle it.
  auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
    BitVector UndefElements;
    SDValue Splat = BV->getSplatValue(&UndefElements);
    if (!Splat)
      return;

    for (int i = 0; i < (int)NElts; ++i) {
      if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + (int)NElts))
        continue;

      // If this input comes from undef, mark it as such.
      if (UndefElements[MaskVec[i] - Offset]) {
        MaskVec[i] = -1;
        continue;
      }

      // If we can blend a non-undef lane, use that instead.
      if (!UndefElements[i])
        MaskVec[i] = i + Offset;
    }
  };
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    BlendSplat(N1BV, 0);
  if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
    BlendSplat(N2BV, NElts);

  // Canonicalize all indices into lhs -> shuffle lhs, undef
  // Canonicalize all indices into rhs -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.getOpcode() == ISD::UNDEF;
  // Re-check whether both sides ended up undef.
  if (N1.getOpcode() == ISD::UNDEF && N2Undef)
    return getUNDEF(VT);

  // If this is an identity shuffle, return that node.
  bool Identity = true, AllSame = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts.
We check that these don't change the number 1616 // (and size) of elements and just change their types. 1617 while (V.getOpcode() == ISD::BITCAST) 1618 V = V->getOperand(0); 1619 1620 // A splat should always show up as a build vector node. 1621 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 1622 BitVector UndefElements; 1623 SDValue Splat = BV->getSplatValue(&UndefElements); 1624 // If this is a splat of an undef, shuffling it is also undef. 1625 if (Splat && Splat.getOpcode() == ISD::UNDEF) 1626 return getUNDEF(VT); 1627 1628 bool SameNumElts = 1629 V.getValueType().getVectorNumElements() == VT.getVectorNumElements(); 1630 1631 // We only have a splat which can skip shuffles if there is a splatted 1632 // value and no undef lanes rearranged by the shuffle. 1633 if (Splat && UndefElements.none()) { 1634 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the 1635 // number of elements matches or the value splatted is a zero constant. 1636 if (SameNumElts) 1637 return N1; 1638 if (auto *C = dyn_cast<ConstantSDNode>(Splat)) 1639 if (C->isNullValue()) 1640 return N1; 1641 } 1642 1643 // If the shuffle itself creates a splat, build the vector directly. 1644 if (AllSame && SameNumElts) { 1645 const SDValue &Splatted = BV->getOperand(MaskVec[0]); 1646 SmallVector<SDValue, 8> Ops(NElts, Splatted); 1647 1648 EVT BuildVT = BV->getValueType(0); 1649 SDValue NewBV = getNode(ISD::BUILD_VECTOR, dl, BuildVT, Ops); 1650 1651 // We may have jumped through bitcasts, so the type of the 1652 // BUILD_VECTOR may not match the type of the shuffle. 1653 if (BuildVT != VT) 1654 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV); 1655 return NewBV; 1656 } 1657 } 1658 } 1659 1660 FoldingSetNodeID ID; 1661 SDValue Ops[2] = { N1, N2 }; 1662 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops); 1663 for (unsigned i = 0; i != NElts; ++i) 1664 ID.AddInteger(MaskVec[i]); 1665 1666 void* IP = nullptr; 1667 if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) 1668 return SDValue(E, 0); 1669 1670 // Allocate the mask array for the node out of the BumpPtrAllocator, since 1671 // SDNode doesn't have access to it. This memory will be "leaked" when 1672 // the node is deallocated, but recovered when the NodeAllocator is released. 1673 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1674 memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int)); 1675 1676 ShuffleVectorSDNode *N = 1677 new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(), 1678 dl.getDebugLoc(), N1, N2, 1679 MaskAlloc); 1680 CSEMap.InsertNode(N, IP); 1681 InsertNode(N); 1682 return SDValue(N, 0); 1683 } 1684 1685 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1686 MVT VT = SV.getSimpleValueType(0); 1687 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1688 ShuffleVectorSDNode::commuteMask(MaskVec); 1689 1690 SDValue Op0 = SV.getOperand(0); 1691 SDValue Op1 = SV.getOperand(1); 1692 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, &MaskVec[0]); 1693 } 1694 1695 SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl, 1696 SDValue Val, SDValue DTy, 1697 SDValue STy, SDValue Rnd, SDValue Sat, 1698 ISD::CvtCode Code) { 1699 // If the src and dest types are the same and the conversion is between 1700 // integer types of the same sign or two floats, no conversion is necessary.
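// For example, a CVT_SS or CVT_UU conversion whose source and destination
// types are both i32 is an identity and simply returns Val, while a
// sign-changing code such as CVT_SU still builds a node even when the
// types match.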
1701 if (DTy == STy && 1702 (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF)) 1703 return Val; 1704 1705 FoldingSetNodeID ID; 1706 SDValue Ops[] = { Val, DTy, STy, Rnd, Sat }; 1707 AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), Ops); 1708 void* IP = nullptr; 1709 if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) 1710 return SDValue(E, 0); 1711 1712 CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(), 1713 dl.getDebugLoc(), 1714 Ops, Code); 1715 CSEMap.InsertNode(N, IP); 1716 InsertNode(N); 1717 return SDValue(N, 0); 1718 } 1719 1720 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1721 FoldingSetNodeID ID; 1722 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1723 ID.AddInteger(RegNo); 1724 void *IP = nullptr; 1725 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1726 return SDValue(E, 0); 1727 1728 SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT); 1729 CSEMap.InsertNode(N, IP); 1730 InsertNode(N); 1731 return SDValue(N, 0); 1732 } 1733 1734 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1735 FoldingSetNodeID ID; 1736 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1737 ID.AddPointer(RegMask); 1738 void *IP = nullptr; 1739 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1740 return SDValue(E, 0); 1741 1742 SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask); 1743 CSEMap.InsertNode(N, IP); 1744 InsertNode(N); 1745 return SDValue(N, 0); 1746 } 1747 1748 SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) { 1749 FoldingSetNodeID ID; 1750 SDValue Ops[] = { Root }; 1751 AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops); 1752 ID.AddPointer(Label); 1753 void *IP = nullptr; 1754 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1755 return SDValue(E, 0); 1756 1757 SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(), 1758 dl.getDebugLoc(), Root, Label); 1759 CSEMap.InsertNode(N, IP); 1760 InsertNode(N); 1761 return SDValue(N, 0); 1762 } 1763 1764 1765 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1766 int64_t Offset, 1767 bool isTarget, 1768 unsigned char TargetFlags) { 1769 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress; 1770 1771 FoldingSetNodeID ID; 1772 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1773 ID.AddPointer(BA); 1774 ID.AddInteger(Offset); 1775 ID.AddInteger(TargetFlags); 1776 void *IP = nullptr; 1777 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1778 return SDValue(E, 0); 1779 1780 SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset, 1781 TargetFlags); 1782 CSEMap.InsertNode(N, IP); 1783 InsertNode(N); 1784 return SDValue(N, 0); 1785 } 1786 1787 SDValue SelectionDAG::getSrcValue(const Value *V) { 1788 assert((!V || V->getType()->isPointerTy()) && 1789 "SrcValue is not a pointer?"); 1790 1791 FoldingSetNodeID ID; 1792 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1793 ID.AddPointer(V); 1794 1795 void *IP = nullptr; 1796 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1797 return SDValue(E, 0); 1798 1799 SDNode *N = new (NodeAllocator) SrcValueSDNode(V); 1800 CSEMap.InsertNode(N, IP); 1801 InsertNode(N); 1802 return SDValue(N, 0); 1803 } 1804 1805 /// getMDNode - Return an MDNodeSDNode which holds an MDNode. 
1806 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1807 FoldingSetNodeID ID; 1808 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1809 ID.AddPointer(MD); 1810 1811 void *IP = nullptr; 1812 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1813 return SDValue(E, 0); 1814 1815 SDNode *N = new (NodeAllocator) MDNodeSDNode(MD); 1816 CSEMap.InsertNode(N, IP); 1817 InsertNode(N); 1818 return SDValue(N, 0); 1819 } 1820 1821 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1822 if (VT == V.getValueType()) 1823 return V; 1824 1825 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1826 } 1827 1828 /// getAddrSpaceCast - Return an AddrSpaceCastSDNode. 1829 SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr, 1830 unsigned SrcAS, unsigned DestAS) { 1831 SDValue Ops[] = {Ptr}; 1832 FoldingSetNodeID ID; 1833 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1834 ID.AddInteger(SrcAS); 1835 ID.AddInteger(DestAS); 1836 1837 void *IP = nullptr; 1838 if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) 1839 return SDValue(E, 0); 1840 1841 SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(), 1842 dl.getDebugLoc(), 1843 VT, Ptr, SrcAS, DestAS); 1844 CSEMap.InsertNode(N, IP); 1845 InsertNode(N); 1846 return SDValue(N, 0); 1847 } 1848 1849 /// getShiftAmountOperand - Return the specified value casted to 1850 /// the target's desired shift amount type. 1851 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1852 EVT OpTy = Op.getValueType(); 1853 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1854 if (OpTy == ShTy || OpTy.isVector()) return Op; 1855 1856 ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND; 1857 return getNode(Opcode, SDLoc(Op), ShTy, Op); 1858 } 1859 1860 /// CreateStackTemporary - Create a stack temporary, suitable for holding the 1861 /// specified value type. 1862 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1863 MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo(); 1864 unsigned ByteSize = VT.getStoreSize(); 1865 Type *Ty = VT.getTypeForEVT(*getContext()); 1866 unsigned StackAlign = 1867 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1868 1869 int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false); 1870 return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout())); 1871 } 1872 1873 /// CreateStackTemporary - Create a stack temporary suitable for holding 1874 /// either of the specified value types. 1875 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1876 unsigned Bytes = std::max(VT1.getStoreSizeInBits(), 1877 VT2.getStoreSizeInBits())/8; 1878 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1879 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1880 const DataLayout &DL = getDataLayout(); 1881 unsigned Align = 1882 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1883 1884 MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo(); 1885 int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false); 1886 return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout())); 1887 } 1888 1889 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, 1890 SDValue N2, ISD::CondCode Cond, SDLoc dl) { 1891 // These setcc operations always fold. 
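// For example, (setcc X, Y, setfalse) folds to the constant 0, and
// (setcc X, Y, settrue) folds to 1, or to all-ones when the target's
// boolean content is ZeroOrNegativeOneBooleanContent, independent of
// X and Y.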
1892 switch (Cond) { 1893 default: break; 1894 case ISD::SETFALSE: 1895 case ISD::SETFALSE2: return getConstant(0, dl, VT); 1896 case ISD::SETTRUE: 1897 case ISD::SETTRUE2: { 1898 TargetLowering::BooleanContent Cnt = 1899 TLI->getBooleanContents(N1->getValueType(0)); 1900 return getConstant( 1901 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl, 1902 VT); 1903 } 1904 1905 case ISD::SETOEQ: 1906 case ISD::SETOGT: 1907 case ISD::SETOGE: 1908 case ISD::SETOLT: 1909 case ISD::SETOLE: 1910 case ISD::SETONE: 1911 case ISD::SETO: 1912 case ISD::SETUO: 1913 case ISD::SETUEQ: 1914 case ISD::SETUNE: 1915 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!"); 1916 break; 1917 } 1918 1919 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) { 1920 const APInt &C2 = N2C->getAPIntValue(); 1921 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 1922 const APInt &C1 = N1C->getAPIntValue(); 1923 1924 switch (Cond) { 1925 default: llvm_unreachable("Unknown integer setcc!"); 1926 case ISD::SETEQ: return getConstant(C1 == C2, dl, VT); 1927 case ISD::SETNE: return getConstant(C1 != C2, dl, VT); 1928 case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT); 1929 case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT); 1930 case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT); 1931 case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT); 1932 case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT); 1933 case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT); 1934 case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT); 1935 case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT); 1936 } 1937 } 1938 } 1939 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) { 1940 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) { 1941 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); 1942 switch (Cond) { 1943 default: break; 1944 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 1945 return getUNDEF(VT); 1946 // fall through 1947 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT); 1948 case ISD::SETNE: if (R==APFloat::cmpUnordered) 1949 return getUNDEF(VT); 1950 // fall through 1951 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan || 1952 R==APFloat::cmpLessThan, dl, VT); 1953 case ISD::SETLT: if (R==APFloat::cmpUnordered) 1954 return getUNDEF(VT); 1955 // fall through 1956 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT); 1957 case ISD::SETGT: if (R==APFloat::cmpUnordered) 1958 return getUNDEF(VT); 1959 // fall through 1960 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT); 1961 case ISD::SETLE: if (R==APFloat::cmpUnordered) 1962 return getUNDEF(VT); 1963 // fall through 1964 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan || 1965 R==APFloat::cmpEqual, dl, VT); 1966 case ISD::SETGE: if (R==APFloat::cmpUnordered) 1967 return getUNDEF(VT); 1968 // fall through 1969 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan || 1970 R==APFloat::cmpEqual, dl, VT); 1971 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT); 1972 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT); 1973 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered || 1974 R==APFloat::cmpEqual, dl, VT); 1975 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT); 1976 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered || 1977 R==APFloat::cmpLessThan, dl, VT); 1978 case 
ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan || 1979 R==APFloat::cmpUnordered, dl, VT); 1980 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT); 1981 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT); 1982 } 1983 } else { 1984 // Ensure that the constant occurs on the RHS. 1985 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 1986 MVT CompVT = N1.getValueType().getSimpleVT(); 1987 if (!TLI->isCondCodeLegal(SwappedCond, CompVT)) 1988 return SDValue(); 1989 1990 return getSetCC(dl, VT, N2, N1, SwappedCond); 1991 } 1992 } 1993 1994 // Could not fold it. 1995 return SDValue(); 1996 } 1997 1998 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 1999 /// use this predicate to simplify operations downstream. 2000 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2001 // This predicate is not safe for vector operations. 2002 if (Op.getValueType().isVector()) 2003 return false; 2004 2005 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits(); 2006 return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth); 2007 } 2008 2009 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2010 /// this predicate to simplify operations downstream. Every bit that is set 2011 /// in Mask must be known to be zero in V for the predicate to hold. 2012 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 2013 unsigned Depth) const { 2014 APInt KnownZero, KnownOne; 2015 computeKnownBits(Op, KnownZero, KnownOne, Depth); 2016 return (KnownZero & Mask) == Mask; 2017 } 2018 2019 /// Determine which bits of Op are known to be either zero or one and return 2020 /// them in the KnownZero/KnownOne bitsets. 2021 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, 2022 APInt &KnownOne, unsigned Depth) const { 2023 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits(); 2024 2025 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything. 2026 if (Depth == 6) 2027 return; // Limit search depth. 2028 2029 APInt KnownZero2, KnownOne2; 2030 2031 switch (Op.getOpcode()) { 2032 case ISD::Constant: 2033 // We know all of the bits for a constant! 2034 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue(); 2035 KnownZero = ~KnownOne; 2036 break; 2037 case ISD::AND: 2038 // If either the LHS or the RHS is zero, the result is zero. 2039 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 2040 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 2041 2042 // Output known-1 bits are only known if set in both the LHS & RHS. 2043 KnownOne &= KnownOne2; 2044 // Output known-0 bits are known to be clear if zero in either the LHS | RHS. 2045 KnownZero |= KnownZero2; 2046 break; 2047 case ISD::OR: 2048 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 2049 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 2050 2051 // Output known-0 bits are only known if clear in both the LHS & RHS. 2052 KnownZero &= KnownZero2; 2053 // Output known-1 bits are known to be set if set in either the LHS | RHS. 2054 KnownOne |= KnownOne2; 2055 break; 2056 case ISD::XOR: { 2057 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 2058 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 2059 2060 // Output known-0 bits are known if clear or set in both the LHS & RHS.
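// For example, if the LHS is known to be 0001 (all four bits known) and the
// RHS is known to be 001x (low bit unknown), then for the XOR bit 1 is
// known one, bits 2-3 are known zero, and bit 0 remains unknown.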
2061 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); 2062 // Output known-1 bits are known to be set if set in only one of the LHS, RHS. 2063 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); 2064 KnownZero = KnownZeroOut; 2065 break; 2066 } 2067 case ISD::MUL: { 2068 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 2069 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 2070 2071 // If low bits are zero in either operand, output low known-0 bits. 2072 // Also compute a conservative estimate for high known-0 bits. 2073 // More trickiness is possible, but this is sufficient for the 2074 // interesting case of alignment computation. 2075 KnownOne.clearAllBits(); 2076 unsigned TrailZ = KnownZero.countTrailingOnes() + 2077 KnownZero2.countTrailingOnes(); 2078 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() + 2079 KnownZero2.countLeadingOnes(), 2080 BitWidth) - BitWidth; 2081 2082 TrailZ = std::min(TrailZ, BitWidth); 2083 LeadZ = std::min(LeadZ, BitWidth); 2084 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) | 2085 APInt::getHighBitsSet(BitWidth, LeadZ); 2086 break; 2087 } 2088 case ISD::UDIV: { 2089 // For the purposes of computing leading zeros we can conservatively 2090 // treat a udiv as a logical right shift by the power of 2 known to 2091 // be less than the denominator. 2092 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 2093 unsigned LeadZ = KnownZero2.countLeadingOnes(); 2094 2095 KnownOne2.clearAllBits(); 2096 KnownZero2.clearAllBits(); 2097 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 2098 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros(); 2099 if (RHSUnknownLeadingOnes != BitWidth) 2100 LeadZ = std::min(BitWidth, 2101 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1); 2102 2103 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ); 2104 break; 2105 } 2106 case ISD::SELECT: 2107 computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1); 2108 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 2109 2110 // Only known if known in both the LHS and RHS. 2111 KnownOne &= KnownOne2; 2112 KnownZero &= KnownZero2; 2113 break; 2114 case ISD::SELECT_CC: 2115 computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1); 2116 computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1); 2117 2118 // Only known if known in both the LHS and RHS. 2119 KnownOne &= KnownOne2; 2120 KnownZero &= KnownZero2; 2121 break; 2122 case ISD::SADDO: 2123 case ISD::UADDO: 2124 case ISD::SSUBO: 2125 case ISD::USUBO: 2126 case ISD::SMULO: 2127 case ISD::UMULO: 2128 if (Op.getResNo() != 1) 2129 break; 2130 // The boolean result conforms to getBooleanContents. 2131 // If we know the result of a setcc has the top bits zero, use this info. 2132 // We know that we have an integer-based boolean since these operations 2133 // are only available for integers. 2134 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2135 TargetLowering::ZeroOrOneBooleanContent && 2136 BitWidth > 1) 2137 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 2138 break; 2139 case ISD::SETCC: 2140 // If we know the result of a setcc has the top bits zero, use this info.
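// For example, an i32 setcc on a target with ZeroOrOneBooleanContent can
// only produce 0 or 1, so its upper 31 bits are known zero.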
2141 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2142 TargetLowering::ZeroOrOneBooleanContent && 2143 BitWidth > 1) 2144 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 2145 break; 2146 case ISD::SHL: 2147 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0 2148 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2149 unsigned ShAmt = SA->getZExtValue(); 2150 2151 // If the shift count is an invalid immediate, don't do anything. 2152 if (ShAmt >= BitWidth) 2153 break; 2154 2155 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2156 KnownZero <<= ShAmt; 2157 KnownOne <<= ShAmt; 2158 // low bits known zero. 2159 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt); 2160 } 2161 break; 2162 case ISD::SRL: 2163 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 2164 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2165 unsigned ShAmt = SA->getZExtValue(); 2166 2167 // If the shift count is an invalid immediate, don't do anything. 2168 if (ShAmt >= BitWidth) 2169 break; 2170 2171 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2172 KnownZero = KnownZero.lshr(ShAmt); 2173 KnownOne = KnownOne.lshr(ShAmt); 2174 2175 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 2176 KnownZero |= HighBits; // High bits known zero. 2177 } 2178 break; 2179 case ISD::SRA: 2180 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2181 unsigned ShAmt = SA->getZExtValue(); 2182 2183 // If the shift count is an invalid immediate, don't do anything. 2184 if (ShAmt >= BitWidth) 2185 break; 2186 2187 // If any of the demanded bits are produced by the sign extension, we also 2188 // demand the input sign bit. 2189 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 2190 2191 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2192 KnownZero = KnownZero.lshr(ShAmt); 2193 KnownOne = KnownOne.lshr(ShAmt); 2194 2195 // Handle the sign bits. 2196 APInt SignBit = APInt::getSignBit(BitWidth); 2197 SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask. 2198 2199 if (KnownZero.intersects(SignBit)) { 2200 KnownZero |= HighBits; // New bits are known zero. 2201 } else if (KnownOne.intersects(SignBit)) { 2202 KnownOne |= HighBits; // New bits are known one. 2203 } 2204 } 2205 break; 2206 case ISD::SIGN_EXTEND_INREG: { 2207 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2208 unsigned EBits = EVT.getScalarType().getSizeInBits(); 2209 2210 // Sign extension. Compute the demanded bits in the result that are not 2211 // present in the input. 2212 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2213 2214 APInt InSignBit = APInt::getSignBit(EBits); 2215 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2216 2217 // If the sign extended bits are demanded, we know that the sign 2218 // bit is demanded. 2219 InSignBit = InSignBit.zext(BitWidth); 2220 if (NewBits.getBoolValue()) 2221 InputDemandedBits |= InSignBit; 2222 2223 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2224 KnownOne &= InputDemandedBits; 2225 KnownZero &= InputDemandedBits; 2226 2227 // If the sign bit of the input is known set or clear, then we know the 2228 // top bits of the result. 
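// For example, for (sign_extend_inreg X, i8) on an i32 value: if bit 7 of X
// is known zero, bits 8-31 of the result become known zero; if bit 7 is
// known one, they become known one; otherwise they stay unknown.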
2229 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear 2230 KnownZero |= NewBits; 2231 KnownOne &= ~NewBits; 2232 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set 2233 KnownOne |= NewBits; 2234 KnownZero &= ~NewBits; 2235 } else { // Input sign bit unknown 2236 KnownZero &= ~NewBits; 2237 KnownOne &= ~NewBits; 2238 } 2239 break; 2240 } 2241 case ISD::CTTZ: 2242 case ISD::CTTZ_ZERO_UNDEF: 2243 case ISD::CTLZ: 2244 case ISD::CTLZ_ZERO_UNDEF: 2245 case ISD::CTPOP: { 2246 unsigned LowBits = Log2_32(BitWidth)+1; 2247 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits); 2248 KnownOne.clearAllBits(); 2249 break; 2250 } 2251 case ISD::LOAD: { 2252 LoadSDNode *LD = cast<LoadSDNode>(Op); 2253 // If this is a ZEXTLoad and we are looking at the loaded value. 2254 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 2255 EVT VT = LD->getMemoryVT(); 2256 unsigned MemBits = VT.getScalarType().getSizeInBits(); 2257 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); 2258 } else if (const MDNode *Ranges = LD->getRanges()) { 2259 computeKnownBitsFromRangeMetadata(*Ranges, KnownZero); 2260 } 2261 break; 2262 } 2263 case ISD::ZERO_EXTEND: { 2264 EVT InVT = Op.getOperand(0).getValueType(); 2265 unsigned InBits = InVT.getScalarType().getSizeInBits(); 2266 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits); 2267 KnownZero = KnownZero.trunc(InBits); 2268 KnownOne = KnownOne.trunc(InBits); 2269 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2270 KnownZero = KnownZero.zext(BitWidth); 2271 KnownOne = KnownOne.zext(BitWidth); 2272 KnownZero |= NewBits; 2273 break; 2274 } 2275 case ISD::SIGN_EXTEND: { 2276 EVT InVT = Op.getOperand(0).getValueType(); 2277 unsigned InBits = InVT.getScalarType().getSizeInBits(); 2278 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits); 2279 2280 KnownZero = KnownZero.trunc(InBits); 2281 KnownOne = KnownOne.trunc(InBits); 2282 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2283 2284 // Note if the sign bit is known to be zero or one. 2285 bool SignBitKnownZero = KnownZero.isNegative(); 2286 bool SignBitKnownOne = KnownOne.isNegative(); 2287 2288 KnownZero = KnownZero.zext(BitWidth); 2289 KnownOne = KnownOne.zext(BitWidth); 2290 2291 // If the sign bit is known zero or one, the top bits match. 
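// For example, sign-extending an i8 known to lie in [0, 16) to i32 keeps
// bits 4-7 known zero and makes the new bits 8-31 known zero as well,
// because the sign bit is known clear.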
2292 if (SignBitKnownZero) 2293 KnownZero |= NewBits; 2294 else if (SignBitKnownOne) 2295 KnownOne |= NewBits; 2296 break; 2297 } 2298 case ISD::ANY_EXTEND: { 2299 EVT InVT = Op.getOperand(0).getValueType(); 2300 unsigned InBits = InVT.getScalarType().getSizeInBits(); 2301 KnownZero = KnownZero.trunc(InBits); 2302 KnownOne = KnownOne.trunc(InBits); 2303 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2304 KnownZero = KnownZero.zext(BitWidth); 2305 KnownOne = KnownOne.zext(BitWidth); 2306 break; 2307 } 2308 case ISD::TRUNCATE: { 2309 EVT InVT = Op.getOperand(0).getValueType(); 2310 unsigned InBits = InVT.getScalarType().getSizeInBits(); 2311 KnownZero = KnownZero.zext(InBits); 2312 KnownOne = KnownOne.zext(InBits); 2313 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2314 KnownZero = KnownZero.trunc(BitWidth); 2315 KnownOne = KnownOne.trunc(BitWidth); 2316 break; 2317 } 2318 case ISD::AssertZext: { 2319 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2320 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 2321 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2322 KnownZero |= (~InMask); 2323 KnownOne &= (~KnownZero); 2324 break; 2325 } 2326 case ISD::FGETSIGN: 2327 // All bits are zero except the low bit. 2328 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1); 2329 break; 2330 2331 case ISD::SUB: { 2332 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) { 2333 // We know that the top bits of C-X are clear if X contains fewer bits 2334 // than C (i.e. no wrap-around can happen). For example, 20-X is 2335 // positive if we can prove that X is >= 0 and < 16. 2336 if (CLHS->getAPIntValue().isNonNegative()) { 2337 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros(); 2338 // NLZ can't be BitWidth with no sign bit 2339 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1); 2340 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 2341 2342 // If all of the MaskV bits are known to be zero, then we know the 2343 // output top bits are zero, because we now know that the output is 2344 // from [0-C]. 2345 if ((KnownZero2 & MaskV) == MaskV) { 2346 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros(); 2347 // Top bits known zero. 2348 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2); 2349 } 2350 } 2351 } 2352 } 2353 // fall through 2354 case ISD::ADD: 2355 case ISD::ADDE: { 2356 // The output's low known-0 bits are the low bits known to be clear in 2357 // both the LHS and the RHS. For example, 8+(X<<3) is known to have the 2358 // low 3 bits clear. 2359 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 2360 unsigned KnownZeroOut = KnownZero2.countTrailingOnes(); 2361 2362 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 2363 KnownZeroOut = std::min(KnownZeroOut, 2364 KnownZero2.countTrailingOnes()); 2365 2366 if (Op.getOpcode() == ISD::ADD) { 2367 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut); 2368 break; 2369 } 2370 2371 // With ADDE, a carry bit may be added in, so we can only use this 2372 // information if we know (at least) that the low two bits are clear. We 2373 // then report to the caller that the low bit is unknown but that the other 2374 // low bits are known zero.
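// For example, if both addends have their low four bits known zero, the
// ADDE result has bits 1-3 known zero, while bit 0 may still be set by the
// incoming carry operand.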
2375 if (KnownZeroOut >= 2) // ADDE 2376 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut); 2377 break; 2378 } 2379 case ISD::SREM: 2380 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2381 const APInt &RA = Rem->getAPIntValue().abs(); 2382 if (RA.isPowerOf2()) { 2383 APInt LowBits = RA - 1; 2384 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 2385 2386 // The low bits of the first operand are unchanged by the srem. 2387 KnownZero = KnownZero2 & LowBits; 2388 KnownOne = KnownOne2 & LowBits; 2389 2390 // If the first operand is non-negative or has all low bits zero, then 2391 // the upper bits are all zero. 2392 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits)) 2393 KnownZero |= ~LowBits; 2394 2395 // If the first operand is negative and not all low bits are zero, then 2396 // the upper bits are all one. 2397 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0)) 2398 KnownOne |= ~LowBits; 2399 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 2400 } 2401 } 2402 break; 2403 case ISD::UREM: { 2404 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2405 const APInt &RA = Rem->getAPIntValue(); 2406 if (RA.isPowerOf2()) { 2407 APInt LowBits = (RA - 1); 2408 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth + 1); 2409 2410 // The upper bits are all zero, the lower ones are unchanged. 2411 KnownZero = KnownZero2 | ~LowBits; 2412 KnownOne = KnownOne2 & LowBits; 2413 break; 2414 } 2415 } 2416 2417 // Since the result is less than or equal to either operand, any leading 2418 // zero bits in either operand must also exist in the result. 2419 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2420 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 2421 2422 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(), 2423 KnownZero2.countLeadingOnes()); 2424 KnownOne.clearAllBits(); 2425 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders); 2426 break; 2427 } 2428 case ISD::EXTRACT_ELEMENT: { 2429 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2430 const unsigned Index = 2431 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2432 const unsigned BitWidth = Op.getValueType().getSizeInBits(); 2433 2434 // Remove low part of known bits mask 2435 KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth); 2436 KnownOne = KnownOne.getHiBits(KnownOne.getBitWidth() - Index * BitWidth); 2437 2438 // Remove high part of known bit mask 2439 KnownZero = KnownZero.trunc(BitWidth); 2440 KnownOne = KnownOne.trunc(BitWidth); 2441 break; 2442 } 2443 case ISD::SMIN: 2444 case ISD::SMAX: 2445 case ISD::UMIN: 2446 case ISD::UMAX: { 2447 APInt Op0Zero, Op0One; 2448 APInt Op1Zero, Op1One; 2449 computeKnownBits(Op.getOperand(0), Op0Zero, Op0One, Depth + 1); 2450 computeKnownBits(Op.getOperand(1), Op1Zero, Op1One, Depth + 1); 2451 2452 KnownZero = Op0Zero & Op1Zero; 2453 KnownOne = Op0One & Op1One; 2454 break; 2455 } 2456 case ISD::FrameIndex: 2457 case ISD::TargetFrameIndex: 2458 if (unsigned Align = InferPtrAlignment(Op)) { 2459 // The low bits are known zero if the pointer is aligned.
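// For example, a frame index known to be 16-byte aligned has its low
// Log2_32(16) == 4 bits known zero.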
2460 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align)); 2461 break; 2462 } 2463 break; 2464 2465 default: 2466 if (Op.getOpcode() < ISD::BUILTIN_OP_END) 2467 break; 2468 // Fallthrough 2469 case ISD::INTRINSIC_WO_CHAIN: 2470 case ISD::INTRINSIC_W_CHAIN: 2471 case ISD::INTRINSIC_VOID: 2472 // Allow the target to implement this method for its nodes. 2473 TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth); 2474 break; 2475 } 2476 2477 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 2478 } 2479 2480 /// ComputeNumSignBits - Return the number of times the sign bit of the 2481 /// register is replicated into the other bits. We know that at least 1 bit 2482 /// is always equal to the sign bit (itself), but other cases can give us 2483 /// information. For example, immediately after an "SRA X, 2", we know that 2484 /// the top 3 bits are all equal to each other, so we return 3. 2485 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{ 2486 EVT VT = Op.getValueType(); 2487 assert(VT.isInteger() && "Invalid VT!"); 2488 unsigned VTBits = VT.getScalarType().getSizeInBits(); 2489 unsigned Tmp, Tmp2; 2490 unsigned FirstAnswer = 1; 2491 2492 if (Depth == 6) 2493 return 1; // Limit search depth. 2494 2495 switch (Op.getOpcode()) { 2496 default: break; 2497 case ISD::AssertSext: 2498 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2499 return VTBits-Tmp+1; 2500 case ISD::AssertZext: 2501 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2502 return VTBits-Tmp; 2503 2504 case ISD::Constant: { 2505 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue(); 2506 return Val.getNumSignBits(); 2507 } 2508 2509 case ISD::SIGN_EXTEND: 2510 Tmp = 2511 VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits(); 2512 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp; 2513 2514 case ISD::SIGN_EXTEND_INREG: 2515 // Max of the input and what this extends. 2516 Tmp = 2517 cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits(); 2518 Tmp = VTBits-Tmp+1; 2519 2520 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2521 return std::max(Tmp, Tmp2); 2522 2523 case ISD::SRA: 2524 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2525 // SRA X, C -> adds C sign bits. 2526 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2527 Tmp += C->getZExtValue(); 2528 if (Tmp > VTBits) Tmp = VTBits; 2529 } 2530 return Tmp; 2531 case ISD::SHL: 2532 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2533 // shl destroys sign bits. 2534 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2535 if (C->getZExtValue() >= VTBits || // Bad shift. 2536 C->getZExtValue() >= Tmp) break; // Shifted all sign bits out. 2537 return Tmp - C->getZExtValue(); 2538 } 2539 break; 2540 case ISD::AND: 2541 case ISD::OR: 2542 case ISD::XOR: // NOT is handled here. 2543 // Logical binary ops preserve the number of sign bits at the worst. 2544 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2545 if (Tmp != 1) { 2546 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2547 FirstAnswer = std::min(Tmp, Tmp2); 2548 // We computed what we know about the sign bits as our first 2549 // answer. Now proceed to the generic code that uses 2550 // computeKnownBits, and pick whichever answer is better. 2551 } 2552 break; 2553 2554 case ISD::SELECT: 2555 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2556 if (Tmp == 1) return 1; // Early out. 
2557 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1); 2558 return std::min(Tmp, Tmp2); 2559 case ISD::SMIN: 2560 case ISD::SMAX: 2561 case ISD::UMIN: 2562 case ISD::UMAX: 2563 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 2564 if (Tmp == 1) 2565 return 1; // Early out. 2566 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 2567 return std::min(Tmp, Tmp2); 2568 case ISD::SADDO: 2569 case ISD::UADDO: 2570 case ISD::SSUBO: 2571 case ISD::USUBO: 2572 case ISD::SMULO: 2573 case ISD::UMULO: 2574 if (Op.getResNo() != 1) 2575 break; 2576 // The boolean result conforms to getBooleanContents. Fall through. 2577 // If setcc returns 0/-1, all bits are sign bits. 2578 // We know that we have an integer-based boolean since these operations 2579 // are only available for integer. 2580 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2581 TargetLowering::ZeroOrNegativeOneBooleanContent) 2582 return VTBits; 2583 break; 2584 case ISD::SETCC: 2585 // If setcc returns 0/-1, all bits are sign bits. 2586 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2587 TargetLowering::ZeroOrNegativeOneBooleanContent) 2588 return VTBits; 2589 break; 2590 case ISD::ROTL: 2591 case ISD::ROTR: 2592 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2593 unsigned RotAmt = C->getZExtValue() & (VTBits-1); 2594 2595 // Handle rotate right by N like a rotate left by 32-N. 2596 if (Op.getOpcode() == ISD::ROTR) 2597 RotAmt = (VTBits-RotAmt) & (VTBits-1); 2598 2599 // If we aren't rotating out all of the known-in sign bits, return the 2600 // number that are left. This handles rotl(sext(x), 1) for example. 2601 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2602 if (Tmp > RotAmt+1) return Tmp-RotAmt; 2603 } 2604 break; 2605 case ISD::ADD: 2606 // Add can have at most one carry bit. Thus we know that the output 2607 // is, at worst, one more bit than the inputs. 2608 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2609 if (Tmp == 1) return 1; // Early out. 2610 2611 // Special case decrementing a value (ADD X, -1): 2612 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 2613 if (CRHS->isAllOnesValue()) { 2614 APInt KnownZero, KnownOne; 2615 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2616 2617 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2618 // sign bits set. 2619 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue()) 2620 return VTBits; 2621 2622 // If we are subtracting one from a positive number, there is no carry 2623 // out of the result. 2624 if (KnownZero.isNegative()) 2625 return Tmp; 2626 } 2627 2628 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2629 if (Tmp2 == 1) return 1; 2630 return std::min(Tmp, Tmp2)-1; 2631 2632 case ISD::SUB: 2633 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2634 if (Tmp2 == 1) return 1; 2635 2636 // Handle NEG. 2637 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) 2638 if (CLHS->isNullValue()) { 2639 APInt KnownZero, KnownOne; 2640 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 2641 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2642 // sign bits set. 2643 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue()) 2644 return VTBits; 2645 2646 // If the input is known to be positive (the sign bit is known clear), 2647 // the output of the NEG has the same number of sign bits as the input. 
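// For example, if X is an i32 known to satisfy 0 <= X < 128 (25 sign bits),
// then 0-X lies in (-128, 0] and still has 25 sign bits.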
2648 if (KnownZero.isNegative()) 2649 return Tmp2; 2650 2651 // Otherwise, we treat this like a SUB. 2652 } 2653 2654 // Sub can have at most one carry bit. Thus we know that the output 2655 // is, at worst, one more bit than the inputs. 2656 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2657 if (Tmp == 1) return 1; // Early out. 2658 return std::min(Tmp, Tmp2)-1; 2659 case ISD::TRUNCATE: 2660 // FIXME: it's tricky to do anything useful for this, but it is an important 2661 // case for targets like X86. 2662 break; 2663 case ISD::EXTRACT_ELEMENT: { 2664 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2665 const int BitWidth = Op.getValueType().getSizeInBits(); 2666 const int Items = 2667 Op.getOperand(0).getValueType().getSizeInBits() / BitWidth; 2668 2669 // Get the reverse index: operand 1 indexes elements from the little end, 2670 // while the sign bit sits at the big end. 2671 const int rIndex = Items - 1 - 2672 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2673 2674 // If the sign portion ends in our element, the subtraction gives the correct 2675 // result; otherwise it goes negative or past the bit width and is clamped below. 2676 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); 2677 } 2678 } 2679 2680 // If we are looking at the loaded value of the SDNode. 2681 if (Op.getResNo() == 0) { 2682 // Handle LOADX separately here. EXTLOAD case will fallthrough. 2683 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 2684 unsigned ExtType = LD->getExtensionType(); 2685 switch (ExtType) { 2686 default: break; 2687 case ISD::SEXTLOAD: // e.g. sextload i16 -> i32: '17' bits known 2688 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits(); 2689 return VTBits-Tmp+1; 2690 case ISD::ZEXTLOAD: // e.g. zextload i16 -> i32: '16' bits known 2691 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits(); 2692 return VTBits-Tmp; 2693 } 2694 } 2695 } 2696 2697 // Allow the target to implement this method for its nodes. 2698 if (Op.getOpcode() >= ISD::BUILTIN_OP_END || 2699 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2700 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2701 Op.getOpcode() == ISD::INTRINSIC_VOID) { 2702 unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth); 2703 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits); 2704 } 2705 2706 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2707 // use this information. 2708 APInt KnownZero, KnownOne; 2709 computeKnownBits(Op, KnownZero, KnownOne, Depth); 2710 2711 APInt Mask; 2712 if (KnownZero.isNegative()) { // sign bit is 0 2713 Mask = KnownZero; 2714 } else if (KnownOne.isNegative()) { // sign bit is 1 2715 Mask = KnownOne; 2716 } else { 2717 // Nothing known. 2718 return FirstAnswer; 2719 } 2720 2721 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 2722 // the number of identical bits in the top of the input value. 2723 Mask = ~Mask; 2724 Mask <<= Mask.getBitWidth()-VTBits; 2725 // Return # leading zeros. We use 'min' here in case Mask was zero before 2726 // shifting. We don't want to return '64' as for an i32 "0". 2727 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros())); 2728 } 2729 2730 /// isBaseWithConstantOffset - Return true if the specified operand is an 2731 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an 2732 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same 2733 /// semantics as an ADD. This handles the equivalence: 2734 /// X|Cst == X+Cst iff X&Cst = 0.
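/// For example, if X is known to be 8-byte aligned, (or X, 7) addresses the
/// same location as (add X, 7), since the low three bits of X are zero.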
2735 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 2736 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 2737 !isa<ConstantSDNode>(Op.getOperand(1))) 2738 return false; 2739 2740 if (Op.getOpcode() == ISD::OR && 2741 !MaskedValueIsZero(Op.getOperand(0), 2742 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue())) 2743 return false; 2744 2745 return true; 2746 } 2747 2748 2749 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const { 2750 // If we're told that NaNs won't happen, assume they won't. 2751 if (getTarget().Options.NoNaNsFPMath) 2752 return true; 2753 2754 // If the value is a constant, we can obviously see if it is a NaN or not. 2755 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 2756 return !C->getValueAPF().isNaN(); 2757 2758 // TODO: Recognize more cases here. 2759 2760 return false; 2761 } 2762 2763 bool SelectionDAG::isKnownNeverZero(SDValue Op) const { 2764 // If the value is a constant, we can obviously see if it is a zero or not. 2765 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 2766 return !C->isZero(); 2767 2768 // TODO: Recognize more cases here. 2769 switch (Op.getOpcode()) { 2770 default: break; 2771 case ISD::OR: 2772 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 2773 return !C->isNullValue(); 2774 break; 2775 } 2776 2777 return false; 2778 } 2779 2780 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const { 2781 // Check the obvious case. 2782 if (A == B) return true; 2783 2784 // Check for negative and positive zero. 2785 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) 2786 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) 2787 if (CA->isZero() && CB->isZero()) return true; 2788 2789 // Otherwise they may not be equal. 2790 return false; 2791 } 2792 2793 /// getNode - Gets or creates the specified node. 2794 /// 2795 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) { 2796 FoldingSetNodeID ID; 2797 AddNodeIDNode(ID, Opcode, getVTList(VT), None); 2798 void *IP = nullptr; 2799 if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)) 2800 return SDValue(E, 0); 2801 2802 SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), 2803 DL.getDebugLoc(), getVTList(VT)); 2804 CSEMap.InsertNode(N, IP); 2805 2806 InsertNode(N); 2807 return SDValue(N, 0); 2808 } 2809 2810 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, 2811 EVT VT, SDValue Operand) { 2812 // Constant fold unary operations with an integer constant operand. Even 2813 // an opaque constant will be folded, because the folding of unary operations 2814 // doesn't create new constants with different values. Nevertheless, the 2815 // opaque flag is preserved during folding to prevent future folding with 2816 // other constants.
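// For example, (zext (i8 opaque constant 42) to i32) still folds to an i32
// constant 42: the value is unchanged, and the preserved opaque flag keeps
// later combines from merging the result with other constants.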
2817 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) { 2818 const APInt &Val = C->getAPIntValue(); 2819 switch (Opcode) { 2820 default: break; 2821 case ISD::SIGN_EXTEND: 2822 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 2823 C->isTargetOpcode(), C->isOpaque()); 2824 case ISD::ANY_EXTEND: 2825 case ISD::ZERO_EXTEND: 2826 case ISD::TRUNCATE: 2827 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 2828 C->isTargetOpcode(), C->isOpaque()); 2829 case ISD::UINT_TO_FP: 2830 case ISD::SINT_TO_FP: { 2831 APFloat apf(EVTToAPFloatSemantics(VT), 2832 APInt::getNullValue(VT.getSizeInBits())); 2833 (void)apf.convertFromAPInt(Val, 2834 Opcode==ISD::SINT_TO_FP, 2835 APFloat::rmNearestTiesToEven); 2836 return getConstantFP(apf, DL, VT); 2837 } 2838 case ISD::BITCAST: 2839 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 2840 return getConstantFP(APFloat(APFloat::IEEEhalf, Val), DL, VT); 2841 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 2842 return getConstantFP(APFloat(APFloat::IEEEsingle, Val), DL, VT); 2843 else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 2844 return getConstantFP(APFloat(APFloat::IEEEdouble, Val), DL, VT); 2845 break; 2846 case ISD::BSWAP: 2847 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 2848 C->isOpaque()); 2849 case ISD::CTPOP: 2850 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 2851 C->isOpaque()); 2852 case ISD::CTLZ: 2853 case ISD::CTLZ_ZERO_UNDEF: 2854 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 2855 C->isOpaque()); 2856 case ISD::CTTZ: 2857 case ISD::CTTZ_ZERO_UNDEF: 2858 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 2859 C->isOpaque()); 2860 } 2861 } 2862 2863 // Constant fold unary operations with a floating point constant operand. 2864 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) { 2865 APFloat V = C->getValueAPF(); // make copy 2866 switch (Opcode) { 2867 case ISD::FNEG: 2868 V.changeSign(); 2869 return getConstantFP(V, DL, VT); 2870 case ISD::FABS: 2871 V.clearSign(); 2872 return getConstantFP(V, DL, VT); 2873 case ISD::FCEIL: { 2874 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 2875 if (fs == APFloat::opOK || fs == APFloat::opInexact) 2876 return getConstantFP(V, DL, VT); 2877 break; 2878 } 2879 case ISD::FTRUNC: { 2880 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 2881 if (fs == APFloat::opOK || fs == APFloat::opInexact) 2882 return getConstantFP(V, DL, VT); 2883 break; 2884 } 2885 case ISD::FFLOOR: { 2886 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 2887 if (fs == APFloat::opOK || fs == APFloat::opInexact) 2888 return getConstantFP(V, DL, VT); 2889 break; 2890 } 2891 case ISD::FP_EXTEND: { 2892 bool ignored; 2893 // This can return overflow, underflow, or inexact; we don't care. 2894 // FIXME need to be more flexible about rounding mode. 2895 (void)V.convert(EVTToAPFloatSemantics(VT), 2896 APFloat::rmNearestTiesToEven, &ignored); 2897 return getConstantFP(V, DL, VT); 2898 } 2899 case ISD::FP_TO_SINT: 2900 case ISD::FP_TO_UINT: { 2901 integerPart x[2]; 2902 bool ignored; 2903 static_assert(integerPartWidth >= 64, "APFloat parts too small!"); 2904 // FIXME need to be more flexible about rounding mode. 
2905 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(), 2906 Opcode==ISD::FP_TO_SINT, 2907 APFloat::rmTowardZero, &ignored); 2908 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual 2909 break; 2910 APInt api(VT.getSizeInBits(), x); 2911 return getConstant(api, DL, VT); 2912 } 2913 case ISD::BITCAST: 2914 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 2915 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 2916 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 2917 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 2918 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 2919 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 2920 break; 2921 } 2922 } 2923 2924 // Constant fold unary operations with a vector integer or float operand. 2925 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand.getNode())) { 2926 if (BV->isConstant()) { 2927 switch (Opcode) { 2928 default: 2929 // FIXME: Entirely reasonable to perform folding of other unary 2930 // operations here as the need arises. 2931 break; 2932 case ISD::FNEG: 2933 case ISD::FABS: 2934 case ISD::FCEIL: 2935 case ISD::FTRUNC: 2936 case ISD::FFLOOR: 2937 case ISD::FP_EXTEND: 2938 case ISD::FP_TO_SINT: 2939 case ISD::FP_TO_UINT: 2940 case ISD::TRUNCATE: 2941 case ISD::UINT_TO_FP: 2942 case ISD::SINT_TO_FP: 2943 case ISD::BSWAP: 2944 case ISD::CTLZ: 2945 case ISD::CTLZ_ZERO_UNDEF: 2946 case ISD::CTTZ: 2947 case ISD::CTTZ_ZERO_UNDEF: 2948 case ISD::CTPOP: { 2949 EVT SVT = VT.getScalarType(); 2950 EVT InVT = BV->getValueType(0); 2951 EVT InSVT = InVT.getScalarType(); 2952 2953 // Find legal integer scalar type for constant promotion and 2954 // ensure that its scalar size is at least as large as source. 2955 EVT LegalSVT = SVT; 2956 if (SVT.isInteger()) { 2957 LegalSVT = TLI->getTypeToTransformTo(*getContext(), SVT); 2958 if (LegalSVT.bitsLT(SVT)) break; 2959 } 2960 2961 // Let the above scalar folding handle the folding of each element. 2962 SmallVector<SDValue, 8> Ops; 2963 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 2964 SDValue OpN = BV->getOperand(i); 2965 EVT OpVT = OpN.getValueType(); 2966 2967 // Build vector (integer) scalar operands may need implicit 2968 // truncation - do this before constant folding. 2969 if (OpVT.isInteger() && OpVT.bitsGT(InSVT)) 2970 OpN = getNode(ISD::TRUNCATE, DL, InSVT, OpN); 2971 2972 OpN = getNode(Opcode, DL, SVT, OpN); 2973 2974 // Legalize the (integer) scalar constant if necessary. 2975 if (LegalSVT != SVT) 2976 OpN = getNode(ISD::ANY_EXTEND, DL, LegalSVT, OpN); 2977 2978 if (OpN.getOpcode() != ISD::UNDEF && 2979 OpN.getOpcode() != ISD::Constant && 2980 OpN.getOpcode() != ISD::ConstantFP) 2981 break; 2982 Ops.push_back(OpN); 2983 } 2984 if (Ops.size() == VT.getVectorNumElements()) 2985 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops); 2986 break; 2987 } 2988 } 2989 } 2990 } 2991 2992 unsigned OpOpcode = Operand.getNode()->getOpcode(); 2993 switch (Opcode) { 2994 case ISD::TokenFactor: 2995 case ISD::MERGE_VALUES: 2996 case ISD::CONCAT_VECTORS: 2997 return Operand; // Factor, merge or concat of one node? No need. 2998 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 2999 case ISD::FP_EXTEND: 3000 assert(VT.isFloatingPoint() && 3001 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 3002 if (Operand.getValueType() == VT) return Operand; // noop conversion. 
3003 assert((!VT.isVector() || 3004 VT.getVectorNumElements() == 3005 Operand.getValueType().getVectorNumElements()) && 3006 "Vector element count mismatch!"); 3007 if (Operand.getOpcode() == ISD::UNDEF) 3008 return getUNDEF(VT); 3009 break; 3010 case ISD::SIGN_EXTEND: 3011 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3012 "Invalid SIGN_EXTEND!"); 3013 if (Operand.getValueType() == VT) return Operand; // noop extension 3014 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) && 3015 "Invalid sext node, dst < src!"); 3016 assert((!VT.isVector() || 3017 VT.getVectorNumElements() == 3018 Operand.getValueType().getVectorNumElements()) && 3019 "Vector element count mismatch!"); 3020 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 3021 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0)); 3022 else if (OpOpcode == ISD::UNDEF) 3023 // sext(undef) = 0, because the top bits will all be the same. 3024 return getConstant(0, DL, VT); 3025 break; 3026 case ISD::ZERO_EXTEND: 3027 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3028 "Invalid ZERO_EXTEND!"); 3029 if (Operand.getValueType() == VT) return Operand; // noop extension 3030 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) && 3031 "Invalid zext node, dst < src!"); 3032 assert((!VT.isVector() || 3033 VT.getVectorNumElements() == 3034 Operand.getValueType().getVectorNumElements()) && 3035 "Vector element count mismatch!"); 3036 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 3037 return getNode(ISD::ZERO_EXTEND, DL, VT, 3038 Operand.getNode()->getOperand(0)); 3039 else if (OpOpcode == ISD::UNDEF) 3040 // zext(undef) = 0, because the top bits will be zero. 3041 return getConstant(0, DL, VT); 3042 break; 3043 case ISD::ANY_EXTEND: 3044 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3045 "Invalid ANY_EXTEND!"); 3046 if (Operand.getValueType() == VT) return Operand; // noop extension 3047 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) && 3048 "Invalid anyext node, dst < src!"); 3049 assert((!VT.isVector() || 3050 VT.getVectorNumElements() == 3051 Operand.getValueType().getVectorNumElements()) && 3052 "Vector element count mismatch!"); 3053 3054 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || 3055 OpOpcode == ISD::ANY_EXTEND) 3056 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x) 3057 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0)); 3058 else if (OpOpcode == ISD::UNDEF) 3059 return getUNDEF(VT); 3060 3061 // (ext (trunc x)) -> x 3062 if (OpOpcode == ISD::TRUNCATE) { 3063 SDValue OpOp = Operand.getNode()->getOperand(0); 3064 if (OpOp.getValueType() == VT) 3065 return OpOp; 3066 } 3067 break; 3068 case ISD::TRUNCATE: 3069 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3070 "Invalid TRUNCATE!"); 3071 if (Operand.getValueType() == VT) return Operand; // noop truncate 3072 assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) && 3073 "Invalid truncate node, src < dst!"); 3074 assert((!VT.isVector() || 3075 VT.getVectorNumElements() == 3076 Operand.getValueType().getVectorNumElements()) && 3077 "Vector element count mismatch!"); 3078 if (OpOpcode == ISD::TRUNCATE) 3079 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0)); 3080 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || 3081 OpOpcode == ISD::ANY_EXTEND) { 3082 // If the source is smaller than the dest, we still need
an extend. 3083 if (Operand.getNode()->getOperand(0).getValueType().getScalarType() 3084 .bitsLT(VT.getScalarType())) 3085 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0)); 3086 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT)) 3087 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0)); 3088 return Operand.getNode()->getOperand(0); 3089 } 3090 if (OpOpcode == ISD::UNDEF) 3091 return getUNDEF(VT); 3092 break; 3093 case ISD::BSWAP: 3094 assert(VT.isInteger() && VT == Operand.getValueType() && 3095 "Invalid BSWAP!"); 3096 assert((VT.getScalarSizeInBits() % 16 == 0) && 3097 "BSWAP types must be a multiple of 16 bits!"); 3098 if (OpOpcode == ISD::UNDEF) 3099 return getUNDEF(VT); 3100 break; 3101 case ISD::BITCAST: 3102 // Basic sanity checking. 3103 assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits() 3104 && "Cannot BITCAST between types of different sizes!"); 3105 if (VT == Operand.getValueType()) return Operand; // noop conversion. 3106 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 3107 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 3108 if (OpOpcode == ISD::UNDEF) 3109 return getUNDEF(VT); 3110 break; 3111 case ISD::SCALAR_TO_VECTOR: 3112 assert(VT.isVector() && !Operand.getValueType().isVector() && 3113 (VT.getVectorElementType() == Operand.getValueType() || 3114 (VT.getVectorElementType().isInteger() && 3115 Operand.getValueType().isInteger() && 3116 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 3117 "Illegal SCALAR_TO_VECTOR node!"); 3118 if (OpOpcode == ISD::UNDEF) 3119 return getUNDEF(VT); 3120 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 3121 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 3122 isa<ConstantSDNode>(Operand.getOperand(1)) && 3123 Operand.getConstantOperandVal(1) == 0 && 3124 Operand.getOperand(0).getValueType() == VT) 3125 return Operand.getOperand(0); 3126 break; 3127 case ISD::FNEG: 3128 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 3129 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) 3130 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1), 3131 Operand.getNode()->getOperand(0)); 3132 if (OpOpcode == ISD::FNEG) // --X -> X 3133 return Operand.getNode()->getOperand(0); 3134 break; 3135 case ISD::FABS: 3136 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 3137 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0)); 3138 break; 3139 } 3140 3141 SDNode *N; 3142 SDVTList VTs = getVTList(VT); 3143 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 3144 FoldingSetNodeID ID; 3145 SDValue Ops[1] = { Operand }; 3146 AddNodeIDNode(ID, Opcode, VTs, Ops); 3147 void *IP = nullptr; 3148 if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)) 3149 return SDValue(E, 0); 3150 3151 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), 3152 DL.getDebugLoc(), VTs, Operand); 3153 CSEMap.InsertNode(N, IP); 3154 } else { 3155 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), 3156 DL.getDebugLoc(), VTs, Operand); 3157 } 3158 3159 InsertNode(N); 3160 return SDValue(N, 0); 3161 } 3162 3163 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 3164 const APInt &C2) { 3165 switch (Opcode) { 3166 case ISD::ADD: return std::make_pair(C1 + C2, true); 3167 case ISD::SUB: return std::make_pair(C1 - C2, true); 3168 case ISD::MUL: return std::make_pair(C1 * C2, true); 3169 case ISD::AND: return std::make_pair(C1 & C2, true); 3170 case ISD::OR: return 
std::make_pair(C1 | C2, true);
3171   case ISD::XOR:  return std::make_pair(C1 ^ C2, true);
3172   case ISD::SHL:  return std::make_pair(C1 << C2, true);
3173   case ISD::SRL:  return std::make_pair(C1.lshr(C2), true);
3174   case ISD::SRA:  return std::make_pair(C1.ashr(C2), true);
3175   case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
3176   case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
3177   case ISD::UDIV:
3178     if (!C2.getBoolValue())
3179       break;
3180     return std::make_pair(C1.udiv(C2), true);
3181   case ISD::UREM:
3182     if (!C2.getBoolValue())
3183       break;
3184     return std::make_pair(C1.urem(C2), true);
3185   case ISD::SDIV:
3186     if (!C2.getBoolValue())
3187       break;
3188     return std::make_pair(C1.sdiv(C2), true);
3189   case ISD::SREM:
3190     if (!C2.getBoolValue())
3191       break;
3192     return std::make_pair(C1.srem(C2), true);
3193   }
3194   return std::make_pair(APInt(1, 0), false);
3195 }
3196
3197 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
3198                                              const ConstantSDNode *Cst1,
3199                                              const ConstantSDNode *Cst2) {
3200   if (Cst1->isOpaque() || Cst2->isOpaque())
3201     return SDValue();
3202
3203   std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
3204                                             Cst2->getAPIntValue());
3205   if (!Folded.second)
3206     return SDValue();
3207   return getConstant(Folded.first, DL, VT);
3208 }
3209
3210 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
3211                                              SDNode *Cst1, SDNode *Cst2) {
3212   // If the opcode is a target-specific ISD node, there's nothing we can
3213   // do here and the operand rules may not line up with the below, so
3214   // bail early.
3215   if (Opcode >= ISD::BUILTIN_OP_END)
3216     return SDValue();
3217
3218   // Handle the case of two scalars.
3219   if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
3220     if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
3221       if (SDValue Folded =
3222               FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2)) {
3223         if (!VT.isVector())
3224           return Folded;
3225         SmallVector<SDValue, 4> Outputs;
3226         // We may have a vector type but a scalar result. Create a splat.
3227         Outputs.assign(VT.getVectorNumElements(), Folded);
3228         // Build a big vector out of the scalar elements we generated.
3229         return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
3230       } else {
3231         return SDValue();
3232       }
3233     }
3234   }
3235
3236   // For vectors extract each constant element into Inputs so we can constant
3237   // fold them individually.
3238   BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
3239   BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
3240   if (!BV1 || !BV2)
3241     return SDValue();
3242
3243   assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
3244
3245   EVT SVT = VT.getScalarType();
3246   SmallVector<SDValue, 4> Outputs;
3247   for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
3248     ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
3249     ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
3250     if (!V1 || !V2) // Not a constant, bail.
3251       return SDValue();
3252
3253     if (V1->isOpaque() || V2->isOpaque())
3254       return SDValue();
3255
3256     // Avoid BUILD_VECTOR nodes that perform implicit truncation.
3257     // FIXME: This is valid and could be handled by truncating the APInts.
3258     if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
3259       return SDValue();
3260
3261     // Fold one vector element.
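    // For example, folding an ISD::ADD of the build_vectors <1, 2> and
    // <3, 4> folds the element pairs 1+3 and 2+4 in turn, producing <4, 6>.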
3262 std::pair<APInt, bool> Folded = FoldValue(Opcode, V1->getAPIntValue(), 3263 V2->getAPIntValue()); 3264 if (!Folded.second) 3265 return SDValue(); 3266 Outputs.push_back(getConstant(Folded.first, DL, SVT)); 3267 } 3268 3269 assert(VT.getVectorNumElements() == Outputs.size() && 3270 "Vector size mismatch!"); 3271 3272 // We may have a vector type but a scalar result. Create a splat. 3273 Outputs.resize(VT.getVectorNumElements(), Outputs.back()); 3274 3275 // Build a big vector out of the scalar elements we generated. 3276 return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs); 3277 } 3278 3279 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, 3280 SDValue N2, const SDNodeFlags *Flags) { 3281 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); 3282 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode()); 3283 switch (Opcode) { 3284 default: break; 3285 case ISD::TokenFactor: 3286 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 3287 N2.getValueType() == MVT::Other && "Invalid token factor!"); 3288 // Fold trivial token factors. 3289 if (N1.getOpcode() == ISD::EntryToken) return N2; 3290 if (N2.getOpcode() == ISD::EntryToken) return N1; 3291 if (N1 == N2) return N1; 3292 break; 3293 case ISD::CONCAT_VECTORS: 3294 // Concat of UNDEFs is UNDEF. 3295 if (N1.getOpcode() == ISD::UNDEF && 3296 N2.getOpcode() == ISD::UNDEF) 3297 return getUNDEF(VT); 3298 3299 // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to 3300 // one big BUILD_VECTOR. 3301 if (N1.getOpcode() == ISD::BUILD_VECTOR && 3302 N2.getOpcode() == ISD::BUILD_VECTOR) { 3303 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), 3304 N1.getNode()->op_end()); 3305 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end()); 3306 3307 // BUILD_VECTOR requires all inputs to be of the same type, find the 3308 // maximum type and extend them all. 3309 EVT SVT = VT.getScalarType(); 3310 for (SDValue Op : Elts) 3311 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 3312 if (SVT.bitsGT(VT.getScalarType())) 3313 for (SDValue &Op : Elts) 3314 Op = TLI->isZExtFree(Op.getValueType(), SVT) 3315 ? getZExtOrTrunc(Op, DL, SVT) 3316 : getSExtOrTrunc(Op, DL, SVT); 3317 3318 return getNode(ISD::BUILD_VECTOR, DL, VT, Elts); 3319 } 3320 break; 3321 case ISD::AND: 3322 assert(VT.isInteger() && "This operator does not apply to FP types!"); 3323 assert(N1.getValueType() == N2.getValueType() && 3324 N1.getValueType() == VT && "Binary operator types must match!"); 3325 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 3326 // worth handling here. 3327 if (N2C && N2C->isNullValue()) 3328 return N2; 3329 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 3330 return N1; 3331 break; 3332 case ISD::OR: 3333 case ISD::XOR: 3334 case ISD::ADD: 3335 case ISD::SUB: 3336 assert(VT.isInteger() && "This operator does not apply to FP types!"); 3337 assert(N1.getValueType() == N2.getValueType() && 3338 N1.getValueType() == VT && "Binary operator types must match!"); 3339 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 3340 // it's worth handling here. 
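    // E.g. expanding an i64 operation into two i32 halves on a 32-bit target
    // often yields a high-half op whose second operand is the constant 0.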
3341     if (N2C && N2C->isNullValue())
3342       return N1;
3343     break;
3344   case ISD::UDIV:
3345   case ISD::UREM:
3346   case ISD::MULHU:
3347   case ISD::MULHS:
3348   case ISD::MUL:
3349   case ISD::SDIV:
3350   case ISD::SREM:
3351     assert(VT.isInteger() && "This operator does not apply to FP types!");
3352     assert(N1.getValueType() == N2.getValueType() &&
3353            N1.getValueType() == VT && "Binary operator types must match!");
3354     break;
3355   case ISD::FADD:
3356   case ISD::FSUB:
3357   case ISD::FMUL:
3358   case ISD::FDIV:
3359   case ISD::FREM:
3360     if (getTarget().Options.UnsafeFPMath) {
3361       if (Opcode == ISD::FADD) {
3362         // 0+x --> x
3363         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
3364           if (CFP->getValueAPF().isZero())
3365             return N2;
3366         // x+0 --> x
3367         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
3368           if (CFP->getValueAPF().isZero())
3369             return N1;
3370       } else if (Opcode == ISD::FSUB) {
3371         // x-0 --> x
3372         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
3373           if (CFP->getValueAPF().isZero())
3374             return N1;
3375       } else if (Opcode == ISD::FMUL) {
3376         ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
3377         SDValue V = N2;
3378
3379         // If the first operand isn't the constant, try the second
3380         if (!CFP) {
3381           CFP = dyn_cast<ConstantFPSDNode>(N2);
3382           V = N1;
3383         }
3384
3385         if (CFP) {
3386           // 0*x --> 0
3387           if (CFP->isZero())
3388             return SDValue(CFP, 0);
3389           // 1*x --> x
3390           if (CFP->isExactlyValue(1.0))
3391             return V;
3392         }
3393       }
3394     }
3395     assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
3396     assert(N1.getValueType() == N2.getValueType() &&
3397            N1.getValueType() == VT && "Binary operator types must match!");
3398     break;
3399   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
3400     assert(N1.getValueType() == VT &&
3401            N1.getValueType().isFloatingPoint() &&
3402            N2.getValueType().isFloatingPoint() &&
3403            "Invalid FCOPYSIGN!");
3404     break;
3405   case ISD::SHL:
3406   case ISD::SRA:
3407   case ISD::SRL:
3408   case ISD::ROTL:
3409   case ISD::ROTR:
3410     assert(VT == N1.getValueType() &&
3411            "Shift operators' return type must be the same as their first arg");
3412     assert(VT.isInteger() && N2.getValueType().isInteger() &&
3413            "Shifts only work on integers");
3414     assert((!VT.isVector() || VT == N2.getValueType()) &&
3415            "Vector shift amounts must have the same type as their first arg");
3416     // Verify that the shift amount VT is big enough to hold valid shift
3417     // amounts.  This catches things like trying to shift an i1024 value by an
3418     // i8, which is easy to fall into in generic code that uses
3419     // TLI.getShiftAmountTy().
3420     assert(N2.getValueType().getSizeInBits() >=
3421            Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
3422            "Invalid use of small shift amount with oversized value!");
3423
3424     // Always fold shifts of i1 values so the code generator doesn't need to
3425     // handle them.  Since we know the size of the shift has to be less than the
3426     // size of the value, the shift/rotate count is guaranteed to be zero.
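    // That is, the only in-range shift amount for an i1 value is 0, so the
    // result is always just the first operand.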
3427 if (VT == MVT::i1) 3428 return N1; 3429 if (N2C && N2C->isNullValue()) 3430 return N1; 3431 break; 3432 case ISD::FP_ROUND_INREG: { 3433 EVT EVT = cast<VTSDNode>(N2)->getVT(); 3434 assert(VT == N1.getValueType() && "Not an inreg round!"); 3435 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() && 3436 "Cannot FP_ROUND_INREG integer types"); 3437 assert(EVT.isVector() == VT.isVector() && 3438 "FP_ROUND_INREG type should be vector iff the operand " 3439 "type is vector!"); 3440 assert((!EVT.isVector() || 3441 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 3442 "Vector element counts must match in FP_ROUND_INREG"); 3443 assert(EVT.bitsLE(VT) && "Not rounding down!"); 3444 (void)EVT; 3445 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding. 3446 break; 3447 } 3448 case ISD::FP_ROUND: 3449 assert(VT.isFloatingPoint() && 3450 N1.getValueType().isFloatingPoint() && 3451 VT.bitsLE(N1.getValueType()) && 3452 isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!"); 3453 if (N1.getValueType() == VT) return N1; // noop conversion. 3454 break; 3455 case ISD::AssertSext: 3456 case ISD::AssertZext: { 3457 EVT EVT = cast<VTSDNode>(N2)->getVT(); 3458 assert(VT == N1.getValueType() && "Not an inreg extend!"); 3459 assert(VT.isInteger() && EVT.isInteger() && 3460 "Cannot *_EXTEND_INREG FP types"); 3461 assert(!EVT.isVector() && 3462 "AssertSExt/AssertZExt type should be the vector element type " 3463 "rather than the vector type!"); 3464 assert(EVT.bitsLE(VT) && "Not extending!"); 3465 if (VT == EVT) return N1; // noop assertion. 3466 break; 3467 } 3468 case ISD::SIGN_EXTEND_INREG: { 3469 EVT EVT = cast<VTSDNode>(N2)->getVT(); 3470 assert(VT == N1.getValueType() && "Not an inreg extend!"); 3471 assert(VT.isInteger() && EVT.isInteger() && 3472 "Cannot *_EXTEND_INREG FP types"); 3473 assert(EVT.isVector() == VT.isVector() && 3474 "SIGN_EXTEND_INREG type should be vector iff the operand " 3475 "type is vector!"); 3476 assert((!EVT.isVector() || 3477 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 3478 "Vector element counts must match in SIGN_EXTEND_INREG"); 3479 assert(EVT.bitsLE(VT) && "Not extending!"); 3480 if (EVT == VT) return N1; // Not actually extending 3481 3482 auto SignExtendInReg = [&](APInt Val) { 3483 unsigned FromBits = EVT.getScalarType().getSizeInBits(); 3484 Val <<= Val.getBitWidth() - FromBits; 3485 Val = Val.ashr(Val.getBitWidth() - FromBits); 3486 return getConstant(Val, DL, VT.getScalarType()); 3487 }; 3488 3489 if (N1C) { 3490 APInt Val = N1C->getAPIntValue(); 3491 return SignExtendInReg(Val); 3492 } 3493 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 3494 SmallVector<SDValue, 8> Ops; 3495 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 3496 SDValue Op = N1.getOperand(i); 3497 if (Op.getValueType() != VT.getScalarType()) break; 3498 if (Op.getOpcode() == ISD::UNDEF) { 3499 Ops.push_back(Op); 3500 continue; 3501 } 3502 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getNode())) { 3503 APInt Val = C->getAPIntValue(); 3504 Ops.push_back(SignExtendInReg(Val)); 3505 continue; 3506 } 3507 break; 3508 } 3509 if (Ops.size() == VT.getVectorNumElements()) 3510 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops); 3511 } 3512 break; 3513 } 3514 case ISD::EXTRACT_VECTOR_ELT: 3515 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 
3516 if (N1.getOpcode() == ISD::UNDEF) 3517 return getUNDEF(VT); 3518 3519 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 3520 if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 3521 return getUNDEF(VT); 3522 3523 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 3524 // expanding copies of large vectors from registers. 3525 if (N2C && 3526 N1.getOpcode() == ISD::CONCAT_VECTORS && 3527 N1.getNumOperands() > 0) { 3528 unsigned Factor = 3529 N1.getOperand(0).getValueType().getVectorNumElements(); 3530 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 3531 N1.getOperand(N2C->getZExtValue() / Factor), 3532 getConstant(N2C->getZExtValue() % Factor, DL, 3533 N2.getValueType())); 3534 } 3535 3536 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 3537 // expanding large vector constants. 3538 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { 3539 SDValue Elt = N1.getOperand(N2C->getZExtValue()); 3540 3541 if (VT != Elt.getValueType()) 3542 // If the vector element type is not legal, the BUILD_VECTOR operands 3543 // are promoted and implicitly truncated, and the result implicitly 3544 // extended. Make that explicit here. 3545 Elt = getAnyExtOrTrunc(Elt, DL, VT); 3546 3547 return Elt; 3548 } 3549 3550 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 3551 // operations are lowered to scalars. 3552 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 3553 // If the indices are the same, return the inserted element else 3554 // if the indices are known different, extract the element from 3555 // the original vector. 3556 SDValue N1Op2 = N1.getOperand(2); 3557 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode()); 3558 3559 if (N1Op2C && N2C) { 3560 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 3561 if (VT == N1.getOperand(1).getValueType()) 3562 return N1.getOperand(1); 3563 else 3564 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 3565 } 3566 3567 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 3568 } 3569 } 3570 break; 3571 case ISD::EXTRACT_ELEMENT: 3572 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 3573 assert(!N1.getValueType().isVector() && !VT.isVector() && 3574 (N1.getValueType().isInteger() == VT.isInteger()) && 3575 N1.getValueType() != VT && 3576 "Wrong types for EXTRACT_ELEMENT!"); 3577 3578 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 3579 // 64-bit integers into 32-bit parts. Instead of building the extract of 3580 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 3581 if (N1.getOpcode() == ISD::BUILD_PAIR) 3582 return N1.getOperand(N2C->getZExtValue()); 3583 3584 // EXTRACT_ELEMENT of a constant int is also very common. 
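    // E.g. extracting element 1 (the high part) of the i64 constant
    // 0x00000001FFFFFFFF as an i32 shifts right by 32 bits and truncates,
    // yielding the i32 constant 1.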
3585 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 3586 unsigned ElementSize = VT.getSizeInBits(); 3587 unsigned Shift = ElementSize * N2C->getZExtValue(); 3588 APInt ShiftedVal = C->getAPIntValue().lshr(Shift); 3589 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT); 3590 } 3591 break; 3592 case ISD::EXTRACT_SUBVECTOR: { 3593 SDValue Index = N2; 3594 if (VT.isSimple() && N1.getValueType().isSimple()) { 3595 assert(VT.isVector() && N1.getValueType().isVector() && 3596 "Extract subvector VTs must be a vectors!"); 3597 assert(VT.getVectorElementType() == 3598 N1.getValueType().getVectorElementType() && 3599 "Extract subvector VTs must have the same element type!"); 3600 assert(VT.getSimpleVT() <= N1.getSimpleValueType() && 3601 "Extract subvector must be from larger vector to smaller vector!"); 3602 3603 if (isa<ConstantSDNode>(Index.getNode())) { 3604 assert((VT.getVectorNumElements() + 3605 cast<ConstantSDNode>(Index.getNode())->getZExtValue() 3606 <= N1.getValueType().getVectorNumElements()) 3607 && "Extract subvector overflow!"); 3608 } 3609 3610 // Trivial extraction. 3611 if (VT.getSimpleVT() == N1.getSimpleValueType()) 3612 return N1; 3613 } 3614 break; 3615 } 3616 } 3617 3618 // Perform trivial constant folding. 3619 if (SDValue SV = 3620 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode())) 3621 return SV; 3622 3623 // Canonicalize constant to RHS if commutative. 3624 if (N1C && !N2C && isCommutativeBinOp(Opcode)) { 3625 std::swap(N1C, N2C); 3626 std::swap(N1, N2); 3627 } 3628 3629 // Constant fold FP operations. 3630 bool HasFPExceptions = TLI->hasFloatingPointExceptions(); 3631 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 3632 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 3633 if (N1CFP) { 3634 if (!N2CFP && isCommutativeBinOp(Opcode)) { 3635 // Canonicalize constant to RHS if commutative. 3636 std::swap(N1CFP, N2CFP); 3637 std::swap(N1, N2); 3638 } else if (N2CFP) { 3639 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF(); 3640 APFloat::opStatus s; 3641 switch (Opcode) { 3642 case ISD::FADD: 3643 s = V1.add(V2, APFloat::rmNearestTiesToEven); 3644 if (!HasFPExceptions || s != APFloat::opInvalidOp) 3645 return getConstantFP(V1, DL, VT); 3646 break; 3647 case ISD::FSUB: 3648 s = V1.subtract(V2, APFloat::rmNearestTiesToEven); 3649 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 3650 return getConstantFP(V1, DL, VT); 3651 break; 3652 case ISD::FMUL: 3653 s = V1.multiply(V2, APFloat::rmNearestTiesToEven); 3654 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 3655 return getConstantFP(V1, DL, VT); 3656 break; 3657 case ISD::FDIV: 3658 s = V1.divide(V2, APFloat::rmNearestTiesToEven); 3659 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 3660 s!=APFloat::opDivByZero)) { 3661 return getConstantFP(V1, DL, VT); 3662 } 3663 break; 3664 case ISD::FREM : 3665 s = V1.mod(V2, APFloat::rmNearestTiesToEven); 3666 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 3667 s!=APFloat::opDivByZero)) { 3668 return getConstantFP(V1, DL, VT); 3669 } 3670 break; 3671 case ISD::FCOPYSIGN: 3672 V1.copySign(V2); 3673 return getConstantFP(V1, DL, VT); 3674 default: break; 3675 } 3676 } 3677 3678 if (Opcode == ISD::FP_ROUND) { 3679 APFloat V = N1CFP->getValueAPF(); // make copy 3680 bool ignored; 3681 // This can return overflow, underflow, or inexact; we don't care. 3682 // FIXME need to be more flexible about rounding mode. 
(void)V.convert(EVTToAPFloatSemantics(VT),
3684                       APFloat::rmNearestTiesToEven, &ignored);
3685       return getConstantFP(V, DL, VT);
3686     }
3687   }
3688
3689   // Canonicalize an UNDEF to the RHS, even over a constant.
3690   if (N1.getOpcode() == ISD::UNDEF) {
3691     if (isCommutativeBinOp(Opcode)) {
3692       std::swap(N1, N2);
3693     } else {
3694       switch (Opcode) {
3695       case ISD::FP_ROUND_INREG:
3696       case ISD::SIGN_EXTEND_INREG:
3697       case ISD::SUB:
3698       case ISD::FSUB:
3699       case ISD::FDIV:
3700       case ISD::FREM:
3701       case ISD::SRA:
3702         return N1;     // fold op(undef, arg2) -> undef
3703       case ISD::UDIV:
3704       case ISD::SDIV:
3705       case ISD::UREM:
3706       case ISD::SREM:
3707       case ISD::SRL:
3708       case ISD::SHL:
3709         if (!VT.isVector())
3710           return getConstant(0, DL, VT);    // fold op(undef, arg2) -> 0
3711         // For vectors, we can't easily build an all zero vector, just return
3712         // the RHS.
3713         return N2;
3714       }
3715     }
3716   }
3717
3718   // Fold a bunch of operators when the RHS is undef.
3719   if (N2.getOpcode() == ISD::UNDEF) {
3720     switch (Opcode) {
3721     case ISD::XOR:
3722       if (N1.getOpcode() == ISD::UNDEF)
3723         // Handle undef ^ undef -> 0 special case. This is a common
3724         // idiom (misuse).
3725         return getConstant(0, DL, VT);
3726       // fallthrough
3727     case ISD::ADD:
3728     case ISD::ADDC:
3729     case ISD::ADDE:
3730     case ISD::SUB:
3731     case ISD::UDIV:
3732     case ISD::SDIV:
3733     case ISD::UREM:
3734     case ISD::SREM:
3735       return N2;       // fold op(arg1, undef) -> undef
3736     case ISD::FADD:
3737     case ISD::FSUB:
3738     case ISD::FMUL:
3739     case ISD::FDIV:
3740     case ISD::FREM:
3741       if (getTarget().Options.UnsafeFPMath)
3742         return N2;
3743       break;
3744     case ISD::MUL:
3745     case ISD::AND:
3746     case ISD::SRL:
3747     case ISD::SHL:
3748       if (!VT.isVector())
3749         return getConstant(0, DL, VT);  // fold op(arg1, undef) -> 0
3750       // For vectors, we can't easily build an all zero vector, just return
3751       // the LHS.
3752       return N1;
3753     case ISD::OR:
3754       if (!VT.isVector())
3755         return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
3756       // For vectors, we can't easily build an all-ones vector, just return
3757       // the LHS.
3758       return N1;
3759     case ISD::SRA:
3760       return N1;
3761     }
3762   }
3763
3764   // Memoize this node if possible.
3765   BinarySDNode *N;
3766   SDVTList VTs = getVTList(VT);
3767   if (VT != MVT::Glue) {
3768     SDValue Ops[] = {N1, N2};
3769     FoldingSetNodeID ID;
3770     AddNodeIDNode(ID, Opcode, VTs, Ops);
3771     AddNodeIDFlags(ID, Opcode, Flags);
3772     void *IP = nullptr;
3773     if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
3774       return SDValue(E, 0);
3775
3776     N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
3777
3778     CSEMap.InsertNode(N, IP);
3779   } else {
3780     N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
3781   }
3782
3783   InsertNode(N);
3784   return SDValue(N, 0);
3785 }
3786
3787 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3788                               SDValue N1, SDValue N2, SDValue N3) {
3789   // Perform various simplifications.
3790 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); 3791 switch (Opcode) { 3792 case ISD::FMA: { 3793 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 3794 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 3795 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 3796 if (N1CFP && N2CFP && N3CFP) { 3797 APFloat V1 = N1CFP->getValueAPF(); 3798 const APFloat &V2 = N2CFP->getValueAPF(); 3799 const APFloat &V3 = N3CFP->getValueAPF(); 3800 APFloat::opStatus s = 3801 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 3802 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp) 3803 return getConstantFP(V1, DL, VT); 3804 } 3805 break; 3806 } 3807 case ISD::CONCAT_VECTORS: 3808 // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to 3809 // one big BUILD_VECTOR. 3810 if (N1.getOpcode() == ISD::BUILD_VECTOR && 3811 N2.getOpcode() == ISD::BUILD_VECTOR && 3812 N3.getOpcode() == ISD::BUILD_VECTOR) { 3813 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), 3814 N1.getNode()->op_end()); 3815 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end()); 3816 Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end()); 3817 return getNode(ISD::BUILD_VECTOR, DL, VT, Elts); 3818 } 3819 break; 3820 case ISD::SETCC: { 3821 // Use FoldSetCC to simplify SETCC's. 3822 SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL); 3823 if (Simp.getNode()) return Simp; 3824 break; 3825 } 3826 case ISD::SELECT: 3827 if (N1C) { 3828 if (N1C->getZExtValue()) 3829 return N2; // select true, X, Y -> X 3830 return N3; // select false, X, Y -> Y 3831 } 3832 3833 if (N2 == N3) return N2; // select C, X, X -> X 3834 break; 3835 case ISD::VECTOR_SHUFFLE: 3836 llvm_unreachable("should use getVectorShuffle constructor!"); 3837 case ISD::INSERT_SUBVECTOR: { 3838 SDValue Index = N3; 3839 if (VT.isSimple() && N1.getValueType().isSimple() 3840 && N2.getValueType().isSimple()) { 3841 assert(VT.isVector() && N1.getValueType().isVector() && 3842 N2.getValueType().isVector() && 3843 "Insert subvector VTs must be a vectors"); 3844 assert(VT == N1.getValueType() && 3845 "Dest and insert subvector source types must match!"); 3846 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() && 3847 "Insert subvector must be from smaller vector to larger vector!"); 3848 if (isa<ConstantSDNode>(Index.getNode())) { 3849 assert((N2.getValueType().getVectorNumElements() + 3850 cast<ConstantSDNode>(Index.getNode())->getZExtValue() 3851 <= VT.getVectorNumElements()) 3852 && "Insert subvector overflow!"); 3853 } 3854 3855 // Trivial insertion. 3856 if (VT.getSimpleVT() == N2.getSimpleValueType()) 3857 return N2; 3858 } 3859 break; 3860 } 3861 case ISD::BITCAST: 3862 // Fold bit_convert nodes from a type to themselves. 3863 if (N1.getValueType() == VT) 3864 return N1; 3865 break; 3866 } 3867 3868 // Memoize node if it doesn't produce a flag. 
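  // Nodes with an MVT::Glue result are intentionally never CSE'd: glue ties a
  // producer to one specific consumer, so two structurally identical glue
  // producers are not interchangeable.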
3869 SDNode *N; 3870 SDVTList VTs = getVTList(VT); 3871 if (VT != MVT::Glue) { 3872 SDValue Ops[] = { N1, N2, N3 }; 3873 FoldingSetNodeID ID; 3874 AddNodeIDNode(ID, Opcode, VTs, Ops); 3875 void *IP = nullptr; 3876 if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)) 3877 return SDValue(E, 0); 3878 3879 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), 3880 DL.getDebugLoc(), VTs, N1, N2, N3); 3881 CSEMap.InsertNode(N, IP); 3882 } else { 3883 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), 3884 DL.getDebugLoc(), VTs, N1, N2, N3); 3885 } 3886 3887 InsertNode(N); 3888 return SDValue(N, 0); 3889 } 3890 3891 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, 3892 SDValue N1, SDValue N2, SDValue N3, 3893 SDValue N4) { 3894 SDValue Ops[] = { N1, N2, N3, N4 }; 3895 return getNode(Opcode, DL, VT, Ops); 3896 } 3897 3898 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, 3899 SDValue N1, SDValue N2, SDValue N3, 3900 SDValue N4, SDValue N5) { 3901 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 3902 return getNode(Opcode, DL, VT, Ops); 3903 } 3904 3905 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 3906 /// the incoming stack arguments to be loaded from the stack. 3907 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 3908 SmallVector<SDValue, 8> ArgChains; 3909 3910 // Include the original chain at the beginning of the list. When this is 3911 // used by target LowerCall hooks, this helps legalize find the 3912 // CALLSEQ_BEGIN node. 3913 ArgChains.push_back(Chain); 3914 3915 // Add a chain value for each stack argument. 3916 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 3917 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 3918 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 3919 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 3920 if (FI->getIndex() < 0) 3921 ArgChains.push_back(SDValue(L, 1)); 3922 3923 // Build a tokenfactor for all the chains. 3924 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 3925 } 3926 3927 /// getMemsetValue - Vectorized representation of the memset value 3928 /// operand. 3929 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 3930 SDLoc dl) { 3931 assert(Value.getOpcode() != ISD::UNDEF); 3932 3933 unsigned NumBits = VT.getScalarType().getSizeInBits(); 3934 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 3935 assert(C->getAPIntValue().getBitWidth() == 8); 3936 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 3937 if (VT.isInteger()) 3938 return DAG.getConstant(Val, dl, VT); 3939 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 3940 VT); 3941 } 3942 3943 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 3944 EVT IntVT = VT.getScalarType(); 3945 if (!IntVT.isInteger()) 3946 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 3947 3948 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 3949 if (NumBits > 8) { 3950 // Use a multiplication with 0x010101... to extend the input to the 3951 // required length. 
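    // E.g. splatting the byte 0xAB across an i32 computes
    // 0x000000AB * 0x01010101 = 0xABABABAB.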
3952     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3953     Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
3954                         DAG.getConstant(Magic, dl, IntVT));
3955   }
3956
3957   if (VT != Value.getValueType() && !VT.isInteger())
3958     Value = DAG.getNode(ISD::BITCAST, dl, VT.getScalarType(), Value);
3959   if (VT != Value.getValueType()) {
3960     assert(VT.getVectorElementType() == Value.getValueType() &&
3961            "value type should be one vector element here");
3962     SmallVector<SDValue, 8> BVOps(VT.getVectorNumElements(), Value);
3963     Value = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, BVOps);
3964   }
3965
3966   return Value;
3967 }
3968
3969 /// getMemsetStringVal - Similar to getMemsetValue, except this is only used
3970 /// when a memcpy is turned into a memset because the source is a constant
3971 /// string pointer.
3972 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
3973                                   const TargetLowering &TLI, StringRef Str) {
3974   // Handle vector with all elements zero.
3975   if (Str.empty()) {
3976     if (VT.isInteger())
3977       return DAG.getConstant(0, dl, VT);
3978     else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
3979       return DAG.getConstantFP(0.0, dl, VT);
3980     else if (VT.isVector()) {
3981       unsigned NumElts = VT.getVectorNumElements();
3982       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3983       return DAG.getNode(ISD::BITCAST, dl, VT,
3984                          DAG.getConstant(0, dl,
3985                                          EVT::getVectorVT(*DAG.getContext(),
3986                                                           EltVT, NumElts)));
3987     } else
3988       llvm_unreachable("Expected type!");
3989   }
3990
3991   assert(!VT.isVector() && "Can't handle vector type here!");
3992   unsigned NumVTBits = VT.getSizeInBits();
3993   unsigned NumVTBytes = NumVTBits / 8;
3994   unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3995
3996   APInt Val(NumVTBits, 0);
3997   if (DAG.getDataLayout().isLittleEndian()) {
3998     for (unsigned i = 0; i != NumBytes; ++i)
3999       Val |= (uint64_t)(unsigned char)Str[i] << i*8;
4000   } else {
4001     for (unsigned i = 0; i != NumBytes; ++i)
4002       Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
4003   }
4004
4005   // If the "cost" of materializing the integer immediate is less than the cost
4006   // of a load, then it is cost effective to turn the load into the immediate.
4007   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
4008   if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
4009     return DAG.getConstant(Val, dl, VT);
4010   return SDValue(nullptr, 0);
4011 }
4012
4013 /// getMemBasePlusOffset - Returns a DAG node that computes Base plus the
4014 /// given constant Offset.
4015 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
4016                                     SelectionDAG &DAG) {
4017   EVT VT = Base.getValueType();
4018   return DAG.getNode(ISD::ADD, dl,
4019                      VT, Base, DAG.getConstant(Offset, dl, VT));
4020 }
4021
4022 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
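/// On success, Str is set to the string's bytes; the lookup looks through a
/// plain GlobalAddress as well as a (GlobalAddress + constant offset) sum.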
4023 /// 4024 static bool isMemSrcFromString(SDValue Src, StringRef &Str) { 4025 unsigned SrcDelta = 0; 4026 GlobalAddressSDNode *G = nullptr; 4027 if (Src.getOpcode() == ISD::GlobalAddress) 4028 G = cast<GlobalAddressSDNode>(Src); 4029 else if (Src.getOpcode() == ISD::ADD && 4030 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 4031 Src.getOperand(1).getOpcode() == ISD::Constant) { 4032 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 4033 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 4034 } 4035 if (!G) 4036 return false; 4037 4038 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false); 4039 } 4040 4041 /// Determines the optimal series of memory ops to replace the memset / memcpy. 4042 /// Return true if the number of memory ops is below the threshold (Limit). 4043 /// It returns the types of the sequence of memory ops to perform 4044 /// memset / memcpy by reference. 4045 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps, 4046 unsigned Limit, uint64_t Size, 4047 unsigned DstAlign, unsigned SrcAlign, 4048 bool IsMemset, 4049 bool ZeroMemset, 4050 bool MemcpyStrSrc, 4051 bool AllowOverlap, 4052 SelectionDAG &DAG, 4053 const TargetLowering &TLI) { 4054 assert((SrcAlign == 0 || SrcAlign >= DstAlign) && 4055 "Expecting memcpy / memset source to meet alignment requirement!"); 4056 // If 'SrcAlign' is zero, that means the memory operation does not need to 4057 // load the value, i.e. memset or memcpy from constant string. Otherwise, 4058 // it's the inferred alignment of the source. 'DstAlign', on the other hand, 4059 // is the specified alignment of the memory operation. If it is zero, that 4060 // means it's possible to change the alignment of the destination. 4061 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does 4062 // not need to be loaded. 4063 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign, 4064 IsMemset, ZeroMemset, MemcpyStrSrc, 4065 DAG.getMachineFunction()); 4066 4067 if (VT == MVT::Other) { 4068 unsigned AS = 0; 4069 if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(AS) || 4070 TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) { 4071 VT = TLI.getPointerTy(DAG.getDataLayout()); 4072 } else { 4073 switch (DstAlign & 7) { 4074 case 0: VT = MVT::i64; break; 4075 case 4: VT = MVT::i32; break; 4076 case 2: VT = MVT::i16; break; 4077 default: VT = MVT::i8; break; 4078 } 4079 } 4080 4081 MVT LVT = MVT::i64; 4082 while (!TLI.isTypeLegal(LVT)) 4083 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1); 4084 assert(LVT.isInteger()); 4085 4086 if (VT.bitsGT(LVT)) 4087 VT = LVT; 4088 } 4089 4090 unsigned NumMemOps = 0; 4091 while (Size != 0) { 4092 unsigned VTSize = VT.getSizeInBits() / 8; 4093 while (VTSize > Size) { 4094 // For now, only use non-vector load / store's for the left-over pieces. 4095 EVT NewVT = VT; 4096 unsigned NewVTSize; 4097 4098 bool Found = false; 4099 if (VT.isVector() || VT.isFloatingPoint()) { 4100 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32; 4101 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) && 4102 TLI.isSafeMemOpType(NewVT.getSimpleVT())) 4103 Found = true; 4104 else if (NewVT == MVT::i64 && 4105 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) && 4106 TLI.isSafeMemOpType(MVT::f64)) { 4107 // i64 is usually not legal on 32-bit targets, but f64 may be. 
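          // (For example, 32-bit x86 with SSE2 can load and store an f64 in
          // a single instruction even though i64 is not a legal type there.)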
4108 NewVT = MVT::f64; 4109 Found = true; 4110 } 4111 } 4112 4113 if (!Found) { 4114 do { 4115 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1); 4116 if (NewVT == MVT::i8) 4117 break; 4118 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT())); 4119 } 4120 NewVTSize = NewVT.getSizeInBits() / 8; 4121 4122 // If the new VT cannot cover all of the remaining bits, then consider 4123 // issuing a (or a pair of) unaligned and overlapping load / store. 4124 // FIXME: Only does this for 64-bit or more since we don't have proper 4125 // cost model for unaligned load / store. 4126 bool Fast; 4127 unsigned AS = 0; 4128 if (NumMemOps && AllowOverlap && 4129 VTSize >= 8 && NewVTSize < Size && 4130 TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign, &Fast) && Fast) 4131 VTSize = Size; 4132 else { 4133 VT = NewVT; 4134 VTSize = NewVTSize; 4135 } 4136 } 4137 4138 if (++NumMemOps > Limit) 4139 return false; 4140 4141 MemOps.push_back(VT); 4142 Size -= VTSize; 4143 } 4144 4145 return true; 4146 } 4147 4148 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl, 4149 SDValue Chain, SDValue Dst, 4150 SDValue Src, uint64_t Size, 4151 unsigned Align, bool isVol, 4152 bool AlwaysInline, 4153 MachinePointerInfo DstPtrInfo, 4154 MachinePointerInfo SrcPtrInfo) { 4155 // Turn a memcpy of undef to nop. 4156 if (Src.getOpcode() == ISD::UNDEF) 4157 return Chain; 4158 4159 // Expand memcpy to a series of load and store ops if the size operand falls 4160 // below a certain threshold. 4161 // TODO: In the AlwaysInline case, if the size is big then generate a loop 4162 // rather than maybe a humongous number of loads and stores. 4163 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4164 std::vector<EVT> MemOps; 4165 bool DstAlignCanChange = false; 4166 MachineFunction &MF = DAG.getMachineFunction(); 4167 MachineFrameInfo *MFI = MF.getFrameInfo(); 4168 bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize); 4169 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 4170 if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) 4171 DstAlignCanChange = true; 4172 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 4173 if (Align > SrcAlign) 4174 SrcAlign = Align; 4175 StringRef Str; 4176 bool CopyFromStr = isMemSrcFromString(Src, Str); 4177 bool isZeroStr = CopyFromStr && Str.empty(); 4178 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 4179 4180 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 4181 (DstAlignCanChange ? 0 : Align), 4182 (isZeroStr ? 0 : SrcAlign), 4183 false, false, CopyFromStr, true, DAG, TLI)) 4184 return SDValue(); 4185 4186 if (DstAlignCanChange) { 4187 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 4188 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 4189 4190 // Don't promote to an alignment that would require dynamic stack 4191 // realignment. 4192 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 4193 if (!TRI->needsStackRealignment(MF)) 4194 while (NewAlign > Align && 4195 DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign)) 4196 NewAlign /= 2; 4197 4198 if (NewAlign > Align) { 4199 // Give the stack frame object a larger alignment if needed. 
4200 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) 4201 MFI->setObjectAlignment(FI->getIndex(), NewAlign); 4202 Align = NewAlign; 4203 } 4204 } 4205 4206 SmallVector<SDValue, 8> OutChains; 4207 unsigned NumMemOps = MemOps.size(); 4208 uint64_t SrcOff = 0, DstOff = 0; 4209 for (unsigned i = 0; i != NumMemOps; ++i) { 4210 EVT VT = MemOps[i]; 4211 unsigned VTSize = VT.getSizeInBits() / 8; 4212 SDValue Value, Store; 4213 4214 if (VTSize > Size) { 4215 // Issuing an unaligned load / store pair that overlaps with the previous 4216 // pair. Adjust the offset accordingly. 4217 assert(i == NumMemOps-1 && i != 0); 4218 SrcOff -= VTSize - Size; 4219 DstOff -= VTSize - Size; 4220 } 4221 4222 if (CopyFromStr && 4223 (isZeroStr || (VT.isInteger() && !VT.isVector()))) { 4224 // It's unlikely a store of a vector immediate can be done in a single 4225 // instruction. It would require a load from a constantpool first. 4226 // We only handle zero vectors here. 4227 // FIXME: Handle other cases where store of vector immediate is done in 4228 // a single instruction. 4229 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff)); 4230 if (Value.getNode()) 4231 Store = DAG.getStore(Chain, dl, Value, 4232 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 4233 DstPtrInfo.getWithOffset(DstOff), isVol, 4234 false, Align); 4235 } 4236 4237 if (!Store.getNode()) { 4238 // The type might not be legal for the target. This should only happen 4239 // if the type is smaller than a legal type, as on PPC, so the right 4240 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 4241 // to Load/Store if NVT==VT. 4242 // FIXME does the case above also need this? 4243 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 4244 assert(NVT.bitsGE(VT)); 4245 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 4246 getMemBasePlusOffset(Src, SrcOff, dl, DAG), 4247 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false, 4248 false, MinAlign(SrcAlign, SrcOff)); 4249 Store = DAG.getTruncStore(Chain, dl, Value, 4250 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 4251 DstPtrInfo.getWithOffset(DstOff), VT, isVol, 4252 false, Align); 4253 } 4254 OutChains.push_back(Store); 4255 SrcOff += VTSize; 4256 DstOff += VTSize; 4257 Size -= VTSize; 4258 } 4259 4260 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 4261 } 4262 4263 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl, 4264 SDValue Chain, SDValue Dst, 4265 SDValue Src, uint64_t Size, 4266 unsigned Align, bool isVol, 4267 bool AlwaysInline, 4268 MachinePointerInfo DstPtrInfo, 4269 MachinePointerInfo SrcPtrInfo) { 4270 // Turn a memmove of undef to nop. 4271 if (Src.getOpcode() == ISD::UNDEF) 4272 return Chain; 4273 4274 // Expand memmove to a series of load and store ops if the size operand falls 4275 // below a certain threshold. 4276 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4277 std::vector<EVT> MemOps; 4278 bool DstAlignCanChange = false; 4279 MachineFunction &MF = DAG.getMachineFunction(); 4280 MachineFrameInfo *MFI = MF.getFrameInfo(); 4281 bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize); 4282 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 4283 if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) 4284 DstAlignCanChange = true; 4285 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 4286 if (Align > SrcAlign) 4287 SrcAlign = Align; 4288 unsigned Limit = AlwaysInline ? 
~0U : TLI.getMaxStoresPerMemmove(OptSize); 4289 4290 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 4291 (DstAlignCanChange ? 0 : Align), SrcAlign, 4292 false, false, false, false, DAG, TLI)) 4293 return SDValue(); 4294 4295 if (DstAlignCanChange) { 4296 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 4297 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 4298 if (NewAlign > Align) { 4299 // Give the stack frame object a larger alignment if needed. 4300 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) 4301 MFI->setObjectAlignment(FI->getIndex(), NewAlign); 4302 Align = NewAlign; 4303 } 4304 } 4305 4306 uint64_t SrcOff = 0, DstOff = 0; 4307 SmallVector<SDValue, 8> LoadValues; 4308 SmallVector<SDValue, 8> LoadChains; 4309 SmallVector<SDValue, 8> OutChains; 4310 unsigned NumMemOps = MemOps.size(); 4311 for (unsigned i = 0; i < NumMemOps; i++) { 4312 EVT VT = MemOps[i]; 4313 unsigned VTSize = VT.getSizeInBits() / 8; 4314 SDValue Value; 4315 4316 Value = DAG.getLoad(VT, dl, Chain, 4317 getMemBasePlusOffset(Src, SrcOff, dl, DAG), 4318 SrcPtrInfo.getWithOffset(SrcOff), isVol, 4319 false, false, SrcAlign); 4320 LoadValues.push_back(Value); 4321 LoadChains.push_back(Value.getValue(1)); 4322 SrcOff += VTSize; 4323 } 4324 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 4325 OutChains.clear(); 4326 for (unsigned i = 0; i < NumMemOps; i++) { 4327 EVT VT = MemOps[i]; 4328 unsigned VTSize = VT.getSizeInBits() / 8; 4329 SDValue Store; 4330 4331 Store = DAG.getStore(Chain, dl, LoadValues[i], 4332 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 4333 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align); 4334 OutChains.push_back(Store); 4335 DstOff += VTSize; 4336 } 4337 4338 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 4339 } 4340 4341 /// \brief Lower the call to 'memset' intrinsic function into a series of store 4342 /// operations. 4343 /// 4344 /// \param DAG Selection DAG where lowered code is placed. 4345 /// \param dl Link to corresponding IR location. 4346 /// \param Chain Control flow dependency. 4347 /// \param Dst Pointer to destination memory location. 4348 /// \param Src Value of byte to write into the memory. 4349 /// \param Size Number of bytes to write. 4350 /// \param Align Alignment of the destination in bytes. 4351 /// \param isVol True if destination is volatile. 4352 /// \param DstPtrInfo IR information on the memory pointer. 4353 /// \returns New head in the control flow, if lowering was successful, empty 4354 /// SDValue otherwise. 4355 /// 4356 /// The function tries to replace 'llvm.memset' intrinsic with several store 4357 /// operations and value calculation code. This is usually profitable for small 4358 /// memory size. 4359 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl, 4360 SDValue Chain, SDValue Dst, 4361 SDValue Src, uint64_t Size, 4362 unsigned Align, bool isVol, 4363 MachinePointerInfo DstPtrInfo) { 4364 // Turn a memset of undef to nop. 4365 if (Src.getOpcode() == ISD::UNDEF) 4366 return Chain; 4367 4368 // Expand memset to a series of load/store ops if the size operand 4369 // falls below a certain threshold. 
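  // E.g. a memset of 16 zero bytes on a target with legal i64 stores is
  // typically emitted as two i64 stores of the constant 0.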
4370 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4371 std::vector<EVT> MemOps; 4372 bool DstAlignCanChange = false; 4373 MachineFunction &MF = DAG.getMachineFunction(); 4374 MachineFrameInfo *MFI = MF.getFrameInfo(); 4375 bool OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize); 4376 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 4377 if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) 4378 DstAlignCanChange = true; 4379 bool IsZeroVal = 4380 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 4381 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize), 4382 Size, (DstAlignCanChange ? 0 : Align), 0, 4383 true, IsZeroVal, false, true, DAG, TLI)) 4384 return SDValue(); 4385 4386 if (DstAlignCanChange) { 4387 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 4388 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 4389 if (NewAlign > Align) { 4390 // Give the stack frame object a larger alignment if needed. 4391 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) 4392 MFI->setObjectAlignment(FI->getIndex(), NewAlign); 4393 Align = NewAlign; 4394 } 4395 } 4396 4397 SmallVector<SDValue, 8> OutChains; 4398 uint64_t DstOff = 0; 4399 unsigned NumMemOps = MemOps.size(); 4400 4401 // Find the largest store and generate the bit pattern for it. 4402 EVT LargestVT = MemOps[0]; 4403 for (unsigned i = 1; i < NumMemOps; i++) 4404 if (MemOps[i].bitsGT(LargestVT)) 4405 LargestVT = MemOps[i]; 4406 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 4407 4408 for (unsigned i = 0; i < NumMemOps; i++) { 4409 EVT VT = MemOps[i]; 4410 unsigned VTSize = VT.getSizeInBits() / 8; 4411 if (VTSize > Size) { 4412 // Issuing an unaligned load / store pair that overlaps with the previous 4413 // pair. Adjust the offset accordingly. 4414 assert(i == NumMemOps-1 && i != 0); 4415 DstOff -= VTSize - Size; 4416 } 4417 4418 // If this store is smaller than the largest store see whether we can get 4419 // the smaller value for free with a truncate. 4420 SDValue Value = MemSetValue; 4421 if (VT.bitsLT(LargestVT)) { 4422 if (!LargestVT.isVector() && !VT.isVector() && 4423 TLI.isTruncateFree(LargestVT, VT)) 4424 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 4425 else 4426 Value = getMemsetValue(Src, VT, DAG, dl); 4427 } 4428 assert(Value.getValueType() == VT && "Value with wrong type."); 4429 SDValue Store = DAG.getStore(Chain, dl, Value, 4430 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 4431 DstPtrInfo.getWithOffset(DstOff), 4432 isVol, false, Align); 4433 OutChains.push_back(Store); 4434 DstOff += VT.getSizeInBits() / 8; 4435 Size -= VTSize; 4436 } 4437 4438 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 4439 } 4440 4441 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst, 4442 SDValue Src, SDValue Size, 4443 unsigned Align, bool isVol, bool AlwaysInline, 4444 bool isTailCall, MachinePointerInfo DstPtrInfo, 4445 MachinePointerInfo SrcPtrInfo) { 4446 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 4447 4448 // Check to see if we should lower the memcpy to loads and stores first. 4449 // For cases within the target-specified limits, this is the best choice. 4450 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4451 if (ConstantSize) { 4452 // Memcpy with size zero? Just return the original chain. 
4453 if (ConstantSize->isNullValue()) 4454 return Chain; 4455 4456 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 4457 ConstantSize->getZExtValue(),Align, 4458 isVol, false, DstPtrInfo, SrcPtrInfo); 4459 if (Result.getNode()) 4460 return Result; 4461 } 4462 4463 // Then check to see if we should lower the memcpy with target-specific 4464 // code. If the target chooses to do this, this is the next best. 4465 if (TSI) { 4466 SDValue Result = TSI->EmitTargetCodeForMemcpy( 4467 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 4468 DstPtrInfo, SrcPtrInfo); 4469 if (Result.getNode()) 4470 return Result; 4471 } 4472 4473 // If we really need inline code and the target declined to provide it, 4474 // use a (potentially long) sequence of loads and stores. 4475 if (AlwaysInline) { 4476 assert(ConstantSize && "AlwaysInline requires a constant size!"); 4477 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 4478 ConstantSize->getZExtValue(), Align, isVol, 4479 true, DstPtrInfo, SrcPtrInfo); 4480 } 4481 4482 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 4483 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 4484 // respect volatile, so they may do things like read or write memory 4485 // beyond the given memory regions. But fixing this isn't easy, and most 4486 // people don't care. 4487 4488 // Emit a library call. 4489 TargetLowering::ArgListTy Args; 4490 TargetLowering::ArgListEntry Entry; 4491 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 4492 Entry.Node = Dst; Args.push_back(Entry); 4493 Entry.Node = Src; Args.push_back(Entry); 4494 Entry.Node = Size; Args.push_back(Entry); 4495 // FIXME: pass in SDLoc 4496 TargetLowering::CallLoweringInfo CLI(*this); 4497 CLI.setDebugLoc(dl) 4498 .setChain(Chain) 4499 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 4500 Type::getVoidTy(*getContext()), 4501 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 4502 TLI->getPointerTy(getDataLayout())), 4503 std::move(Args), 0) 4504 .setDiscardResult() 4505 .setTailCall(isTailCall); 4506 4507 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 4508 return CallResult.second; 4509 } 4510 4511 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst, 4512 SDValue Src, SDValue Size, 4513 unsigned Align, bool isVol, bool isTailCall, 4514 MachinePointerInfo DstPtrInfo, 4515 MachinePointerInfo SrcPtrInfo) { 4516 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 4517 4518 // Check to see if we should lower the memmove to loads and stores first. 4519 // For cases within the target-specified limits, this is the best choice. 4520 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4521 if (ConstantSize) { 4522 // Memmove with size zero? Just return the original chain. 4523 if (ConstantSize->isNullValue()) 4524 return Chain; 4525 4526 SDValue Result = 4527 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 4528 ConstantSize->getZExtValue(), Align, isVol, 4529 false, DstPtrInfo, SrcPtrInfo); 4530 if (Result.getNode()) 4531 return Result; 4532 } 4533 4534 // Then check to see if we should lower the memmove with target-specific 4535 // code. If the target chooses to do this, this is the next best. 
4536 if (TSI) { 4537 SDValue Result = TSI->EmitTargetCodeForMemmove( 4538 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 4539 if (Result.getNode()) 4540 return Result; 4541 } 4542 4543 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 4544 // not be safe. See memcpy above for more details. 4545 4546 // Emit a library call. 4547 TargetLowering::ArgListTy Args; 4548 TargetLowering::ArgListEntry Entry; 4549 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 4550 Entry.Node = Dst; Args.push_back(Entry); 4551 Entry.Node = Src; Args.push_back(Entry); 4552 Entry.Node = Size; Args.push_back(Entry); 4553 // FIXME: pass in SDLoc 4554 TargetLowering::CallLoweringInfo CLI(*this); 4555 CLI.setDebugLoc(dl) 4556 .setChain(Chain) 4557 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 4558 Type::getVoidTy(*getContext()), 4559 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 4560 TLI->getPointerTy(getDataLayout())), 4561 std::move(Args), 0) 4562 .setDiscardResult() 4563 .setTailCall(isTailCall); 4564 4565 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 4566 return CallResult.second; 4567 } 4568 4569 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst, 4570 SDValue Src, SDValue Size, 4571 unsigned Align, bool isVol, bool isTailCall, 4572 MachinePointerInfo DstPtrInfo) { 4573 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 4574 4575 // Check to see if we should lower the memset to stores first. 4576 // For cases within the target-specified limits, this is the best choice. 4577 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4578 if (ConstantSize) { 4579 // Memset with size zero? Just return the original chain. 4580 if (ConstantSize->isNullValue()) 4581 return Chain; 4582 4583 SDValue Result = 4584 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 4585 Align, isVol, DstPtrInfo); 4586 4587 if (Result.getNode()) 4588 return Result; 4589 } 4590 4591 // Then check to see if we should lower the memset with target-specific 4592 // code. If the target chooses to do this, this is the next best. 4593 if (TSI) { 4594 SDValue Result = TSI->EmitTargetCodeForMemset( 4595 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); 4596 if (Result.getNode()) 4597 return Result; 4598 } 4599 4600 // Emit a library call. 
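  // Note that unlike memcpy/memmove, the middle argument of the memset
  // libcall is the fill value (an int), so it keeps its own type below
  // instead of IntPtrTy.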
4601 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext()); 4602 TargetLowering::ArgListTy Args; 4603 TargetLowering::ArgListEntry Entry; 4604 Entry.Node = Dst; Entry.Ty = IntPtrTy; 4605 Args.push_back(Entry); 4606 Entry.Node = Src; 4607 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 4608 Args.push_back(Entry); 4609 Entry.Node = Size; 4610 Entry.Ty = IntPtrTy; 4611 Args.push_back(Entry); 4612 4613 // FIXME: pass in SDLoc 4614 TargetLowering::CallLoweringInfo CLI(*this); 4615 CLI.setDebugLoc(dl) 4616 .setChain(Chain) 4617 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 4618 Type::getVoidTy(*getContext()), 4619 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 4620 TLI->getPointerTy(getDataLayout())), 4621 std::move(Args), 0) 4622 .setDiscardResult() 4623 .setTailCall(isTailCall); 4624 4625 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 4626 return CallResult.second; 4627 } 4628 4629 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4630 SDVTList VTList, ArrayRef<SDValue> Ops, 4631 MachineMemOperand *MMO, 4632 AtomicOrdering SuccessOrdering, 4633 AtomicOrdering FailureOrdering, 4634 SynchronizationScope SynchScope) { 4635 FoldingSetNodeID ID; 4636 ID.AddInteger(MemVT.getRawBits()); 4637 AddNodeIDNode(ID, Opcode, VTList, Ops); 4638 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4639 void* IP = nullptr; 4640 if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) { 4641 cast<AtomicSDNode>(E)->refineAlignment(MMO); 4642 return SDValue(E, 0); 4643 } 4644 4645 // Allocate the operands array for the node out of the BumpPtrAllocator, since 4646 // SDNode doesn't have access to it. This memory will be "leaked" when 4647 // the node is deallocated, but recovered when the allocator is released. 4648 // If the number of operands is less than 5 we use AtomicSDNode's internal 4649 // storage. 4650 unsigned NumOps = Ops.size(); 4651 SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps) 4652 : nullptr; 4653 4654 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(), 4655 dl.getDebugLoc(), VTList, MemVT, 4656 Ops.data(), DynOps, NumOps, MMO, 4657 SuccessOrdering, FailureOrdering, 4658 SynchScope); 4659 CSEMap.InsertNode(N, IP); 4660 InsertNode(N); 4661 return SDValue(N, 0); 4662 } 4663 4664 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4665 SDVTList VTList, ArrayRef<SDValue> Ops, 4666 MachineMemOperand *MMO, 4667 AtomicOrdering Ordering, 4668 SynchronizationScope SynchScope) { 4669 return getAtomic(Opcode, dl, MemVT, VTList, Ops, MMO, Ordering, 4670 Ordering, SynchScope); 4671 } 4672 4673 SDValue SelectionDAG::getAtomicCmpSwap( 4674 unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs, SDValue Chain, 4675 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, 4676 unsigned Alignment, AtomicOrdering SuccessOrdering, 4677 AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) { 4678 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 4679 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 4680 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 4681 4682 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4683 Alignment = getEVTAlignment(MemVT); 4684 4685 MachineFunction &MF = getMachineFunction(); 4686 4687 // FIXME: Volatile isn't really correct; we should keep track of atomic 4688 // orderings in the memoperand. 
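  // A compare-and-swap both reads and writes memory, so mark the memoperand
  // as a load and a store.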
4689 unsigned Flags = MachineMemOperand::MOVolatile; 4690 Flags |= MachineMemOperand::MOLoad; 4691 Flags |= MachineMemOperand::MOStore; 4692 4693 MachineMemOperand *MMO = 4694 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment); 4695 4696 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO, 4697 SuccessOrdering, FailureOrdering, SynchScope); 4698 } 4699 4700 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT, 4701 SDVTList VTs, SDValue Chain, SDValue Ptr, 4702 SDValue Cmp, SDValue Swp, 4703 MachineMemOperand *MMO, 4704 AtomicOrdering SuccessOrdering, 4705 AtomicOrdering FailureOrdering, 4706 SynchronizationScope SynchScope) { 4707 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 4708 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 4709 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 4710 4711 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 4712 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, 4713 SuccessOrdering, FailureOrdering, SynchScope); 4714 } 4715 4716 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4717 SDValue Chain, 4718 SDValue Ptr, SDValue Val, 4719 const Value* PtrVal, 4720 unsigned Alignment, 4721 AtomicOrdering Ordering, 4722 SynchronizationScope SynchScope) { 4723 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4724 Alignment = getEVTAlignment(MemVT); 4725 4726 MachineFunction &MF = getMachineFunction(); 4727 // An atomic store does not load. An atomic load does not store. 4728 // (An atomicrmw obviously both loads and stores.) 4729 // For now, atomics are considered to be volatile always, and they are 4730 // chained as such. 4731 // FIXME: Volatile isn't really correct; we should keep track of atomic 4732 // orderings in the memoperand. 4733 unsigned Flags = MachineMemOperand::MOVolatile; 4734 if (Opcode != ISD::ATOMIC_STORE) 4735 Flags |= MachineMemOperand::MOLoad; 4736 if (Opcode != ISD::ATOMIC_LOAD) 4737 Flags |= MachineMemOperand::MOStore; 4738 4739 MachineMemOperand *MMO = 4740 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 4741 MemVT.getStoreSize(), Alignment); 4742 4743 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO, 4744 Ordering, SynchScope); 4745 } 4746 4747 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4748 SDValue Chain, 4749 SDValue Ptr, SDValue Val, 4750 MachineMemOperand *MMO, 4751 AtomicOrdering Ordering, 4752 SynchronizationScope SynchScope) { 4753 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 4754 Opcode == ISD::ATOMIC_LOAD_SUB || 4755 Opcode == ISD::ATOMIC_LOAD_AND || 4756 Opcode == ISD::ATOMIC_LOAD_OR || 4757 Opcode == ISD::ATOMIC_LOAD_XOR || 4758 Opcode == ISD::ATOMIC_LOAD_NAND || 4759 Opcode == ISD::ATOMIC_LOAD_MIN || 4760 Opcode == ISD::ATOMIC_LOAD_MAX || 4761 Opcode == ISD::ATOMIC_LOAD_UMIN || 4762 Opcode == ISD::ATOMIC_LOAD_UMAX || 4763 Opcode == ISD::ATOMIC_SWAP || 4764 Opcode == ISD::ATOMIC_STORE) && 4765 "Invalid Atomic Op"); 4766 4767 EVT VT = Val.getValueType(); 4768 4769 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? 
getVTList(MVT::Other) : 4770 getVTList(VT, MVT::Other); 4771 SDValue Ops[] = {Chain, Ptr, Val}; 4772 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope); 4773 } 4774 4775 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4776 EVT VT, SDValue Chain, 4777 SDValue Ptr, 4778 MachineMemOperand *MMO, 4779 AtomicOrdering Ordering, 4780 SynchronizationScope SynchScope) { 4781 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 4782 4783 SDVTList VTs = getVTList(VT, MVT::Other); 4784 SDValue Ops[] = {Chain, Ptr}; 4785 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope); 4786 } 4787 4788 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 4789 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl) { 4790 if (Ops.size() == 1) 4791 return Ops[0]; 4792 4793 SmallVector<EVT, 4> VTs; 4794 VTs.reserve(Ops.size()); 4795 for (unsigned i = 0; i < Ops.size(); ++i) 4796 VTs.push_back(Ops[i].getValueType()); 4797 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 4798 } 4799 4800 SDValue 4801 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList, 4802 ArrayRef<SDValue> Ops, 4803 EVT MemVT, MachinePointerInfo PtrInfo, 4804 unsigned Align, bool Vol, 4805 bool ReadMem, bool WriteMem, unsigned Size) { 4806 if (Align == 0) // Ensure that codegen never sees alignment 0 4807 Align = getEVTAlignment(MemVT); 4808 4809 MachineFunction &MF = getMachineFunction(); 4810 unsigned Flags = 0; 4811 if (WriteMem) 4812 Flags |= MachineMemOperand::MOStore; 4813 if (ReadMem) 4814 Flags |= MachineMemOperand::MOLoad; 4815 if (Vol) 4816 Flags |= MachineMemOperand::MOVolatile; 4817 if (!Size) 4818 Size = MemVT.getStoreSize(); 4819 MachineMemOperand *MMO = 4820 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align); 4821 4822 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 4823 } 4824 4825 SDValue 4826 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList, 4827 ArrayRef<SDValue> Ops, EVT MemVT, 4828 MachineMemOperand *MMO) { 4829 assert((Opcode == ISD::INTRINSIC_VOID || 4830 Opcode == ISD::INTRINSIC_W_CHAIN || 4831 Opcode == ISD::PREFETCH || 4832 Opcode == ISD::LIFETIME_START || 4833 Opcode == ISD::LIFETIME_END || 4834 (Opcode <= INT_MAX && 4835 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 4836 "Opcode is not a memory-accessing opcode!"); 4837 4838 // Memoize the node unless it returns a flag. 4839 MemIntrinsicSDNode *N; 4840 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 4841 FoldingSetNodeID ID; 4842 AddNodeIDNode(ID, Opcode, VTList, Ops); 4843 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4844 void *IP = nullptr; 4845 if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) { 4846 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 4847 return SDValue(E, 0); 4848 } 4849 4850 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(), 4851 dl.getDebugLoc(), VTList, Ops, 4852 MemVT, MMO); 4853 CSEMap.InsertNode(N, IP); 4854 } else { 4855 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(), 4856 dl.getDebugLoc(), VTList, Ops, 4857 MemVT, MMO); 4858 } 4859 InsertNode(N); 4860 return SDValue(N, 0); 4861 } 4862 4863 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 4864 /// MachinePointerInfo record from it. This is particularly useful because the 4865 /// code generator has many cases where it doesn't bother passing in a 4866 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 
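/// For example, given a pointer of the form (add FrameIndex:i64<1>,
/// Constant:i64<8>), where the slot number is purely illustrative, the
/// overloads below produce MachinePointerInfo::getFixedStack(1, 8) rather
/// than falling back to an unknown MachinePointerInfo().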
4867 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) { 4868 // If this is FI+Offset, we can model it. 4869 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 4870 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset); 4871 4872 // If this is (FI+Offset1)+Offset2, we can model it. 4873 if (Ptr.getOpcode() != ISD::ADD || 4874 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 4875 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 4876 return MachinePointerInfo(); 4877 4878 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4879 return MachinePointerInfo::getFixedStack(FI, Offset+ 4880 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 4881 } 4882 4883 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 4884 /// MachinePointerInfo record from it. This is particularly useful because the 4885 /// code generator has many cases where it doesn't bother passing in a 4886 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 4887 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) { 4888 // If the 'Offset' value isn't a constant, we can't handle this. 4889 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 4890 return InferPointerInfo(Ptr, OffsetNode->getSExtValue()); 4891 if (OffsetOp.getOpcode() == ISD::UNDEF) 4892 return InferPointerInfo(Ptr); 4893 return MachinePointerInfo(); 4894 } 4895 4896 4897 SDValue 4898 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 4899 EVT VT, SDLoc dl, SDValue Chain, 4900 SDValue Ptr, SDValue Offset, 4901 MachinePointerInfo PtrInfo, EVT MemVT, 4902 bool isVolatile, bool isNonTemporal, bool isInvariant, 4903 unsigned Alignment, const AAMDNodes &AAInfo, 4904 const MDNode *Ranges) { 4905 assert(Chain.getValueType() == MVT::Other && 4906 "Invalid chain type"); 4907 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4908 Alignment = getEVTAlignment(VT); 4909 4910 unsigned Flags = MachineMemOperand::MOLoad; 4911 if (isVolatile) 4912 Flags |= MachineMemOperand::MOVolatile; 4913 if (isNonTemporal) 4914 Flags |= MachineMemOperand::MONonTemporal; 4915 if (isInvariant) 4916 Flags |= MachineMemOperand::MOInvariant; 4917 4918 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 4919 // clients. 4920 if (PtrInfo.V.isNull()) 4921 PtrInfo = InferPointerInfo(Ptr, Offset); 4922 4923 MachineFunction &MF = getMachineFunction(); 4924 MachineMemOperand *MMO = 4925 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, 4926 AAInfo, Ranges); 4927 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 4928 } 4929 4930 SDValue 4931 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 4932 EVT VT, SDLoc dl, SDValue Chain, 4933 SDValue Ptr, SDValue Offset, EVT MemVT, 4934 MachineMemOperand *MMO) { 4935 if (VT == MemVT) { 4936 ExtType = ISD::NON_EXTLOAD; 4937 } else if (ExtType == ISD::NON_EXTLOAD) { 4938 assert(VT == MemVT && "Non-extending load from different memory type!"); 4939 } else { 4940 // Extending load. 
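    // For instance (illustrative types only): a sign-extending load with
    // VT = i32 and MemVT = i8 reads one byte and extends it to a 32-bit
    // result. MemVT must therefore be strictly smaller than VT and agree
    // with it in integer-ness and vector-ness, as asserted below.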
  assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be an extending load, not truncating!");
  assert(VT.isInteger() == MemVT.isInteger() &&
         "Cannot convert from FP to Int or Int -> FP!");
  assert(VT.isVector() == MemVT.isVector() &&
         "Cannot use an ext load to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
         "Cannot use an ext load to change the number of vector elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
         "Unindexed load with an offset!");

  SDVTList VTs = Indexed ?
    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
                                     MMO->isNonTemporal(),
                                     MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
                                             dl.getDebugLoc(), VTs, AM, ExtType,
                                             MemVT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachinePointerInfo PtrInfo,
                              bool isVolatile, bool isNonTemporal,
                              bool isInvariant, unsigned Alignment,
                              const AAMDNodes &AAInfo,
                              const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
                 AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 bool isVolatile, bool isNonTemporal,
                                 bool isInvariant, unsigned Alignment,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, MemVT, isVolatile, isNonTemporal, isInvariant,
                 Alignment, AAInfo);
}


SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue
SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
                             SDValue Offset, ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
         "Load is already an indexed load!");
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(),
LD->isVolatile(), LD->isNonTemporal(), 5030 false, LD->getAlignment()); 5031 } 5032 5033 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val, 5034 SDValue Ptr, MachinePointerInfo PtrInfo, 5035 bool isVolatile, bool isNonTemporal, 5036 unsigned Alignment, const AAMDNodes &AAInfo) { 5037 assert(Chain.getValueType() == MVT::Other && 5038 "Invalid chain type"); 5039 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5040 Alignment = getEVTAlignment(Val.getValueType()); 5041 5042 unsigned Flags = MachineMemOperand::MOStore; 5043 if (isVolatile) 5044 Flags |= MachineMemOperand::MOVolatile; 5045 if (isNonTemporal) 5046 Flags |= MachineMemOperand::MONonTemporal; 5047 5048 if (PtrInfo.V.isNull()) 5049 PtrInfo = InferPointerInfo(Ptr); 5050 5051 MachineFunction &MF = getMachineFunction(); 5052 MachineMemOperand *MMO = 5053 MF.getMachineMemOperand(PtrInfo, Flags, 5054 Val.getValueType().getStoreSize(), Alignment, 5055 AAInfo); 5056 5057 return getStore(Chain, dl, Val, Ptr, MMO); 5058 } 5059 5060 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val, 5061 SDValue Ptr, MachineMemOperand *MMO) { 5062 assert(Chain.getValueType() == MVT::Other && 5063 "Invalid chain type"); 5064 EVT VT = Val.getValueType(); 5065 SDVTList VTs = getVTList(MVT::Other); 5066 SDValue Undef = getUNDEF(Ptr.getValueType()); 5067 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 5068 FoldingSetNodeID ID; 5069 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 5070 ID.AddInteger(VT.getRawBits()); 5071 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(), 5072 MMO->isNonTemporal(), MMO->isInvariant())); 5073 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5074 void *IP = nullptr; 5075 if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) { 5076 cast<StoreSDNode>(E)->refineAlignment(MMO); 5077 return SDValue(E, 0); 5078 } 5079 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(), 5080 dl.getDebugLoc(), VTs, 5081 ISD::UNINDEXED, false, VT, MMO); 5082 CSEMap.InsertNode(N, IP); 5083 InsertNode(N); 5084 return SDValue(N, 0); 5085 } 5086 5087 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, 5088 SDValue Ptr, MachinePointerInfo PtrInfo, 5089 EVT SVT,bool isVolatile, bool isNonTemporal, 5090 unsigned Alignment, 5091 const AAMDNodes &AAInfo) { 5092 assert(Chain.getValueType() == MVT::Other && 5093 "Invalid chain type"); 5094 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5095 Alignment = getEVTAlignment(SVT); 5096 5097 unsigned Flags = MachineMemOperand::MOStore; 5098 if (isVolatile) 5099 Flags |= MachineMemOperand::MOVolatile; 5100 if (isNonTemporal) 5101 Flags |= MachineMemOperand::MONonTemporal; 5102 5103 if (PtrInfo.V.isNull()) 5104 PtrInfo = InferPointerInfo(Ptr); 5105 5106 MachineFunction &MF = getMachineFunction(); 5107 MachineMemOperand *MMO = 5108 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment, 5109 AAInfo); 5110 5111 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 5112 } 5113 5114 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, 5115 SDValue Ptr, EVT SVT, 5116 MachineMemOperand *MMO) { 5117 EVT VT = Val.getValueType(); 5118 5119 assert(Chain.getValueType() == MVT::Other && 5120 "Invalid chain type"); 5121 if (VT == SVT) 5122 return getStore(Chain, dl, Val, Ptr, MMO); 5123 5124 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 5125 "Should only be a truncating store, not extending!"); 5126 assert(VT.isInteger() == SVT.isInteger() && 5127 "Can't do FP-INT 
conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
                                     MMO->isNonTemporal(), MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
                                              dl.getDebugLoc(), VTs,
                                              ISD::UNINDEXED, true, SVT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue
SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
                              SDValue Offset, ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
         "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
                                              dl.getDebugLoc(), VTs, AM,
                                              ST->isTruncatingStore(),
                                              ST->getMemoryVT(),
                                              ST->getMemOperand());
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue
SelectionDAG::getMaskedLoad(EVT VT, SDLoc dl, SDValue Chain,
                            SDValue Ptr, SDValue Mask, SDValue Src0, EVT MemVT,
                            MachineMemOperand *MMO, ISD::LoadExtType ExtTy) {

  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(ExtTy, ISD::UNINDEXED,
                                     MMO->isVolatile(),
                                     MMO->isNonTemporal(),
                                     MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) MaskedLoadSDNode(dl.getIROrder(),
                                                   dl.getDebugLoc(), Ops, 4, VTs,
                                                   ExtTy, MemVT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMaskedStore(SDValue Chain, SDLoc dl, SDValue Val,
                                     SDValue Ptr, SDValue Mask, EVT MemVT,
                                     MachineMemOperand *MMO, bool isTrunc) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Ops[] = {
Chain, Ptr, Mask, Val }; 5219 FoldingSetNodeID ID; 5220 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 5221 ID.AddInteger(VT.getRawBits()); 5222 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(), 5223 MMO->isNonTemporal(), MMO->isInvariant())); 5224 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5225 void *IP = nullptr; 5226 if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) { 5227 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 5228 return SDValue(E, 0); 5229 } 5230 SDNode *N = new (NodeAllocator) MaskedStoreSDNode(dl.getIROrder(), 5231 dl.getDebugLoc(), Ops, 4, 5232 VTs, isTrunc, MemVT, MMO); 5233 CSEMap.InsertNode(N, IP); 5234 InsertNode(N); 5235 return SDValue(N, 0); 5236 } 5237 5238 SDValue 5239 SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, SDLoc dl, 5240 ArrayRef<SDValue> Ops, 5241 MachineMemOperand *MMO) { 5242 5243 FoldingSetNodeID ID; 5244 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 5245 ID.AddInteger(VT.getRawBits()); 5246 ID.AddInteger(encodeMemSDNodeFlags(ISD::NON_EXTLOAD, ISD::UNINDEXED, 5247 MMO->isVolatile(), 5248 MMO->isNonTemporal(), 5249 MMO->isInvariant())); 5250 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5251 void *IP = nullptr; 5252 if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) { 5253 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 5254 return SDValue(E, 0); 5255 } 5256 MaskedGatherSDNode *N = 5257 new (NodeAllocator) MaskedGatherSDNode(dl.getIROrder(), dl.getDebugLoc(), 5258 Ops, VTs, VT, MMO); 5259 CSEMap.InsertNode(N, IP); 5260 InsertNode(N); 5261 return SDValue(N, 0); 5262 } 5263 5264 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, SDLoc dl, 5265 ArrayRef<SDValue> Ops, 5266 MachineMemOperand *MMO) { 5267 FoldingSetNodeID ID; 5268 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 5269 ID.AddInteger(VT.getRawBits()); 5270 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(), 5271 MMO->isNonTemporal(), 5272 MMO->isInvariant())); 5273 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5274 void *IP = nullptr; 5275 if (SDNode *E = FindNodeOrInsertPos(ID, dl.getDebugLoc(), IP)) { 5276 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 5277 return SDValue(E, 0); 5278 } 5279 SDNode *N = 5280 new (NodeAllocator) MaskedScatterSDNode(dl.getIROrder(), dl.getDebugLoc(), 5281 Ops, VTs, VT, MMO); 5282 CSEMap.InsertNode(N, IP); 5283 InsertNode(N); 5284 return SDValue(N, 0); 5285 } 5286 5287 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl, 5288 SDValue Chain, SDValue Ptr, 5289 SDValue SV, 5290 unsigned Align) { 5291 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 5292 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 5293 } 5294 5295 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, 5296 ArrayRef<SDUse> Ops) { 5297 switch (Ops.size()) { 5298 case 0: return getNode(Opcode, DL, VT); 5299 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 5300 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 5301 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 5302 default: break; 5303 } 5304 5305 // Copy from an SDUse array into an SDValue array for use with 5306 // the regular getNode logic. 
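  // (SDUse converts implicitly to SDValue, so the range constructor below
  // performs the per-element conversion.)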
5307 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 5308 return getNode(Opcode, DL, VT, NewOps); 5309 } 5310 5311 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, 5312 ArrayRef<SDValue> Ops) { 5313 unsigned NumOps = Ops.size(); 5314 switch (NumOps) { 5315 case 0: return getNode(Opcode, DL, VT); 5316 case 1: return getNode(Opcode, DL, VT, Ops[0]); 5317 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 5318 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 5319 default: break; 5320 } 5321 5322 switch (Opcode) { 5323 default: break; 5324 case ISD::SELECT_CC: { 5325 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 5326 assert(Ops[0].getValueType() == Ops[1].getValueType() && 5327 "LHS and RHS of condition must have same type!"); 5328 assert(Ops[2].getValueType() == Ops[3].getValueType() && 5329 "True and False arms of SelectCC must have same type!"); 5330 assert(Ops[2].getValueType() == VT && 5331 "select_cc node must be of same type as true and false value!"); 5332 break; 5333 } 5334 case ISD::BR_CC: { 5335 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 5336 assert(Ops[2].getValueType() == Ops[3].getValueType() && 5337 "LHS/RHS of comparison should match types!"); 5338 break; 5339 } 5340 } 5341 5342 // Memoize nodes. 5343 SDNode *N; 5344 SDVTList VTs = getVTList(VT); 5345 5346 if (VT != MVT::Glue) { 5347 FoldingSetNodeID ID; 5348 AddNodeIDNode(ID, Opcode, VTs, Ops); 5349 void *IP = nullptr; 5350 5351 if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)) 5352 return SDValue(E, 0); 5353 5354 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 5355 VTs, Ops); 5356 CSEMap.InsertNode(N, IP); 5357 } else { 5358 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 5359 VTs, Ops); 5360 } 5361 5362 InsertNode(N); 5363 return SDValue(N, 0); 5364 } 5365 5366 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, 5367 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 5368 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 5369 } 5370 5371 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5372 ArrayRef<SDValue> Ops) { 5373 if (VTList.NumVTs == 1) 5374 return getNode(Opcode, DL, VTList.VTs[0], Ops); 5375 5376 #if 0 5377 switch (Opcode) { 5378 // FIXME: figure out how to safely handle things like 5379 // int foo(int x) { return 1 << (x & 255); } 5380 // int bar() { return foo(256); } 5381 case ISD::SRA_PARTS: 5382 case ISD::SRL_PARTS: 5383 case ISD::SHL_PARTS: 5384 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 5385 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 5386 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 5387 else if (N3.getOpcode() == ISD::AND) 5388 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 5389 // If the and is only masking out bits that cannot effect the shift, 5390 // eliminate the and. 5391 unsigned NumBits = VT.getScalarType().getSizeInBits()*2; 5392 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 5393 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 5394 } 5395 break; 5396 } 5397 #endif 5398 5399 // Memoize the node unless it returns a flag. 
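  // Glue-producing nodes are intentionally excluded from CSE: an MVT::Glue
  // result ties a node to one particular consumer, so two glue nodes must
  // remain distinct even when their operands and types are identical.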
5400 SDNode *N; 5401 unsigned NumOps = Ops.size(); 5402 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 5403 FoldingSetNodeID ID; 5404 AddNodeIDNode(ID, Opcode, VTList, Ops); 5405 void *IP = nullptr; 5406 if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)) 5407 return SDValue(E, 0); 5408 5409 if (NumOps == 1) { 5410 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), 5411 DL.getDebugLoc(), VTList, Ops[0]); 5412 } else if (NumOps == 2) { 5413 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), 5414 DL.getDebugLoc(), VTList, Ops[0], 5415 Ops[1]); 5416 } else if (NumOps == 3) { 5417 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), 5418 DL.getDebugLoc(), VTList, Ops[0], 5419 Ops[1], Ops[2]); 5420 } else { 5421 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 5422 VTList, Ops); 5423 } 5424 CSEMap.InsertNode(N, IP); 5425 } else { 5426 if (NumOps == 1) { 5427 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), 5428 DL.getDebugLoc(), VTList, Ops[0]); 5429 } else if (NumOps == 2) { 5430 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), 5431 DL.getDebugLoc(), VTList, Ops[0], 5432 Ops[1]); 5433 } else if (NumOps == 3) { 5434 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), 5435 DL.getDebugLoc(), VTList, Ops[0], 5436 Ops[1], Ops[2]); 5437 } else { 5438 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 5439 VTList, Ops); 5440 } 5441 } 5442 InsertNode(N); 5443 return SDValue(N, 0); 5444 } 5445 5446 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) { 5447 return getNode(Opcode, DL, VTList, None); 5448 } 5449 5450 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5451 SDValue N1) { 5452 SDValue Ops[] = { N1 }; 5453 return getNode(Opcode, DL, VTList, Ops); 5454 } 5455 5456 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5457 SDValue N1, SDValue N2) { 5458 SDValue Ops[] = { N1, N2 }; 5459 return getNode(Opcode, DL, VTList, Ops); 5460 } 5461 5462 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5463 SDValue N1, SDValue N2, SDValue N3) { 5464 SDValue Ops[] = { N1, N2, N3 }; 5465 return getNode(Opcode, DL, VTList, Ops); 5466 } 5467 5468 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5469 SDValue N1, SDValue N2, SDValue N3, 5470 SDValue N4) { 5471 SDValue Ops[] = { N1, N2, N3, N4 }; 5472 return getNode(Opcode, DL, VTList, Ops); 5473 } 5474 5475 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5476 SDValue N1, SDValue N2, SDValue N3, 5477 SDValue N4, SDValue N5) { 5478 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5479 return getNode(Opcode, DL, VTList, Ops); 5480 } 5481 5482 SDVTList SelectionDAG::getVTList(EVT VT) { 5483 return makeVTList(SDNode::getValueTypeList(VT), 1); 5484 } 5485 5486 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 5487 FoldingSetNodeID ID; 5488 ID.AddInteger(2U); 5489 ID.AddInteger(VT1.getRawBits()); 5490 ID.AddInteger(VT2.getRawBits()); 5491 5492 void *IP = nullptr; 5493 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5494 if (!Result) { 5495 EVT *Array = Allocator.Allocate<EVT>(2); 5496 Array[0] = VT1; 5497 Array[1] = VT2; 5498 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 5499 VTListMap.InsertNode(Result, IP); 5500 } 5501 return Result->getSDVTList(); 5502 } 5503 5504 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 5505 FoldingSetNodeID ID; 5506 
ID.AddInteger(3U); 5507 ID.AddInteger(VT1.getRawBits()); 5508 ID.AddInteger(VT2.getRawBits()); 5509 ID.AddInteger(VT3.getRawBits()); 5510 5511 void *IP = nullptr; 5512 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5513 if (!Result) { 5514 EVT *Array = Allocator.Allocate<EVT>(3); 5515 Array[0] = VT1; 5516 Array[1] = VT2; 5517 Array[2] = VT3; 5518 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 5519 VTListMap.InsertNode(Result, IP); 5520 } 5521 return Result->getSDVTList(); 5522 } 5523 5524 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 5525 FoldingSetNodeID ID; 5526 ID.AddInteger(4U); 5527 ID.AddInteger(VT1.getRawBits()); 5528 ID.AddInteger(VT2.getRawBits()); 5529 ID.AddInteger(VT3.getRawBits()); 5530 ID.AddInteger(VT4.getRawBits()); 5531 5532 void *IP = nullptr; 5533 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5534 if (!Result) { 5535 EVT *Array = Allocator.Allocate<EVT>(4); 5536 Array[0] = VT1; 5537 Array[1] = VT2; 5538 Array[2] = VT3; 5539 Array[3] = VT4; 5540 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 5541 VTListMap.InsertNode(Result, IP); 5542 } 5543 return Result->getSDVTList(); 5544 } 5545 5546 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 5547 unsigned NumVTs = VTs.size(); 5548 FoldingSetNodeID ID; 5549 ID.AddInteger(NumVTs); 5550 for (unsigned index = 0; index < NumVTs; index++) { 5551 ID.AddInteger(VTs[index].getRawBits()); 5552 } 5553 5554 void *IP = nullptr; 5555 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5556 if (!Result) { 5557 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 5558 std::copy(VTs.begin(), VTs.end(), Array); 5559 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 5560 VTListMap.InsertNode(Result, IP); 5561 } 5562 return Result->getSDVTList(); 5563 } 5564 5565 5566 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 5567 /// specified operands. If the resultant node already exists in the DAG, 5568 /// this does not modify the specified node, instead it returns the node that 5569 /// already exists. If the resultant node does not exist in the DAG, the 5570 /// input node is returned. As a degenerate case, if you specify the same 5571 /// input operands as the node already has, the input node is returned. 5572 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 5573 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 5574 5575 // Check to see if there is no change. 5576 if (Op == N->getOperand(0)) return N; 5577 5578 // See if the modified node already exists. 5579 void *InsertPos = nullptr; 5580 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 5581 return Existing; 5582 5583 // Nope it doesn't. Remove the node from its current place in the maps. 5584 if (InsertPos) 5585 if (!RemoveNodeFromCSEMaps(N)) 5586 InsertPos = nullptr; 5587 5588 // Now we update the operands. 5589 N->OperandList[0].set(Op); 5590 5591 // If this gets put into a CSE map, add it. 5592 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 5593 return N; 5594 } 5595 5596 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 5597 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 5598 5599 // Check to see if there is no change. 5600 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 5601 return N; // No operands changed, just return the input node. 5602 5603 // See if the modified node already exists. 
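  // (FindModifiedNodeSlot looks up the CSE maps for a node with the same
  // opcode and the new operands; if one exists it is returned instead of
  // mutating N, per the UpdateNodeOperands contract documented above.)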
5604 void *InsertPos = nullptr; 5605 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 5606 return Existing; 5607 5608 // Nope it doesn't. Remove the node from its current place in the maps. 5609 if (InsertPos) 5610 if (!RemoveNodeFromCSEMaps(N)) 5611 InsertPos = nullptr; 5612 5613 // Now we update the operands. 5614 if (N->OperandList[0] != Op1) 5615 N->OperandList[0].set(Op1); 5616 if (N->OperandList[1] != Op2) 5617 N->OperandList[1].set(Op2); 5618 5619 // If this gets put into a CSE map, add it. 5620 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 5621 return N; 5622 } 5623 5624 SDNode *SelectionDAG:: 5625 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 5626 SDValue Ops[] = { Op1, Op2, Op3 }; 5627 return UpdateNodeOperands(N, Ops); 5628 } 5629 5630 SDNode *SelectionDAG:: 5631 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 5632 SDValue Op3, SDValue Op4) { 5633 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 5634 return UpdateNodeOperands(N, Ops); 5635 } 5636 5637 SDNode *SelectionDAG:: 5638 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 5639 SDValue Op3, SDValue Op4, SDValue Op5) { 5640 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 5641 return UpdateNodeOperands(N, Ops); 5642 } 5643 5644 SDNode *SelectionDAG:: 5645 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 5646 unsigned NumOps = Ops.size(); 5647 assert(N->getNumOperands() == NumOps && 5648 "Update with wrong number of operands"); 5649 5650 // If no operands changed just return the input node. 5651 if (Ops.empty() || std::equal(Ops.begin(), Ops.end(), N->op_begin())) 5652 return N; 5653 5654 // See if the modified node already exists. 5655 void *InsertPos = nullptr; 5656 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 5657 return Existing; 5658 5659 // Nope it doesn't. Remove the node from its current place in the maps. 5660 if (InsertPos) 5661 if (!RemoveNodeFromCSEMaps(N)) 5662 InsertPos = nullptr; 5663 5664 // Now we update the operands. 5665 for (unsigned i = 0; i != NumOps; ++i) 5666 if (N->OperandList[i] != Ops[i]) 5667 N->OperandList[i].set(Ops[i]); 5668 5669 // If this gets put into a CSE map, add it. 5670 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 5671 return N; 5672 } 5673 5674 /// DropOperands - Release the operands and set this node to have 5675 /// zero operands. 5676 void SDNode::DropOperands() { 5677 // Unlike the code in MorphNodeTo that does this, we don't need to 5678 // watch for dead nodes here. 5679 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 5680 SDUse &Use = *I++; 5681 Use.set(SDValue()); 5682 } 5683 } 5684 5685 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 5686 /// machine opcode. 
5687 /// 5688 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5689 EVT VT) { 5690 SDVTList VTs = getVTList(VT); 5691 return SelectNodeTo(N, MachineOpc, VTs, None); 5692 } 5693 5694 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5695 EVT VT, SDValue Op1) { 5696 SDVTList VTs = getVTList(VT); 5697 SDValue Ops[] = { Op1 }; 5698 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5699 } 5700 5701 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5702 EVT VT, SDValue Op1, 5703 SDValue Op2) { 5704 SDVTList VTs = getVTList(VT); 5705 SDValue Ops[] = { Op1, Op2 }; 5706 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5707 } 5708 5709 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5710 EVT VT, SDValue Op1, 5711 SDValue Op2, SDValue Op3) { 5712 SDVTList VTs = getVTList(VT); 5713 SDValue Ops[] = { Op1, Op2, Op3 }; 5714 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5715 } 5716 5717 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5718 EVT VT, ArrayRef<SDValue> Ops) { 5719 SDVTList VTs = getVTList(VT); 5720 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5721 } 5722 5723 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5724 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 5725 SDVTList VTs = getVTList(VT1, VT2); 5726 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5727 } 5728 5729 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5730 EVT VT1, EVT VT2) { 5731 SDVTList VTs = getVTList(VT1, VT2); 5732 return SelectNodeTo(N, MachineOpc, VTs, None); 5733 } 5734 5735 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5736 EVT VT1, EVT VT2, EVT VT3, 5737 ArrayRef<SDValue> Ops) { 5738 SDVTList VTs = getVTList(VT1, VT2, VT3); 5739 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5740 } 5741 5742 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5743 EVT VT1, EVT VT2, EVT VT3, EVT VT4, 5744 ArrayRef<SDValue> Ops) { 5745 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4); 5746 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5747 } 5748 5749 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5750 EVT VT1, EVT VT2, 5751 SDValue Op1) { 5752 SDVTList VTs = getVTList(VT1, VT2); 5753 SDValue Ops[] = { Op1 }; 5754 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5755 } 5756 5757 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5758 EVT VT1, EVT VT2, 5759 SDValue Op1, SDValue Op2) { 5760 SDVTList VTs = getVTList(VT1, VT2); 5761 SDValue Ops[] = { Op1, Op2 }; 5762 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5763 } 5764 5765 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5766 EVT VT1, EVT VT2, 5767 SDValue Op1, SDValue Op2, 5768 SDValue Op3) { 5769 SDVTList VTs = getVTList(VT1, VT2); 5770 SDValue Ops[] = { Op1, Op2, Op3 }; 5771 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5772 } 5773 5774 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5775 EVT VT1, EVT VT2, EVT VT3, 5776 SDValue Op1, SDValue Op2, 5777 SDValue Op3) { 5778 SDVTList VTs = getVTList(VT1, VT2, VT3); 5779 SDValue Ops[] = { Op1, Op2, Op3 }; 5780 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5781 } 5782 5783 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5784 SDVTList VTs,ArrayRef<SDValue> Ops) { 5785 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 5786 // Reset the NodeID to -1. 
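  // (Note that the opcode was handed to MorphNodeTo complemented: machine
  // opcodes are stored as negative NodeType values to tell them apart from
  // target-independent ISD opcodes, and getMachineOpcode() undoes the ~.)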
  N->setNodeId(-1);
  return N;
}

/// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that the operation is associated with multiple
/// lines. This makes the debugger work better at -O0, where there is a higher
/// probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one. Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner
/// or the legalizer, which maintain worklists that would need to be updated
/// when deleting things.
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = FindNodeOrInsertPos(ID, N->getDebugLoc(), IP))
      return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list. Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
    // Initialize the memory references information.
    MN->setMemRefs(nullptr, nullptr);
    // If NumOps is larger than the # of operands we can have in a
    // MachineSDNode, reallocate the operand list.
    if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
      if (MN->OperandsNeedDelete)
        delete[] MN->OperandList;
      if (NumOps > array_lengthof(MN->LocalOperands))
        // We're creating a final node that will live unmorphed for the
        // remainder of the current SelectionDAG iteration, so we can allocate
        // the operands directly out of a pool with no recycling metadata.
5867 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps), 5868 Ops.data(), NumOps); 5869 else 5870 MN->InitOperands(MN->LocalOperands, Ops.data(), NumOps); 5871 MN->OperandsNeedDelete = false; 5872 } else 5873 MN->InitOperands(MN->OperandList, Ops.data(), NumOps); 5874 } else { 5875 // If NumOps is larger than the # of operands we currently have, reallocate 5876 // the operand list. 5877 if (NumOps > N->NumOperands) { 5878 if (N->OperandsNeedDelete) 5879 delete[] N->OperandList; 5880 N->InitOperands(new SDUse[NumOps], Ops.data(), NumOps); 5881 N->OperandsNeedDelete = true; 5882 } else 5883 N->InitOperands(N->OperandList, Ops.data(), NumOps); 5884 } 5885 5886 // Delete any nodes that are still dead after adding the uses for the 5887 // new operands. 5888 if (!DeadNodeSet.empty()) { 5889 SmallVector<SDNode *, 16> DeadNodes; 5890 for (SDNode *N : DeadNodeSet) 5891 if (N->use_empty()) 5892 DeadNodes.push_back(N); 5893 RemoveDeadNodes(DeadNodes); 5894 } 5895 5896 if (IP) 5897 CSEMap.InsertNode(N, IP); // Memoize the new node. 5898 return N; 5899 } 5900 5901 5902 /// getMachineNode - These are used for target selectors to create a new node 5903 /// with specified return type(s), MachineInstr opcode, and operands. 5904 /// 5905 /// Note that getMachineNode returns the resultant node. If there is already a 5906 /// node of the specified opcode and operands, it returns that node instead of 5907 /// the current one. 5908 MachineSDNode * 5909 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) { 5910 SDVTList VTs = getVTList(VT); 5911 return getMachineNode(Opcode, dl, VTs, None); 5912 } 5913 5914 MachineSDNode * 5915 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) { 5916 SDVTList VTs = getVTList(VT); 5917 SDValue Ops[] = { Op1 }; 5918 return getMachineNode(Opcode, dl, VTs, Ops); 5919 } 5920 5921 MachineSDNode * 5922 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5923 SDValue Op1, SDValue Op2) { 5924 SDVTList VTs = getVTList(VT); 5925 SDValue Ops[] = { Op1, Op2 }; 5926 return getMachineNode(Opcode, dl, VTs, Ops); 5927 } 5928 5929 MachineSDNode * 5930 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5931 SDValue Op1, SDValue Op2, SDValue Op3) { 5932 SDVTList VTs = getVTList(VT); 5933 SDValue Ops[] = { Op1, Op2, Op3 }; 5934 return getMachineNode(Opcode, dl, VTs, Ops); 5935 } 5936 5937 MachineSDNode * 5938 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5939 ArrayRef<SDValue> Ops) { 5940 SDVTList VTs = getVTList(VT); 5941 return getMachineNode(Opcode, dl, VTs, Ops); 5942 } 5943 5944 MachineSDNode * 5945 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) { 5946 SDVTList VTs = getVTList(VT1, VT2); 5947 return getMachineNode(Opcode, dl, VTs, None); 5948 } 5949 5950 MachineSDNode * 5951 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5952 EVT VT1, EVT VT2, SDValue Op1) { 5953 SDVTList VTs = getVTList(VT1, VT2); 5954 SDValue Ops[] = { Op1 }; 5955 return getMachineNode(Opcode, dl, VTs, Ops); 5956 } 5957 5958 MachineSDNode * 5959 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5960 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) { 5961 SDVTList VTs = getVTList(VT1, VT2); 5962 SDValue Ops[] = { Op1, Op2 }; 5963 return getMachineNode(Opcode, dl, VTs, Ops); 5964 } 5965 5966 MachineSDNode * 5967 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5968 EVT VT1, EVT VT2, SDValue Op1, 5969 SDValue Op2, SDValue Op3) { 5970 SDVTList VTs = getVTList(VT1, VT2); 5971 SDValue Ops[] = { Op1, 
Op2, Op3 }; 5972 return getMachineNode(Opcode, dl, VTs, Ops); 5973 } 5974 5975 MachineSDNode * 5976 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5977 EVT VT1, EVT VT2, 5978 ArrayRef<SDValue> Ops) { 5979 SDVTList VTs = getVTList(VT1, VT2); 5980 return getMachineNode(Opcode, dl, VTs, Ops); 5981 } 5982 5983 MachineSDNode * 5984 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5985 EVT VT1, EVT VT2, EVT VT3, 5986 SDValue Op1, SDValue Op2) { 5987 SDVTList VTs = getVTList(VT1, VT2, VT3); 5988 SDValue Ops[] = { Op1, Op2 }; 5989 return getMachineNode(Opcode, dl, VTs, Ops); 5990 } 5991 5992 MachineSDNode * 5993 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5994 EVT VT1, EVT VT2, EVT VT3, 5995 SDValue Op1, SDValue Op2, SDValue Op3) { 5996 SDVTList VTs = getVTList(VT1, VT2, VT3); 5997 SDValue Ops[] = { Op1, Op2, Op3 }; 5998 return getMachineNode(Opcode, dl, VTs, Ops); 5999 } 6000 6001 MachineSDNode * 6002 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 6003 EVT VT1, EVT VT2, EVT VT3, 6004 ArrayRef<SDValue> Ops) { 6005 SDVTList VTs = getVTList(VT1, VT2, VT3); 6006 return getMachineNode(Opcode, dl, VTs, Ops); 6007 } 6008 6009 MachineSDNode * 6010 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, 6011 EVT VT2, EVT VT3, EVT VT4, 6012 ArrayRef<SDValue> Ops) { 6013 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4); 6014 return getMachineNode(Opcode, dl, VTs, Ops); 6015 } 6016 6017 MachineSDNode * 6018 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 6019 ArrayRef<EVT> ResultTys, 6020 ArrayRef<SDValue> Ops) { 6021 SDVTList VTs = getVTList(ResultTys); 6022 return getMachineNode(Opcode, dl, VTs, Ops); 6023 } 6024 6025 MachineSDNode * 6026 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs, 6027 ArrayRef<SDValue> OpsArray) { 6028 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 6029 MachineSDNode *N; 6030 void *IP = nullptr; 6031 const SDValue *Ops = OpsArray.data(); 6032 unsigned NumOps = OpsArray.size(); 6033 6034 if (DoCSE) { 6035 FoldingSetNodeID ID; 6036 AddNodeIDNode(ID, ~Opcode, VTs, OpsArray); 6037 IP = nullptr; 6038 if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP)) { 6039 return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL)); 6040 } 6041 } 6042 6043 // Allocate a new MachineSDNode. 6044 N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(), 6045 DL.getDebugLoc(), VTs); 6046 6047 // Initialize the operands list. 6048 if (NumOps > array_lengthof(N->LocalOperands)) 6049 // We're creating a final node that will live unmorphed for the 6050 // remainder of the current SelectionDAG iteration, so we can allocate 6051 // the operands directly out of a pool with no recycling metadata. 6052 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps), 6053 Ops, NumOps); 6054 else 6055 N->InitOperands(N->LocalOperands, Ops, NumOps); 6056 N->OperandsNeedDelete = false; 6057 6058 if (DoCSE) 6059 CSEMap.InsertNode(N, IP); 6060 6061 InsertNode(N); 6062 return N; 6063 } 6064 6065 /// getTargetExtractSubreg - A convenience function for creating 6066 /// TargetOpcode::EXTRACT_SUBREG nodes. 6067 SDValue 6068 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT, 6069 SDValue Operand) { 6070 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 6071 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 6072 VT, Operand, SRIdxVal); 6073 return SDValue(Subreg, 0); 6074 } 6075 6076 /// getTargetInsertSubreg - A convenience function for creating 6077 /// TargetOpcode::INSERT_SUBREG nodes. 
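/// For example (a sketch only; the value types and the subregister index
/// are illustrative, target-specific assumptions):
///   // Insert a 32-bit value into the low part of a 64-bit register value.
///   SDValue Wide = DAG.getTargetInsertSubreg(SubRegIdx, DL, MVT::i64,
///                                            Reg64, Val32);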
6078 SDValue 6079 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT, 6080 SDValue Operand, SDValue Subreg) { 6081 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 6082 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 6083 VT, Operand, Subreg, SRIdxVal); 6084 return SDValue(Result, 0); 6085 } 6086 6087 /// getNodeIfExists - Get the specified node if it's already available, or 6088 /// else return NULL. 6089 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 6090 ArrayRef<SDValue> Ops, 6091 const SDNodeFlags *Flags) { 6092 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 6093 FoldingSetNodeID ID; 6094 AddNodeIDNode(ID, Opcode, VTList, Ops); 6095 AddNodeIDFlags(ID, Opcode, Flags); 6096 void *IP = nullptr; 6097 if (SDNode *E = FindNodeOrInsertPos(ID, DebugLoc(), IP)) 6098 return E; 6099 } 6100 return nullptr; 6101 } 6102 6103 /// getDbgValue - Creates a SDDbgValue node. 6104 /// 6105 /// SDNode 6106 SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N, 6107 unsigned R, bool IsIndirect, uint64_t Off, 6108 DebugLoc DL, unsigned O) { 6109 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6110 "Expected inlined-at fields to agree"); 6111 return new (DbgInfo->getAlloc()) 6112 SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O); 6113 } 6114 6115 /// Constant 6116 SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr, 6117 const Value *C, uint64_t Off, 6118 DebugLoc DL, unsigned O) { 6119 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6120 "Expected inlined-at fields to agree"); 6121 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, Off, DL, O); 6122 } 6123 6124 /// FrameIndex 6125 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr, 6126 unsigned FI, uint64_t Off, 6127 DebugLoc DL, unsigned O) { 6128 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6129 "Expected inlined-at fields to agree"); 6130 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, Off, DL, O); 6131 } 6132 6133 namespace { 6134 6135 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 6136 /// pointed to by a use iterator is deleted, increment the use iterator 6137 /// so that it doesn't dangle. 6138 /// 6139 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 6140 SDNode::use_iterator &UI; 6141 SDNode::use_iterator &UE; 6142 6143 void NodeDeleted(SDNode *N, SDNode *E) override { 6144 // Increment the iterator as needed. 6145 while (UI != UE && N == *UI) 6146 ++UI; 6147 } 6148 6149 public: 6150 RAUWUpdateListener(SelectionDAG &d, 6151 SDNode::use_iterator &ui, 6152 SDNode::use_iterator &ue) 6153 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 6154 }; 6155 6156 } 6157 6158 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 6159 /// This can cause recursive merging of nodes in the DAG. 6160 /// 6161 /// This version assumes From has a single result value. 6162 /// 6163 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { 6164 SDNode *From = FromN.getNode(); 6165 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && 6166 "Cannot replace with this method!"); 6167 assert(From != To.getNode() && "Cannot replace uses of with self"); 6168 6169 // Iterate over all the existing uses of From. New uses will be added 6170 // to the beginning of the use list, which we avoid visiting. 
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // as well. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values. To must match the
/// number and types of values returned by From.
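/// For example, a two-result node such as a load, which produces a value
/// and a chain, could be replaced as follows (NewValue, NewChain and
/// OldLoad are illustrative names):
///   SDValue NewResults[] = { NewValue, NewChain };
///   DAG.ReplaceAllUsesWith(OldLoad, NewResults);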
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1)  // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(To[getRoot().getResNo()]);
}

/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}

namespace {
  /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
  /// to record information about a use.
  struct UseMemo {
    SDNode *User;
    unsigned Index;
    SDUse *Use;
  };

  /// operator< - Sort Memos by User.
  bool operator<(const UseMemo &L, const UseMemo &R) {
    return (intptr_t)L.User < (intptr_t)R.User;
  }
}

/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
/// may appear in both the From and To list.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  // Collect all the existing uses up front and record them. This helps
  // to handle new uses that are introduced during the replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
         E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  std::sort(Uses.begin(), Uses.end());

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From. If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. It returns the number of nodes in the
/// DAG, and leaves the node list sorted in topological order.
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values.
  // Afterwards, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = I++;
    checkForCycles(N, this);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands: add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q = N;
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I) {
    SDNode *N = I;
    checkForCycles(N, this);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (I == SortedPos) {
#ifndef NDEBUG
      SDNode *S = ++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull(this); dbgs() << "\n";
      dbgs() << "Checking if this is due to cycles\n";
      checkForCycles(this, true);
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  if (SD) {
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, SD, isParameter);
}

/// TransferDbgValues - Transfer SDDbgValues from one node to another. Debug
/// values referring to From are cloned to refer to To instead.
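///
/// A minimal usage sketch (hypothetical combiner code; `DAG`, `OldVal`, and
/// `NewVal` are assumptions, not names from this file):
/// \code
///   // Keep debug info alive when a value is rewritten during combining.
///   DAG.TransferDbgValues(OldVal, NewVal);
///   DAG.ReplaceAllUsesOfValueWith(OldVal, NewVal);
/// \endcode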
void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
  if (From == To || !From.getNode()->getHasDebugValue())
    return;
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
       I != E; ++I) {
    SDDbgValue *Dbg = *I;
    if (Dbg->getKind() == SDDbgValue::SDNODE) {
      SDDbgValue *Clone =
          getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
                      To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
                      Dbg->getDebugLoc(), Dbg->getOrder());
      ClonedDVs.push_back(Clone);
    }
  }
  for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
       E = ClonedDVs.end(); I != E; ++I)
    AddDbgValue(*I, ToNode, false);
}

//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//

HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         DebugLoc DL, const GlobalValue *GA,
                                         EVT VT, int64_t o, unsigned char TF)
  : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
                                         SDValue X, unsigned SrcAS,
                                         unsigned DestAS)
  : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
    SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
                     EVT memvt, MachineMemOperand *mmo)
  : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
                                      MMO->isNonTemporal(), MMO->isInvariant());
  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
  assert(isNonTemporal() == MMO->isNonTemporal() &&
         "Non-temporal encoding error!");
  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
                     ArrayRef<SDValue> Ops, EVT memvt, MachineMemOperand *mmo)
  : SDNode(Opc, Order, dl, VTs, Ops),
    MemoryVT(memvt), MMO(mmo) {
  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
                                      MMO->isNonTemporal(), MMO->isInvariant());
  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

/// Profile - Gather unique data for the node.
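/// This feeds the FoldingSet that backs CSE. For illustration, a sketch of
/// how a hypothetical FoldingSet client would use a profile (assumed code,
/// not part of this file):
/// \code
///   FoldingSetNodeID ID;
///   N->Profile(ID);                 // hashes opcode, value types, operands
///   void *IP = nullptr;
///   if (SDNode *Existing = CSEMap.FindNodeOrInsertPos(ID, IP))
///     ; // an equivalent node already exists; reuse it
/// \endcode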
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {
  struct EVTArray {
    std::vector<EVT> VTs;

    EVTArray() {
      VTs.reserve(MVT::LAST_VALUETYPE);
      for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
        VTs.push_back(MVT((MVT::SimpleValueType)i));
    }
  };
}

static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true> > VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUses uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there is any use of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
///
bool SDNode::isOnlyUserOf(SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
///
bool SDValue::isOperandOf(SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (*this == Op)
      return true;
  return false;
}

bool SDNode::isOperandOf(SDNode *N) const {
  for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
    if (this == N->OperandList[i].getNode())
      return true;
  return false;
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// it only looks a couple of nodes in; it does not do an exhaustive search.
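///
/// A hedged usage sketch (hypothetical caller; `LdChain` and `StoreChain` are
/// assumed chain values, not names from this file):
/// \code
///   // Only fold if nothing with side effects sits between the load's
///   // chain and the store's chain input.
///   if (LdChain.reachesChainWithoutSideEffects(StoreChain))
///     ; // ... perform the transformation
/// \endcode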
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply, we just want to be able to see through
  // TokenFactor's etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel. If any
  // of the operands of the TF does not reach dest, then we cannot do the xform.
  if (getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
        return false;
    return true;
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
  }
  return false;
}

/// hasPredecessor - Return true if N is a predecessor of this node.
/// N is either an operand of this node, or can be reached by recursively
/// traversing up the operands.
/// NOTE: This is an expensive method. Use it carefully.
bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  return hasPredecessorHelper(N, Visited, Worklist);
}

bool
SDNode::hasPredecessorHelper(const SDNode *N,
                             SmallPtrSetImpl<const SDNode *> &Visited,
                             SmallVectorImpl<const SDNode *> &Worklist) const {
  if (Visited.empty()) {
    Worklist.push_back(this);
  } else {
    // Take a look in the visited set. If we've already encountered this node
    // we needn't search further.
    if (Visited.count(N))
      return true;
  }

  // Haven't visited N yet. Continue the search.
  while (!Worklist.empty()) {
    const SDNode *M = Worklist.pop_back_val();
    for (const SDValue &OpV : M->op_values()) {
      SDNode *Op = OpV.getNode();
      if (Visited.insert(Op).second)
        Worklist.push_back(Op);
      if (Op == N)
        return true;
    }
  }

  return false;
}

uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  assert(Num < NumOperands && "Invalid child # of SDNode!");
  return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
}

SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
          getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                  getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands));
      break;
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  return getNode(ISD::BUILD_VECTOR, dl,
                 EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
}

/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
/// location that is 'Dist' units away from the location that the 'Base' load
/// is loading from.
bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
                                     unsigned Bytes, int Dist) const {
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  // Handle X + C.
  if (isBaseWithConstantOffset(Loc)) {
    int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
    if (Loc.getOperand(0) == BaseLoc) {
      // If the base location is a simple address with no offset itself, then
      // the second load's first add operand should be the base address.
      if (LocOffset == Dist * (int)Bytes)
        return true;
    } else if (isBaseWithConstantOffset(BaseLoc)) {
      // The base location itself has an offset, so subtract that value from
      // the second load's offset before comparing to distance * size.
      int64_t BOffset =
        cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
      if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
        if ((LocOffset - BOffset) == Dist * (int)Bytes)
          return true;
      }
    }
  }
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
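///
/// Illustrative sketch (hypothetical caller; `DAG` and `Ptr` are assumed):
/// \code
///   if (unsigned Align = DAG.InferPtrAlignment(Ptr))
///     ; // e.g. use Align when building a MachineMemOperand
/// \endcode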
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
    llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
                           getDataLayout());
    unsigned AlignBits = KnownZero.countTrailingOnes();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = 1 << 31;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != (1 << 31)) {
    const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/high parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector()) {
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  } else {
    unsigned NumElements = VT.getVectorNumElements();
    assert(!(NumElements & 1) && "Splitting vector, but not in half!");
    LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                   NumElements/2);
  }
  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
         N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
                                        APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool isBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned sz = VT.getSizeInBits();
  if (MinSplatBits > sz)
    return false;

  SplatValue = APInt(sz, 0);
  SplatUndef = APInt(sz, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned nOps = getNumOperands();
  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();

  for (unsigned j = 0; j < nOps; ++j) {
    unsigned i = isBigEndian ? nOps-1-j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltBitSize;

    if (OpVal.getOpcode() == ISD::UNDEF)
      SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize)
                      .zextOrTrunc(sz) << BitPos;
    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);
  while (sz > 8) {
    unsigned HalfSize = sz / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue  = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef  = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    sz = HalfSize;
  }

  SplatBitSize = sz;
  return true;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SDValue Op = getOperand(i);
    if (Op.getOpcode() == ISD::UNDEF) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    assert(getOperand(0).getOpcode() == ISD::UNDEF &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(0);
  }

  return Splatted;
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(UndefElements).getNode());
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(UndefElements).getNode());
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef XDEBUG
  check = true;
#endif  // XDEBUG
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif  // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}
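
// Usage sketch for the cycle checker (illustrative only; `DAG` is assumed to
// be a SelectionDAG the caller already holds):
// \code
//   // Force a full acyclicity check after an in-place node morph, even in
//   // builds where XDEBUG is not defined.
//   checkForCycles(&DAG, /*force=*/true);
// \endcode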