//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                           ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
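// Illustrative example: for doubles, 0.0 == -0.0 evaluates to true, but a
// +0.0 ConstantFPSDNode queried with isExactlyValue(APFloat(-0.0)) returns
// false, because the underlying bit patterns differ in the sign bit.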
bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
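// Illustrative example of the element-size check above: after type
// legalization, a v4i8 all-ones vector may be rebuilt from wider constants.
// An i32 operand of 0x000000FF has 8 trailing ones, which covers the 8-bit
// element width, so it still qualifies as ~0 for the i8 element type.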
/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF)
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have
    // non-0 elements. We have to be a bit careful here, as the type of the
    // constant may not be the same as the type of the vector elements due to
    // type legalization (the elements are promoted to a legal type for the
    // target and a vector of a type may be legal when the base element type
    // is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    SDValue Zero = N->getOperand(i);
    unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

/// \brief Return true if the specified node is a BUILD_VECTOR node of
/// all ConstantSDNode or undef.
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);
    if (Op.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
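// Worked example (assuming the L/G/E condition-bit layout used above):
// SETLT has only the L bit set; swapping the L and G bits yields SETGT,
// matching the identity (X < Y) == (Y > X). SETEQ carries only the E bit
// and is returned unchanged.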
/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Clear the U bit so that N and U are never both set.

  return ISD::CondCode(Operation);
}


/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if the result is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the N bit.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
/// function returns SETCC_INVALID if it is not possible to represent the
/// resultant comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
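// Worked example (illustrative): for integer comparisons,
// getSetCCOrOperation(ISD::SETLT, ISD::SETGT, true) ORs the L and G bits
// and yields ISD::SETNE, reflecting (X < Y) | (X > Y) == (X != Y).
// Similarly, getSetCCAndOperation(ISD::SETLE, ISD::SETGE, true) ANDs the
// bits down to just the E bit and yields ISD::SETEQ.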
356 /// 357 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) { 358 ID.AddInteger(OpC); 359 } 360 361 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them 362 /// solely with their pointer. 363 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) { 364 ID.AddPointer(VTList.VTs); 365 } 366 367 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 368 /// 369 static void AddNodeIDOperands(FoldingSetNodeID &ID, 370 ArrayRef<SDValue> Ops) { 371 for (auto& Op : Ops) { 372 ID.AddPointer(Op.getNode()); 373 ID.AddInteger(Op.getResNo()); 374 } 375 } 376 377 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 378 /// 379 static void AddNodeIDOperands(FoldingSetNodeID &ID, 380 ArrayRef<SDUse> Ops) { 381 for (auto& Op : Ops) { 382 ID.AddPointer(Op.getNode()); 383 ID.AddInteger(Op.getResNo()); 384 } 385 } 386 387 static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, bool nuw, bool nsw, 388 bool exact) { 389 ID.AddBoolean(nuw); 390 ID.AddBoolean(nsw); 391 ID.AddBoolean(exact); 392 } 393 394 /// AddBinaryNodeIDCustom - Add BinarySDNodes special infos 395 static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, unsigned Opcode, 396 bool nuw, bool nsw, bool exact) { 397 if (isBinOpWithFlags(Opcode)) 398 AddBinaryNodeIDCustom(ID, nuw, nsw, exact); 399 } 400 401 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC, 402 SDVTList VTList, ArrayRef<SDValue> OpList) { 403 AddNodeIDOpcode(ID, OpC); 404 AddNodeIDValueTypes(ID, VTList); 405 AddNodeIDOperands(ID, OpList); 406 } 407 408 /// AddNodeIDCustom - If this is an SDNode with special info, add this info to 409 /// the NodeID data. 410 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) { 411 switch (N->getOpcode()) { 412 case ISD::TargetExternalSymbol: 413 case ISD::ExternalSymbol: 414 llvm_unreachable("Should only be used on nodes with operands"); 415 default: break; // Normal nodes don't need extra info. 
/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::MUL:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::SHL: {
    const BinaryWithFlagsSDNode *BinNode = cast<BinaryWithFlagsSDNode>(N);
    AddBinaryNodeIDCustom(ID, N->getOpcode(), BinNode->hasNoUnsignedWrap(),
                          BinNode->hasNoSignedWrap(), BinNode->isExact());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}
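// Worked example of the encoding above (illustrative, assuming the usual
// LoadExtType/MemIndexedMode numbering): a volatile, pre-incremented
// (PRE_INC == 1) sign-extending load (SEXTLOAD == 2) with no other flags
// encodes as 2 | (1 << 2) | (1 << 5) == 0x26.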
//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}
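// Illustrative: a node whose first (or any other) result is MVT::Glue, such
// as a CopyToReg glued into a call sequence, is rejected here; two such
// nodes must remain distinct even when they are structurally identical.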
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a
  // reference to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it.  Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TM.getSubtargetImpl()
      ->getTargetLowering()
      ->getDataLayout()
      ->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), TSI(*tm.getSubtargetImpl()->getSelectionDAGInfo()), TLI(nullptr),
      OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
      UpdateListeners(nullptr) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf, const TargetLowering *tli) {
  MF = &mf;
  TLI = tli;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
                                            SDVTList VTs, SDValue N1,
                                            SDValue N2, bool nuw, bool nsw,
                                            bool exact) {
  if (isBinOpWithFlags(Opcode)) {
    BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
        Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
    FN->setHasNoUnsignedWrap(nuw);
    FN->setHasNoSignedWrap(nsw);
    FN->setIsExact(exact);

    return FN;
  }

  BinarySDNode *N = new (NodeAllocator)
      BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}
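// Illustrative uses of the helpers above (hypothetical operands):
//   getZExtOrTrunc(Op /* i16 */, DL, MVT::i32) -> (zero_extend i32 Op)
//   getZExtOrTrunc(Op /* i64 */, DL, MVT::i32) -> (truncate i32 Op)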
SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}

SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}
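// Illustrative example for the *_EXTEND_VECTOR_INREG helpers above: with a
// v4i32 input, getZeroExtendVectorInReg(Op, DL, MVT::v2i64) widens the low
// two i32 lanes to i64. The total bit width (128) is preserved while the
// lane count halves, satisfying both asserts.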
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue TrueValue;
  switch (TLI->getBooleanContents(VT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    TrueValue = getConstant(1, VT);
    break;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()),
                            VT);
    break;
  }
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}
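// Worked example (illustrative): under ZeroOrOneBooleanContent,
// getLogicalNOT(DL, Val, MVT::i1) emits (xor Val, 1), flipping just the
// boolean bit, whereas getNOT on an i8 value emits (xor Val, 255).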
SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT, bool isO)
{
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT,
                                  bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
           TargetLowering::TypeExpandInteger) {
    APInt NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                           .trunc(ViaEltSizeInBits),
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (TLI->isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // from the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
                             getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
                                     Ops));
    return Result;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
  }
  return Result;
}
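// Illustrative example of the expansion path above: on a 32-bit target that
// requires legal types, a request for a v2i64 splat of 0x0000000100000002
// may be split into i32 parts <2, 1> (little-endian part order), built as
// the v4i32 BUILD_VECTOR <2, 1, 2, 1>, and then bitcast back to v2i64.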
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val,
                     TM.getSubtargetImpl()->getTargetLowering()->getPointerTy(),
                     isTarget);
}


SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME SDLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Ops);
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
           EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");
  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
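// Worked example for the offset truncation above (illustrative): with
// 32-bit pointers, an Offset of 0xFFFFFFFF is sign-extended to -1, so
// requests for (GV + 0xFFFFFFFF) and (GV - 1) profile identically and CSE
// to the same node.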
SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = TM.getSubtargetImpl()
                    ->getTargetLowering()
                    ->getDataLayout()
                    ->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
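// Illustrative: passing Alignment == 0 above defers to the target's data
// layout, so e.g. a <16 x i8> constant pool entry typically receives its
// preferred 16-byte alignment without the caller spelling it out.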
SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = TM.getSubtargetImpl()
                    ->getTargetLowering()
                    ->getDataLayout()
                    ->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}
// commuteShuffle - swaps the values of N1 and N2, and adjusts the shuffle
// mask M so that indices that pointed at N1 now point at N2, and indices
// that pointed at N2 now point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}
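// Worked example (illustrative): for 4-element vectors, commuting the mask
// <0, 5, 2, 7> swaps N1 and N2 and rewrites the mask to <4, 1, 6, 3>, so
// every index still selects the same source lane after the swap.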
1551 if (V.getValueType().getVectorNumElements() == 1552 VT.getVectorNumElements()) 1553 return N1; 1554 if (auto *C = dyn_cast<ConstantSDNode>(Splat)) 1555 if (C->isNullValue()) 1556 return N1; 1557 } 1558 } 1559 } 1560 1561 FoldingSetNodeID ID; 1562 SDValue Ops[2] = { N1, N2 }; 1563 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops); 1564 for (unsigned i = 0; i != NElts; ++i) 1565 ID.AddInteger(MaskVec[i]); 1566 1567 void* IP = nullptr; 1568 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 1569 return SDValue(E, 0); 1570 1571 // Allocate the mask array for the node out of the BumpPtrAllocator, since 1572 // SDNode doesn't have access to it. This memory will be "leaked" when 1573 // the node is deallocated, but recovered when the NodeAllocator is released. 1574 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1575 memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int)); 1576 1577 ShuffleVectorSDNode *N = 1578 new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(), 1579 dl.getDebugLoc(), N1, N2, 1580 MaskAlloc); 1581 CSEMap.InsertNode(N, IP); 1582 InsertNode(N); 1583 return SDValue(N, 0); 1584 } 1585 1586 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1587 MVT VT = SV.getSimpleValueType(0); 1588 unsigned NumElems = VT.getVectorNumElements(); 1589 SmallVector<int, 8> MaskVec; 1590 1591 for (unsigned i = 0; i != NumElems; ++i) { 1592 int Idx = SV.getMaskElt(i); 1593 if (Idx >= 0) { 1594 if (Idx < (int)NumElems) 1595 Idx += NumElems; 1596 else 1597 Idx -= NumElems; 1598 } 1599 MaskVec.push_back(Idx); 1600 } 1601 1602 SDValue Op0 = SV.getOperand(0); 1603 SDValue Op1 = SV.getOperand(1); 1604 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, &MaskVec[0]); 1605 } 1606 1607 SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl, 1608 SDValue Val, SDValue DTy, 1609 SDValue STy, SDValue Rnd, SDValue Sat, 1610 ISD::CvtCode Code) { 1611 // If the src and dest types are the same and the conversion is between 1612 // integer types of the same sign or two floats, no conversion is necessary. 
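  // For example (an illustrative sketch): a CVT_SS conversion whose DTy and
  // STy operands name the same type is an identity, and the check below
  // simply returns Val untouched.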
1613 if (DTy == STy && 1614 (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF)) 1615 return Val; 1616 1617 FoldingSetNodeID ID; 1618 SDValue Ops[] = { Val, DTy, STy, Rnd, Sat }; 1619 AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), Ops); 1620 void* IP = nullptr; 1621 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 1622 return SDValue(E, 0); 1623 1624 CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(), 1625 dl.getDebugLoc(), 1626 Ops, Code); 1627 CSEMap.InsertNode(N, IP); 1628 InsertNode(N); 1629 return SDValue(N, 0); 1630 } 1631 1632 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1633 FoldingSetNodeID ID; 1634 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1635 ID.AddInteger(RegNo); 1636 void *IP = nullptr; 1637 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 1638 return SDValue(E, 0); 1639 1640 SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT); 1641 CSEMap.InsertNode(N, IP); 1642 InsertNode(N); 1643 return SDValue(N, 0); 1644 } 1645 1646 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1647 FoldingSetNodeID ID; 1648 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1649 ID.AddPointer(RegMask); 1650 void *IP = nullptr; 1651 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 1652 return SDValue(E, 0); 1653 1654 SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask); 1655 CSEMap.InsertNode(N, IP); 1656 InsertNode(N); 1657 return SDValue(N, 0); 1658 } 1659 1660 SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) { 1661 FoldingSetNodeID ID; 1662 SDValue Ops[] = { Root }; 1663 AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops); 1664 ID.AddPointer(Label); 1665 void *IP = nullptr; 1666 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 1667 return SDValue(E, 0); 1668 1669 SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(), 1670 dl.getDebugLoc(), Root, Label); 1671 CSEMap.InsertNode(N, IP); 1672 InsertNode(N); 1673 return SDValue(N, 0); 1674 } 1675 1676 1677 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1678 int64_t Offset, 1679 bool isTarget, 1680 unsigned char TargetFlags) { 1681 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress; 1682 1683 FoldingSetNodeID ID; 1684 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1685 ID.AddPointer(BA); 1686 ID.AddInteger(Offset); 1687 ID.AddInteger(TargetFlags); 1688 void *IP = nullptr; 1689 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 1690 return SDValue(E, 0); 1691 1692 SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset, 1693 TargetFlags); 1694 CSEMap.InsertNode(N, IP); 1695 InsertNode(N); 1696 return SDValue(N, 0); 1697 } 1698 1699 SDValue SelectionDAG::getSrcValue(const Value *V) { 1700 assert((!V || V->getType()->isPointerTy()) && 1701 "SrcValue is not a pointer?"); 1702 1703 FoldingSetNodeID ID; 1704 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1705 ID.AddPointer(V); 1706 1707 void *IP = nullptr; 1708 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 1709 return SDValue(E, 0); 1710 1711 SDNode *N = new (NodeAllocator) SrcValueSDNode(V); 1712 CSEMap.InsertNode(N, IP); 1713 InsertNode(N); 1714 return SDValue(N, 0); 1715 } 1716 1717 /// getMDNode - Return an MDNodeSDNode which holds an MDNode. 
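/// These nodes are uniqued through the CSE map, so requesting the same MDNode
/// twice yields the same SDValue both times.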
1718 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1719 FoldingSetNodeID ID; 1720 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1721 ID.AddPointer(MD); 1722 1723 void *IP = nullptr; 1724 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 1725 return SDValue(E, 0); 1726 1727 SDNode *N = new (NodeAllocator) MDNodeSDNode(MD); 1728 CSEMap.InsertNode(N, IP); 1729 InsertNode(N); 1730 return SDValue(N, 0); 1731 } 1732 1733 /// getAddrSpaceCast - Return an AddrSpaceCastSDNode. 1734 SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr, 1735 unsigned SrcAS, unsigned DestAS) { 1736 SDValue Ops[] = {Ptr}; 1737 FoldingSetNodeID ID; 1738 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1739 ID.AddInteger(SrcAS); 1740 ID.AddInteger(DestAS); 1741 1742 void *IP = nullptr; 1743 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 1744 return SDValue(E, 0); 1745 1746 SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(), 1747 dl.getDebugLoc(), 1748 VT, Ptr, SrcAS, DestAS); 1749 CSEMap.InsertNode(N, IP); 1750 InsertNode(N); 1751 return SDValue(N, 0); 1752 } 1753 1754 /// getShiftAmountOperand - Return the specified value casted to 1755 /// the target's desired shift amount type. 1756 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1757 EVT OpTy = Op.getValueType(); 1758 EVT ShTy = 1759 TM.getSubtargetImpl()->getTargetLowering()->getShiftAmountTy(LHSTy); 1760 if (OpTy == ShTy || OpTy.isVector()) return Op; 1761 1762 ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND; 1763 return getNode(Opcode, SDLoc(Op), ShTy, Op); 1764 } 1765 1766 /// CreateStackTemporary - Create a stack temporary, suitable for holding the 1767 /// specified value type. 1768 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1769 MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo(); 1770 unsigned ByteSize = VT.getStoreSize(); 1771 Type *Ty = VT.getTypeForEVT(*getContext()); 1772 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering(); 1773 unsigned StackAlign = 1774 std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign); 1775 1776 int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false); 1777 return getFrameIndex(FrameIdx, TLI->getPointerTy()); 1778 } 1779 1780 /// CreateStackTemporary - Create a stack temporary suitable for holding 1781 /// either of the specified value types. 1782 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1783 unsigned Bytes = std::max(VT1.getStoreSizeInBits(), 1784 VT2.getStoreSizeInBits())/8; 1785 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1786 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1787 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering(); 1788 const DataLayout *TD = TLI->getDataLayout(); 1789 unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1), 1790 TD->getPrefTypeAlignment(Ty2)); 1791 1792 MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo(); 1793 int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false); 1794 return getFrameIndex(FrameIdx, TLI->getPointerTy()); 1795 } 1796 1797 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, 1798 SDValue N2, ISD::CondCode Cond, SDLoc dl) { 1799 // These setcc operations always fold. 
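  // For example (an illustrative sketch): a SETFALSE comparison folds to the
  // constant 0 below without ever examining N1 or N2.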
1800 switch (Cond) { 1801 default: break; 1802 case ISD::SETFALSE: 1803 case ISD::SETFALSE2: return getConstant(0, VT); 1804 case ISD::SETTRUE: 1805 case ISD::SETTRUE2: { 1806 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering(); 1807 TargetLowering::BooleanContent Cnt = 1808 TLI->getBooleanContents(N1->getValueType(0)); 1809 return getConstant( 1810 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT); 1811 } 1812 1813 case ISD::SETOEQ: 1814 case ISD::SETOGT: 1815 case ISD::SETOGE: 1816 case ISD::SETOLT: 1817 case ISD::SETOLE: 1818 case ISD::SETONE: 1819 case ISD::SETO: 1820 case ISD::SETUO: 1821 case ISD::SETUEQ: 1822 case ISD::SETUNE: 1823 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!"); 1824 break; 1825 } 1826 1827 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) { 1828 const APInt &C2 = N2C->getAPIntValue(); 1829 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 1830 const APInt &C1 = N1C->getAPIntValue(); 1831 1832 switch (Cond) { 1833 default: llvm_unreachable("Unknown integer setcc!"); 1834 case ISD::SETEQ: return getConstant(C1 == C2, VT); 1835 case ISD::SETNE: return getConstant(C1 != C2, VT); 1836 case ISD::SETULT: return getConstant(C1.ult(C2), VT); 1837 case ISD::SETUGT: return getConstant(C1.ugt(C2), VT); 1838 case ISD::SETULE: return getConstant(C1.ule(C2), VT); 1839 case ISD::SETUGE: return getConstant(C1.uge(C2), VT); 1840 case ISD::SETLT: return getConstant(C1.slt(C2), VT); 1841 case ISD::SETGT: return getConstant(C1.sgt(C2), VT); 1842 case ISD::SETLE: return getConstant(C1.sle(C2), VT); 1843 case ISD::SETGE: return getConstant(C1.sge(C2), VT); 1844 } 1845 } 1846 } 1847 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) { 1848 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) { 1849 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); 1850 switch (Cond) { 1851 default: break; 1852 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 1853 return getUNDEF(VT); 1854 // fall through 1855 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT); 1856 case ISD::SETNE: if (R==APFloat::cmpUnordered) 1857 return getUNDEF(VT); 1858 // fall through 1859 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan || 1860 R==APFloat::cmpLessThan, VT); 1861 case ISD::SETLT: if (R==APFloat::cmpUnordered) 1862 return getUNDEF(VT); 1863 // fall through 1864 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT); 1865 case ISD::SETGT: if (R==APFloat::cmpUnordered) 1866 return getUNDEF(VT); 1867 // fall through 1868 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT); 1869 case ISD::SETLE: if (R==APFloat::cmpUnordered) 1870 return getUNDEF(VT); 1871 // fall through 1872 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan || 1873 R==APFloat::cmpEqual, VT); 1874 case ISD::SETGE: if (R==APFloat::cmpUnordered) 1875 return getUNDEF(VT); 1876 // fall through 1877 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan || 1878 R==APFloat::cmpEqual, VT); 1879 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, VT); 1880 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, VT); 1881 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered || 1882 R==APFloat::cmpEqual, VT); 1883 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT); 1884 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered || 1885 R==APFloat::cmpLessThan, VT); 1886 case ISD::SETUGT: return 
getConstant(R==APFloat::cmpGreaterThan || 1887 R==APFloat::cmpUnordered, VT); 1888 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT); 1889 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT); 1890 } 1891 } else { 1892 // Ensure that the constant occurs on the RHS. 1893 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 1894 MVT CompVT = N1.getValueType().getSimpleVT(); 1895 if (!TM.getSubtargetImpl()->getTargetLowering()->isCondCodeLegal( 1896 SwappedCond, CompVT)) 1897 return SDValue(); 1898 1899 return getSetCC(dl, VT, N2, N1, SwappedCond); 1900 } 1901 } 1902 1903 // Could not fold it. 1904 return SDValue(); 1905 } 1906 1907 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 1908 /// use this predicate to simplify operations downstream. 1909 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 1910 // This predicate is not safe for vector operations. 1911 if (Op.getValueType().isVector()) 1912 return false; 1913 1914 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits(); 1915 return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth); 1916 } 1917 1918 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 1919 /// this predicate to simplify operations downstream. Mask is known to be zero 1920 /// for bits that V cannot have. 1921 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 1922 unsigned Depth) const { 1923 APInt KnownZero, KnownOne; 1924 computeKnownBits(Op, KnownZero, KnownOne, Depth); 1925 return (KnownZero & Mask) == Mask; 1926 } 1927 1928 /// Determine which bits of Op are known to be either zero or one and return 1929 /// them in the KnownZero/KnownOne bitsets. 1930 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, 1931 APInt &KnownOne, unsigned Depth) const { 1932 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering(); 1933 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits(); 1934 1935 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything. 1936 if (Depth == 6) 1937 return; // Limit search depth. 1938 1939 APInt KnownZero2, KnownOne2; 1940 1941 switch (Op.getOpcode()) { 1942 case ISD::Constant: 1943 // We know all of the bits for a constant! 1944 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue(); 1945 KnownZero = ~KnownOne; 1946 break; 1947 case ISD::AND: 1948 // If either the LHS or the RHS are Zero, the result is zero. 1949 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 1950 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1951 1952 // Output known-1 bits are only known if set in both the LHS & RHS. 1953 KnownOne &= KnownOne2; 1954 // Output known-0 are known to be clear if zero in either the LHS | RHS. 1955 KnownZero |= KnownZero2; 1956 break; 1957 case ISD::OR: 1958 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 1959 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1960 1961 // Output known-0 bits are only known if clear in both the LHS & RHS. 1962 KnownZero &= KnownZero2; 1963 // Output known-1 are known to be set if set in either the LHS | RHS. 1964 KnownOne |= KnownOne2; 1965 break; 1966 case ISD::XOR: { 1967 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 1968 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 1969 1970 // Output known-0 bits are known if clear or set in both the LHS & RHS. 
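    // (A result bit of XOR is known only when both input bits are known:
    // 0^0 = 1^1 = 0 and 0^1 = 1^0 = 1, which is exactly what the two masks
    // below compute.)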
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    break;
  }
  case ISD::MUL: {
    computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
    KnownOne.clearAllBits();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                              KnownZero2.countLeadingOnes(),
                              BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case ISD::SELECT:
    computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
    computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.
    // If we know the result of a setcc has the top bits zero, use this info.
    // We know that we have an integer-based boolean since these operations
    // are only available for integer.
    if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
2051 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2052 TargetLowering::ZeroOrOneBooleanContent && 2053 BitWidth > 1) 2054 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 2055 break; 2056 case ISD::SHL: 2057 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0 2058 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2059 unsigned ShAmt = SA->getZExtValue(); 2060 2061 // If the shift count is an invalid immediate, don't do anything. 2062 if (ShAmt >= BitWidth) 2063 break; 2064 2065 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2066 KnownZero <<= ShAmt; 2067 KnownOne <<= ShAmt; 2068 // low bits known zero. 2069 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt); 2070 } 2071 break; 2072 case ISD::SRL: 2073 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 2074 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2075 unsigned ShAmt = SA->getZExtValue(); 2076 2077 // If the shift count is an invalid immediate, don't do anything. 2078 if (ShAmt >= BitWidth) 2079 break; 2080 2081 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2082 KnownZero = KnownZero.lshr(ShAmt); 2083 KnownOne = KnownOne.lshr(ShAmt); 2084 2085 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 2086 KnownZero |= HighBits; // High bits known zero. 2087 } 2088 break; 2089 case ISD::SRA: 2090 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2091 unsigned ShAmt = SA->getZExtValue(); 2092 2093 // If the shift count is an invalid immediate, don't do anything. 2094 if (ShAmt >= BitWidth) 2095 break; 2096 2097 // If any of the demanded bits are produced by the sign extension, we also 2098 // demand the input sign bit. 2099 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 2100 2101 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2102 KnownZero = KnownZero.lshr(ShAmt); 2103 KnownOne = KnownOne.lshr(ShAmt); 2104 2105 // Handle the sign bits. 2106 APInt SignBit = APInt::getSignBit(BitWidth); 2107 SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask. 2108 2109 if (KnownZero.intersects(SignBit)) { 2110 KnownZero |= HighBits; // New bits are known zero. 2111 } else if (KnownOne.intersects(SignBit)) { 2112 KnownOne |= HighBits; // New bits are known one. 2113 } 2114 } 2115 break; 2116 case ISD::SIGN_EXTEND_INREG: { 2117 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2118 unsigned EBits = EVT.getScalarType().getSizeInBits(); 2119 2120 // Sign extension. Compute the demanded bits in the result that are not 2121 // present in the input. 2122 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2123 2124 APInt InSignBit = APInt::getSignBit(EBits); 2125 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2126 2127 // If the sign extended bits are demanded, we know that the sign 2128 // bit is demanded. 2129 InSignBit = InSignBit.zext(BitWidth); 2130 if (NewBits.getBoolValue()) 2131 InputDemandedBits |= InSignBit; 2132 2133 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2134 KnownOne &= InputDemandedBits; 2135 KnownZero &= InputDemandedBits; 2136 2137 // If the sign bit of the input is known set or clear, then we know the 2138 // top bits of the result. 
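    // For example (an illustrative sketch): for a sign_extend_inreg from i8
    // inside an i32, if bit 7 is known zero then bits 8..31 of the result are
    // known zero as well; if bit 7 is known one, bits 8..31 are known one.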
2139 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear 2140 KnownZero |= NewBits; 2141 KnownOne &= ~NewBits; 2142 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set 2143 KnownOne |= NewBits; 2144 KnownZero &= ~NewBits; 2145 } else { // Input sign bit unknown 2146 KnownZero &= ~NewBits; 2147 KnownOne &= ~NewBits; 2148 } 2149 break; 2150 } 2151 case ISD::CTTZ: 2152 case ISD::CTTZ_ZERO_UNDEF: 2153 case ISD::CTLZ: 2154 case ISD::CTLZ_ZERO_UNDEF: 2155 case ISD::CTPOP: { 2156 unsigned LowBits = Log2_32(BitWidth)+1; 2157 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits); 2158 KnownOne.clearAllBits(); 2159 break; 2160 } 2161 case ISD::LOAD: { 2162 LoadSDNode *LD = cast<LoadSDNode>(Op); 2163 // If this is a ZEXTLoad and we are looking at the loaded value. 2164 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 2165 EVT VT = LD->getMemoryVT(); 2166 unsigned MemBits = VT.getScalarType().getSizeInBits(); 2167 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); 2168 } else if (const MDNode *Ranges = LD->getRanges()) { 2169 computeKnownBitsFromRangeMetadata(*Ranges, KnownZero); 2170 } 2171 break; 2172 } 2173 case ISD::ZERO_EXTEND: { 2174 EVT InVT = Op.getOperand(0).getValueType(); 2175 unsigned InBits = InVT.getScalarType().getSizeInBits(); 2176 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits); 2177 KnownZero = KnownZero.trunc(InBits); 2178 KnownOne = KnownOne.trunc(InBits); 2179 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2180 KnownZero = KnownZero.zext(BitWidth); 2181 KnownOne = KnownOne.zext(BitWidth); 2182 KnownZero |= NewBits; 2183 break; 2184 } 2185 case ISD::SIGN_EXTEND: { 2186 EVT InVT = Op.getOperand(0).getValueType(); 2187 unsigned InBits = InVT.getScalarType().getSizeInBits(); 2188 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits); 2189 2190 KnownZero = KnownZero.trunc(InBits); 2191 KnownOne = KnownOne.trunc(InBits); 2192 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2193 2194 // Note if the sign bit is known to be zero or one. 2195 bool SignBitKnownZero = KnownZero.isNegative(); 2196 bool SignBitKnownOne = KnownOne.isNegative(); 2197 2198 KnownZero = KnownZero.zext(BitWidth); 2199 KnownOne = KnownOne.zext(BitWidth); 2200 2201 // If the sign bit is known zero or one, the top bits match. 
2202 if (SignBitKnownZero) 2203 KnownZero |= NewBits; 2204 else if (SignBitKnownOne) 2205 KnownOne |= NewBits; 2206 break; 2207 } 2208 case ISD::ANY_EXTEND: { 2209 EVT InVT = Op.getOperand(0).getValueType(); 2210 unsigned InBits = InVT.getScalarType().getSizeInBits(); 2211 KnownZero = KnownZero.trunc(InBits); 2212 KnownOne = KnownOne.trunc(InBits); 2213 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2214 KnownZero = KnownZero.zext(BitWidth); 2215 KnownOne = KnownOne.zext(BitWidth); 2216 break; 2217 } 2218 case ISD::TRUNCATE: { 2219 EVT InVT = Op.getOperand(0).getValueType(); 2220 unsigned InBits = InVT.getScalarType().getSizeInBits(); 2221 KnownZero = KnownZero.zext(InBits); 2222 KnownOne = KnownOne.zext(InBits); 2223 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2224 KnownZero = KnownZero.trunc(BitWidth); 2225 KnownOne = KnownOne.trunc(BitWidth); 2226 break; 2227 } 2228 case ISD::AssertZext: { 2229 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2230 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 2231 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2232 KnownZero |= (~InMask); 2233 KnownOne &= (~KnownZero); 2234 break; 2235 } 2236 case ISD::FGETSIGN: 2237 // All bits are zero except the low bit. 2238 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1); 2239 break; 2240 2241 case ISD::SUB: { 2242 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) { 2243 // We know that the top bits of C-X are clear if X contains less bits 2244 // than C (i.e. no wrap-around can happen). For example, 20-X is 2245 // positive if we can prove that X is >= 0 and < 16. 2246 if (CLHS->getAPIntValue().isNonNegative()) { 2247 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros(); 2248 // NLZ can't be BitWidth with no sign bit 2249 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1); 2250 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 2251 2252 // If all of the MaskV bits are known to be zero, then we know the 2253 // output top bits are zero, because we now know that the output is 2254 // from [0-C]. 2255 if ((KnownZero2 & MaskV) == MaskV) { 2256 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros(); 2257 // Top bits known zero. 2258 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2); 2259 } 2260 } 2261 } 2262 } 2263 // fall through 2264 case ISD::ADD: 2265 case ISD::ADDE: { 2266 // Output known-0 bits are known if clear or set in both the low clear bits 2267 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the 2268 // low 3 bits clear. 2269 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1); 2270 unsigned KnownZeroOut = KnownZero2.countTrailingOnes(); 2271 2272 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 2273 KnownZeroOut = std::min(KnownZeroOut, 2274 KnownZero2.countTrailingOnes()); 2275 2276 if (Op.getOpcode() == ISD::ADD) { 2277 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut); 2278 break; 2279 } 2280 2281 // With ADDE, a carry bit may be added in, so we can only use this 2282 // information if we know (at least) that the low two bits are clear. We 2283 // then return to the caller that the low bit is unknown but that other bits 2284 // are known zero. 
2285 if (KnownZeroOut >= 2) // ADDE 2286 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut); 2287 break; 2288 } 2289 case ISD::SREM: 2290 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2291 const APInt &RA = Rem->getAPIntValue().abs(); 2292 if (RA.isPowerOf2()) { 2293 APInt LowBits = RA - 1; 2294 computeKnownBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1); 2295 2296 // The low bits of the first operand are unchanged by the srem. 2297 KnownZero = KnownZero2 & LowBits; 2298 KnownOne = KnownOne2 & LowBits; 2299 2300 // If the first operand is non-negative or has all low bits zero, then 2301 // the upper bits are all zero. 2302 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits)) 2303 KnownZero |= ~LowBits; 2304 2305 // If the first operand is negative and not all low bits are zero, then 2306 // the upper bits are all one. 2307 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0)) 2308 KnownOne |= ~LowBits; 2309 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?"); 2310 } 2311 } 2312 break; 2313 case ISD::UREM: { 2314 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2315 const APInt &RA = Rem->getAPIntValue(); 2316 if (RA.isPowerOf2()) { 2317 APInt LowBits = (RA - 1); 2318 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth + 1); 2319 2320 // The upper bits are all zero, the lower ones are unchanged. 2321 KnownZero = KnownZero2 | ~LowBits; 2322 KnownOne = KnownOne2 & LowBits; 2323 break; 2324 } 2325 } 2326 2327 // Since the result is less than or equal to either operand, any leading 2328 // zero bits in either operand must also exist in the result. 2329 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2330 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 2331 2332 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(), 2333 KnownZero2.countLeadingOnes()); 2334 KnownOne.clearAllBits(); 2335 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders); 2336 break; 2337 } 2338 case ISD::FrameIndex: 2339 case ISD::TargetFrameIndex: 2340 if (unsigned Align = InferPtrAlignment(Op)) { 2341 // The low bits are known zero if the pointer is aligned. 2342 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align)); 2343 break; 2344 } 2345 break; 2346 2347 default: 2348 if (Op.getOpcode() < ISD::BUILTIN_OP_END) 2349 break; 2350 // Fallthrough 2351 case ISD::INTRINSIC_WO_CHAIN: 2352 case ISD::INTRINSIC_W_CHAIN: 2353 case ISD::INTRINSIC_VOID: 2354 // Allow the target to implement this method for its nodes. 2355 TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth); 2356 break; 2357 } 2358 2359 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 2360 } 2361 2362 /// ComputeNumSignBits - Return the number of times the sign bit of the 2363 /// register is replicated into the other bits. We know that at least 1 bit 2364 /// is always equal to the sign bit (itself), but other cases can give us 2365 /// information. For example, immediately after an "SRA X, 2", we know that 2366 /// the top 3 bits are all equal to each other, so we return 3. 
2367 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{ 2368 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering(); 2369 EVT VT = Op.getValueType(); 2370 assert(VT.isInteger() && "Invalid VT!"); 2371 unsigned VTBits = VT.getScalarType().getSizeInBits(); 2372 unsigned Tmp, Tmp2; 2373 unsigned FirstAnswer = 1; 2374 2375 if (Depth == 6) 2376 return 1; // Limit search depth. 2377 2378 switch (Op.getOpcode()) { 2379 default: break; 2380 case ISD::AssertSext: 2381 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2382 return VTBits-Tmp+1; 2383 case ISD::AssertZext: 2384 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2385 return VTBits-Tmp; 2386 2387 case ISD::Constant: { 2388 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue(); 2389 return Val.getNumSignBits(); 2390 } 2391 2392 case ISD::SIGN_EXTEND: 2393 Tmp = 2394 VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits(); 2395 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp; 2396 2397 case ISD::SIGN_EXTEND_INREG: 2398 // Max of the input and what this extends. 2399 Tmp = 2400 cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits(); 2401 Tmp = VTBits-Tmp+1; 2402 2403 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2404 return std::max(Tmp, Tmp2); 2405 2406 case ISD::SRA: 2407 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2408 // SRA X, C -> adds C sign bits. 2409 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2410 Tmp += C->getZExtValue(); 2411 if (Tmp > VTBits) Tmp = VTBits; 2412 } 2413 return Tmp; 2414 case ISD::SHL: 2415 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2416 // shl destroys sign bits. 2417 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2418 if (C->getZExtValue() >= VTBits || // Bad shift. 2419 C->getZExtValue() >= Tmp) break; // Shifted all sign bits out. 2420 return Tmp - C->getZExtValue(); 2421 } 2422 break; 2423 case ISD::AND: 2424 case ISD::OR: 2425 case ISD::XOR: // NOT is handled here. 2426 // Logical binary ops preserve the number of sign bits at the worst. 2427 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2428 if (Tmp != 1) { 2429 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2430 FirstAnswer = std::min(Tmp, Tmp2); 2431 // We computed what we know about the sign bits as our first 2432 // answer. Now proceed to the generic code that uses 2433 // computeKnownBits, and pick whichever answer is better. 2434 } 2435 break; 2436 2437 case ISD::SELECT: 2438 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2439 if (Tmp == 1) return 1; // Early out. 2440 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1); 2441 return std::min(Tmp, Tmp2); 2442 2443 case ISD::SADDO: 2444 case ISD::UADDO: 2445 case ISD::SSUBO: 2446 case ISD::USUBO: 2447 case ISD::SMULO: 2448 case ISD::UMULO: 2449 if (Op.getResNo() != 1) 2450 break; 2451 // The boolean result conforms to getBooleanContents. Fall through. 2452 // If setcc returns 0/-1, all bits are sign bits. 2453 // We know that we have an integer-based boolean since these operations 2454 // are only available for integer. 2455 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2456 TargetLowering::ZeroOrNegativeOneBooleanContent) 2457 return VTBits; 2458 break; 2459 case ISD::SETCC: 2460 // If setcc returns 0/-1, all bits are sign bits. 
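    // (With ZeroOrNegativeOneBooleanContent every bit of the result equals
    // the sign bit, so the code below can report all VTBits bits as sign
    // bits.)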
2461 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2462 TargetLowering::ZeroOrNegativeOneBooleanContent) 2463 return VTBits; 2464 break; 2465 case ISD::ROTL: 2466 case ISD::ROTR: 2467 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2468 unsigned RotAmt = C->getZExtValue() & (VTBits-1); 2469 2470 // Handle rotate right by N like a rotate left by 32-N. 2471 if (Op.getOpcode() == ISD::ROTR) 2472 RotAmt = (VTBits-RotAmt) & (VTBits-1); 2473 2474 // If we aren't rotating out all of the known-in sign bits, return the 2475 // number that are left. This handles rotl(sext(x), 1) for example. 2476 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2477 if (Tmp > RotAmt+1) return Tmp-RotAmt; 2478 } 2479 break; 2480 case ISD::ADD: 2481 // Add can have at most one carry bit. Thus we know that the output 2482 // is, at worst, one more bit than the inputs. 2483 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2484 if (Tmp == 1) return 1; // Early out. 2485 2486 // Special case decrementing a value (ADD X, -1): 2487 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 2488 if (CRHS->isAllOnesValue()) { 2489 APInt KnownZero, KnownOne; 2490 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2491 2492 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2493 // sign bits set. 2494 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue()) 2495 return VTBits; 2496 2497 // If we are subtracting one from a positive number, there is no carry 2498 // out of the result. 2499 if (KnownZero.isNegative()) 2500 return Tmp; 2501 } 2502 2503 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2504 if (Tmp2 == 1) return 1; 2505 return std::min(Tmp, Tmp2)-1; 2506 2507 case ISD::SUB: 2508 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2509 if (Tmp2 == 1) return 1; 2510 2511 // Handle NEG. 2512 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) 2513 if (CLHS->isNullValue()) { 2514 APInt KnownZero, KnownOne; 2515 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 2516 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2517 // sign bits set. 2518 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue()) 2519 return VTBits; 2520 2521 // If the input is known to be positive (the sign bit is known clear), 2522 // the output of the NEG has the same number of sign bits as the input. 2523 if (KnownZero.isNegative()) 2524 return Tmp2; 2525 2526 // Otherwise, we treat this like a SUB. 2527 } 2528 2529 // Sub can have at most one carry bit. Thus we know that the output 2530 // is, at worst, one more bit than the inputs. 2531 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2532 if (Tmp == 1) return 1; // Early out. 2533 return std::min(Tmp, Tmp2)-1; 2534 case ISD::TRUNCATE: 2535 // FIXME: it's tricky to do anything useful for this, but it is an important 2536 // case for targets like X86. 2537 break; 2538 } 2539 2540 // If we are looking at the loaded value of the SDNode. 2541 if (Op.getResNo() == 0) { 2542 // Handle LOADX separately here. EXTLOAD case will fallthrough. 
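    // For example (an illustrative sketch): a sextload of i16 into i32 has at
    // least 32 - 16 + 1 = 17 sign bits, while a zextload of i16 into i32 has
    // its top 16 bits known zero and therefore exactly 16 sign bits; that is
    // where the '17' and '16' in the comments below come from.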
2543 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 2544 unsigned ExtType = LD->getExtensionType(); 2545 switch (ExtType) { 2546 default: break; 2547 case ISD::SEXTLOAD: // '17' bits known 2548 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits(); 2549 return VTBits-Tmp+1; 2550 case ISD::ZEXTLOAD: // '16' bits known 2551 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits(); 2552 return VTBits-Tmp; 2553 } 2554 } 2555 } 2556 2557 // Allow the target to implement this method for its nodes. 2558 if (Op.getOpcode() >= ISD::BUILTIN_OP_END || 2559 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2560 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2561 Op.getOpcode() == ISD::INTRINSIC_VOID) { 2562 unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth); 2563 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits); 2564 } 2565 2566 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2567 // use this information. 2568 APInt KnownZero, KnownOne; 2569 computeKnownBits(Op, KnownZero, KnownOne, Depth); 2570 2571 APInt Mask; 2572 if (KnownZero.isNegative()) { // sign bit is 0 2573 Mask = KnownZero; 2574 } else if (KnownOne.isNegative()) { // sign bit is 1; 2575 Mask = KnownOne; 2576 } else { 2577 // Nothing known. 2578 return FirstAnswer; 2579 } 2580 2581 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 2582 // the number of identical bits in the top of the input value. 2583 Mask = ~Mask; 2584 Mask <<= Mask.getBitWidth()-VTBits; 2585 // Return # leading zeros. We use 'min' here in case Val was zero before 2586 // shifting. We don't want to return '64' as for an i32 "0". 2587 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros())); 2588 } 2589 2590 /// isBaseWithConstantOffset - Return true if the specified operand is an 2591 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an 2592 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same 2593 /// semantics as an ADD. This handles the equivalence: 2594 /// X|Cst == X+Cst iff X&Cst = 0. 2595 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 2596 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 2597 !isa<ConstantSDNode>(Op.getOperand(1))) 2598 return false; 2599 2600 if (Op.getOpcode() == ISD::OR && 2601 !MaskedValueIsZero(Op.getOperand(0), 2602 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue())) 2603 return false; 2604 2605 return true; 2606 } 2607 2608 2609 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const { 2610 // If we're told that NaNs won't happen, assume they won't. 2611 if (getTarget().Options.NoNaNsFPMath) 2612 return true; 2613 2614 // If the value is a constant, we can obviously see if it is a NaN or not. 2615 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 2616 return !C->getValueAPF().isNaN(); 2617 2618 // TODO: Recognize more cases here. 2619 2620 return false; 2621 } 2622 2623 bool SelectionDAG::isKnownNeverZero(SDValue Op) const { 2624 // If the value is a constant, we can obviously see if it is a zero or not. 2625 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 2626 return !C->isZero(); 2627 2628 // TODO: Recognize more cases here. 
  switch (Op.getOpcode()) {
  default: break;
  case ISD::OR:
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      return !C->isNullValue();
    break;
  }

  return false;
}

bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
  // Check the obvious case.
  if (A == B) return true;

  // Check for negative and positive zero.
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}

/// getNode - Gets or creates the specified node.
///
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, getVTList(VT), None);
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), getVTList(VT));
  CSEMap.InsertNode(N, IP);

  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
                              EVT VT, SDValue Operand) {
  // Constant fold unary operations with an integer constant operand. Even
  // opaque constants will be folded, because the folding of unary operations
  // doesn't create new constants with different values. Nevertheless, the
  // opaque flag is preserved during folding to prevent future folding with
  // other constants.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
    const APInt &Val = C->getAPIntValue();
    switch (Opcode) {
    default: break;
    case ISD::SIGN_EXTEND:
      return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT,
                         C->isTargetOpcode(), C->isOpaque());
    case ISD::ANY_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::TRUNCATE:
      return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT,
                         C->isTargetOpcode(), C->isOpaque());
    case ISD::UINT_TO_FP:
    case ISD::SINT_TO_FP: {
      APFloat apf(EVTToAPFloatSemantics(VT),
                  APInt::getNullValue(VT.getSizeInBits()));
      (void)apf.convertFromAPInt(Val,
                                 Opcode==ISD::SINT_TO_FP,
                                 APFloat::rmNearestTiesToEven);
      return getConstantFP(apf, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
        return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
      else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
        return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
      break;
    case ISD::BSWAP:
      return getConstant(Val.byteSwap(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTPOP:
      return getConstant(Val.countPopulation(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTLZ:
    case ISD::CTLZ_ZERO_UNDEF:
      return getConstant(Val.countLeadingZeros(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    case ISD::CTTZ:
    case ISD::CTTZ_ZERO_UNDEF:
      return getConstant(Val.countTrailingZeros(), VT, C->isTargetOpcode(),
                         C->isOpaque());
    }
  }

  // Constant fold unary operations with a floating point constant operand.
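  // For example (an illustrative sketch): getNode(ISD::FNEG, DL, MVT::f64, C)
  // where C is getConstantFP(1.0, MVT::f64) flips the sign on a copy of the
  // APFloat and returns getConstantFP(-1.0, MVT::f64) instead of building an
  // FNEG node.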
2722 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) { 2723 APFloat V = C->getValueAPF(); // make copy 2724 switch (Opcode) { 2725 case ISD::FNEG: 2726 V.changeSign(); 2727 return getConstantFP(V, VT); 2728 case ISD::FABS: 2729 V.clearSign(); 2730 return getConstantFP(V, VT); 2731 case ISD::FCEIL: { 2732 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 2733 if (fs == APFloat::opOK || fs == APFloat::opInexact) 2734 return getConstantFP(V, VT); 2735 break; 2736 } 2737 case ISD::FTRUNC: { 2738 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 2739 if (fs == APFloat::opOK || fs == APFloat::opInexact) 2740 return getConstantFP(V, VT); 2741 break; 2742 } 2743 case ISD::FFLOOR: { 2744 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 2745 if (fs == APFloat::opOK || fs == APFloat::opInexact) 2746 return getConstantFP(V, VT); 2747 break; 2748 } 2749 case ISD::FP_EXTEND: { 2750 bool ignored; 2751 // This can return overflow, underflow, or inexact; we don't care. 2752 // FIXME need to be more flexible about rounding mode. 2753 (void)V.convert(EVTToAPFloatSemantics(VT), 2754 APFloat::rmNearestTiesToEven, &ignored); 2755 return getConstantFP(V, VT); 2756 } 2757 case ISD::FP_TO_SINT: 2758 case ISD::FP_TO_UINT: { 2759 integerPart x[2]; 2760 bool ignored; 2761 assert(integerPartWidth >= 64); 2762 // FIXME need to be more flexible about rounding mode. 2763 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(), 2764 Opcode==ISD::FP_TO_SINT, 2765 APFloat::rmTowardZero, &ignored); 2766 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual 2767 break; 2768 APInt api(VT.getSizeInBits(), x); 2769 return getConstant(api, VT); 2770 } 2771 case ISD::BITCAST: 2772 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 2773 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT); 2774 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 2775 return getConstant(V.bitcastToAPInt().getZExtValue(), VT); 2776 break; 2777 } 2778 } 2779 2780 // Constant fold unary operations with a vector integer operand. 2781 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand.getNode())) { 2782 if (BV->isConstant()) { 2783 switch (Opcode) { 2784 default: 2785 // FIXME: Entirely reasonable to perform folding of other unary 2786 // operations here as the need arises. 2787 break; 2788 case ISD::UINT_TO_FP: 2789 case ISD::SINT_TO_FP: { 2790 SmallVector<SDValue, 8> Ops; 2791 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 2792 SDValue OpN = BV->getOperand(i); 2793 // Let the above scalar folding handle the conversion of each 2794 // element. 2795 OpN = getNode(ISD::SINT_TO_FP, DL, VT.getVectorElementType(), 2796 OpN); 2797 Ops.push_back(OpN); 2798 } 2799 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops); 2800 } 2801 } 2802 } 2803 } 2804 2805 unsigned OpOpcode = Operand.getNode()->getOpcode(); 2806 switch (Opcode) { 2807 case ISD::TokenFactor: 2808 case ISD::MERGE_VALUES: 2809 case ISD::CONCAT_VECTORS: 2810 return Operand; // Factor, merge or concat of one node? No need. 2811 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 2812 case ISD::FP_EXTEND: 2813 assert(VT.isFloatingPoint() && 2814 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 2815 if (Operand.getValueType() == VT) return Operand; // noop conversion. 
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (Operand.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SIGN_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid sext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // sext(undef) = 0, because the top bits will all be the same.
      return getConstant(0, VT);
    break;
  case ISD::ZERO_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ZERO_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid zext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
      return getNode(ISD::ZERO_EXTEND, DL, VT,
                     Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // zext(undef) = 0, because the top bits will be zero.
      return getConstant(0, VT);
    break;
  case ISD::ANY_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ANY_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid anyext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");

    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND)
      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    // (ext (trunc x)) -> x
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = Operand.getNode()->getOperand(0);
      if (OpOp.getValueType() == VT)
        return OpOp;
    }
    break;
  case ISD::TRUNCATE:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid TRUNCATE!");
    if (Operand.getValueType() == VT) return Operand;   // noop truncate
    assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
           "Invalid truncate node, src < dst!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::TRUNCATE)
      return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
      // If the source is smaller than the dest, we still need an
extend. 2896 if (Operand.getNode()->getOperand(0).getValueType().getScalarType() 2897 .bitsLT(VT.getScalarType())) 2898 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0)); 2899 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT)) 2900 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0)); 2901 return Operand.getNode()->getOperand(0); 2902 } 2903 if (OpOpcode == ISD::UNDEF) 2904 return getUNDEF(VT); 2905 break; 2906 case ISD::BITCAST: 2907 // Basic sanity checking. 2908 assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits() 2909 && "Cannot BITCAST between types of different sizes!"); 2910 if (VT == Operand.getValueType()) return Operand; // noop conversion. 2911 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 2912 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 2913 if (OpOpcode == ISD::UNDEF) 2914 return getUNDEF(VT); 2915 break; 2916 case ISD::SCALAR_TO_VECTOR: 2917 assert(VT.isVector() && !Operand.getValueType().isVector() && 2918 (VT.getVectorElementType() == Operand.getValueType() || 2919 (VT.getVectorElementType().isInteger() && 2920 Operand.getValueType().isInteger() && 2921 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 2922 "Illegal SCALAR_TO_VECTOR node!"); 2923 if (OpOpcode == ISD::UNDEF) 2924 return getUNDEF(VT); 2925 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 2926 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 2927 isa<ConstantSDNode>(Operand.getOperand(1)) && 2928 Operand.getConstantOperandVal(1) == 0 && 2929 Operand.getOperand(0).getValueType() == VT) 2930 return Operand.getOperand(0); 2931 break; 2932 case ISD::FNEG: 2933 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 2934 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) 2935 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1), 2936 Operand.getNode()->getOperand(0)); 2937 if (OpOpcode == ISD::FNEG) // --X -> X 2938 return Operand.getNode()->getOperand(0); 2939 break; 2940 case ISD::FABS: 2941 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 2942 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0)); 2943 break; 2944 } 2945 2946 SDNode *N; 2947 SDVTList VTs = getVTList(VT); 2948 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 2949 FoldingSetNodeID ID; 2950 SDValue Ops[1] = { Operand }; 2951 AddNodeIDNode(ID, Opcode, VTs, Ops); 2952 void *IP = nullptr; 2953 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 2954 return SDValue(E, 0); 2955 2956 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), 2957 DL.getDebugLoc(), VTs, Operand); 2958 CSEMap.InsertNode(N, IP); 2959 } else { 2960 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), 2961 DL.getDebugLoc(), VTs, Operand); 2962 } 2963 2964 InsertNode(N); 2965 return SDValue(N, 0); 2966 } 2967 2968 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT, 2969 SDNode *Cst1, SDNode *Cst2) { 2970 // If the opcode is a target-specific ISD node, there's nothing we can 2971 // do here and the operand rules may not line up with the below, so 2972 // bail early. 
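  // For example (an illustrative sketch): folding ISD::ADD over two v2i32
  // build_vectors <1, 2> and <3, 4> walks the paired elements below and
  // produces the build_vector <4, 6>.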
2973 if (Opcode >= ISD::BUILTIN_OP_END) 2974 return SDValue(); 2975 2976 SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs; 2977 SmallVector<SDValue, 4> Outputs; 2978 EVT SVT = VT.getScalarType(); 2979 2980 ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1); 2981 ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2); 2982 if (Scalar1 && Scalar2 && (Scalar1->isOpaque() || Scalar2->isOpaque())) 2983 return SDValue(); 2984 2985 if (Scalar1 && Scalar2) 2986 // Scalar instruction. 2987 Inputs.push_back(std::make_pair(Scalar1, Scalar2)); 2988 else { 2989 // For vectors extract each constant element into Inputs so we can constant 2990 // fold them individually. 2991 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1); 2992 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2); 2993 if (!BV1 || !BV2) 2994 return SDValue(); 2995 2996 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!"); 2997 2998 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) { 2999 ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I)); 3000 ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I)); 3001 if (!V1 || !V2) // Not a constant, bail. 3002 return SDValue(); 3003 3004 if (V1->isOpaque() || V2->isOpaque()) 3005 return SDValue(); 3006 3007 // Avoid BUILD_VECTOR nodes that perform implicit truncation. 3008 // FIXME: This is valid and could be handled by truncating the APInts. 3009 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT) 3010 return SDValue(); 3011 3012 Inputs.push_back(std::make_pair(V1, V2)); 3013 } 3014 } 3015 3016 // We have a number of constant values, constant fold them element by element. 3017 for (unsigned I = 0, E = Inputs.size(); I != E; ++I) { 3018 const APInt &C1 = Inputs[I].first->getAPIntValue(); 3019 const APInt &C2 = Inputs[I].second->getAPIntValue(); 3020 3021 switch (Opcode) { 3022 case ISD::ADD: 3023 Outputs.push_back(getConstant(C1 + C2, SVT)); 3024 break; 3025 case ISD::SUB: 3026 Outputs.push_back(getConstant(C1 - C2, SVT)); 3027 break; 3028 case ISD::MUL: 3029 Outputs.push_back(getConstant(C1 * C2, SVT)); 3030 break; 3031 case ISD::UDIV: 3032 if (!C2.getBoolValue()) 3033 return SDValue(); 3034 Outputs.push_back(getConstant(C1.udiv(C2), SVT)); 3035 break; 3036 case ISD::UREM: 3037 if (!C2.getBoolValue()) 3038 return SDValue(); 3039 Outputs.push_back(getConstant(C1.urem(C2), SVT)); 3040 break; 3041 case ISD::SDIV: 3042 if (!C2.getBoolValue()) 3043 return SDValue(); 3044 Outputs.push_back(getConstant(C1.sdiv(C2), SVT)); 3045 break; 3046 case ISD::SREM: 3047 if (!C2.getBoolValue()) 3048 return SDValue(); 3049 Outputs.push_back(getConstant(C1.srem(C2), SVT)); 3050 break; 3051 case ISD::AND: 3052 Outputs.push_back(getConstant(C1 & C2, SVT)); 3053 break; 3054 case ISD::OR: 3055 Outputs.push_back(getConstant(C1 | C2, SVT)); 3056 break; 3057 case ISD::XOR: 3058 Outputs.push_back(getConstant(C1 ^ C2, SVT)); 3059 break; 3060 case ISD::SHL: 3061 Outputs.push_back(getConstant(C1 << C2, SVT)); 3062 break; 3063 case ISD::SRL: 3064 Outputs.push_back(getConstant(C1.lshr(C2), SVT)); 3065 break; 3066 case ISD::SRA: 3067 Outputs.push_back(getConstant(C1.ashr(C2), SVT)); 3068 break; 3069 case ISD::ROTL: 3070 Outputs.push_back(getConstant(C1.rotl(C2), SVT)); 3071 break; 3072 case ISD::ROTR: 3073 Outputs.push_back(getConstant(C1.rotr(C2), SVT)); 3074 break; 3075 default: 3076 return SDValue(); 3077 } 3078 } 3079 3080 assert((Scalar1 && Scalar2) || (VT.getVectorNumElements() == Outputs.size() && 3081 "Expected a 

  // Handle the scalar case first.
  if (!VT.isVector())
    return Outputs.back();

  // We may have a vector type but a scalar result. Create a splat.
  Outputs.resize(VT.getVectorNumElements(), Outputs.back());

  // Build a big vector out of the scalar elements we generated.
  return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
                              SDValue N2, bool nuw, bool nsw, bool exact) {
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  switch (Opcode) {
  default: break;
  case ISD::TokenFactor:
    assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
           N2.getValueType() == MVT::Other && "Invalid token factor!");
    // Fold trivial token factors.
    if (N1.getOpcode() == ISD::EntryToken) return N2;
    if (N2.getOpcode() == ISD::EntryToken) return N1;
    if (N1 == N2) return N1;
    break;
  case ISD::CONCAT_VECTORS:
    // Concat of UNDEFs is UNDEF.
    if (N1.getOpcode() == ISD::UNDEF &&
        N2.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // A CONCAT_VECTORS with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
    }
    break;
  case ISD::AND:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
    // worth handling here.
    if (N2C && N2C->isNullValue())
      return N2;
    if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
      return N1;
    break;
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
    // it's worth handling here.
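    // E.g. (or X, 0), (xor X, 0), (add X, 0) and (sub X, 0) all fold to X.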
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::SREM:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    if (getTarget().Options.UnsafeFPMath) {
      if (Opcode == ISD::FADD) {
        // 0+x --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
          if (CFP->getValueAPF().isZero())
            return N2;
        // x+0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FSUB) {
        // x-0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FMUL) {
        ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
        SDValue V = N2;

        // If the first operand isn't the constant, try the second.
        if (!CFP) {
          CFP = dyn_cast<ConstantFPSDNode>(N2);
          V = N1;
        }

        if (CFP) {
          // 0*x --> 0
          if (CFP->isZero())
            return SDValue(CFP, 0);
          // 1*x --> x
          if (CFP->isExactlyValue(1.0))
            return V;
        }
      }
    }
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FCOPYSIGN:  // N1 and result must match.  N1/N2 need not match.
    assert(N1.getValueType() == VT &&
           N1.getValueType().isFloatingPoint() &&
           N2.getValueType().isFloatingPoint() &&
           "Invalid FCOPYSIGN!");
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    assert(VT == N1.getValueType() &&
           "Shift operators return type must be the same as their first arg");
    assert(VT.isInteger() && N2.getValueType().isInteger() &&
           "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must have the same type as their first arg");
    // Verify that the shift amount VT is big enough to hold valid shift
    // amounts.  This catches things like trying to shift an i1024 value by an
    // i8, which is easy to fall into in generic code that uses
    // TLI.getShiftAmount().
    assert(N2.getValueType().getSizeInBits() >=
           Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
           "Invalid use of small shift amount with oversized value!");

    // Always fold shifts of i1 values so the code generator doesn't need to
    // handle them.  Since we know the size of the shift has to be less than
    // the size of the value, the shift/rotate count is guaranteed to be zero.
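    // (The only valid shift amount for a 1-bit value is 0, so the shift or
    // rotate is a no-op and returning N1 is always safe.)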
    if (VT == MVT::i1)
      return N1;
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::FP_ROUND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg round!");
    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
           "Cannot FP_ROUND_INREG integer types");
    assert(EVT.isVector() == VT.isVector() &&
           "FP_ROUND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in FP_ROUND_INREG");
    assert(EVT.bitsLE(VT) && "Not rounding down!");
    (void)EVT;
    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
    break;
  }
  case ISD::FP_ROUND:
    assert(VT.isFloatingPoint() &&
           N1.getValueType().isFloatingPoint() &&
           VT.bitsLE(N1.getValueType()) &&
           isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
    if (N1.getValueType() == VT) return N1;  // noop conversion.
    break;
  case ISD::AssertSext:
  case ISD::AssertZext: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(!EVT.isVector() &&
           "AssertSExt/AssertZExt type should be the vector element type "
           "rather than the vector type!");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (VT == EVT) return N1;  // noop assertion.
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(EVT.isVector() == VT.isVector() &&
           "SIGN_EXTEND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in SIGN_EXTEND_INREG");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (EVT == VT) return N1;  // Not actually extending.

    if (N1C) {
      APInt Val = N1C->getAPIntValue();
      unsigned FromBits = EVT.getScalarType().getSizeInBits();
      Val <<= Val.getBitWidth() - FromBits;
      Val = Val.ashr(Val.getBitWidth() - FromBits);
      return getConstant(Val, VT);
    }
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
    if (N1.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
    // expanding copies of large vectors from registers.
    if (N2C &&
        N1.getOpcode() == ISD::CONCAT_VECTORS &&
        N1.getNumOperands() > 0) {
      unsigned Factor =
        N1.getOperand(0).getValueType().getVectorNumElements();
      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                     N1.getOperand(N2C->getZExtValue() / Factor),
                     getConstant(N2C->getZExtValue() % Factor,
                                 N2.getValueType()));
    }

    // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
    // expanding large vector constants.
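    // E.g. (extract_vector_elt (build_vector a, b, c, d), 2) folds to c below.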
    if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue Elt = N1.getOperand(N2C->getZExtValue());

      if (VT != Elt.getValueType())
        // If the vector element type is not legal, the BUILD_VECTOR operands
        // are promoted and implicitly truncated, and the result implicitly
        // extended. Make that explicit here.
        Elt = getAnyExtOrTrunc(Elt, DL, VT);

      return Elt;
    }

    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
    // operations are lowered to scalars.
    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      // If the indices are the same, return the inserted element; if they are
      // known to differ, extract the element from the original vector.
      SDValue N1Op2 = N1.getOperand(2);
      ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());

      if (N1Op2C && N2C) {
        if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
          if (VT == N1.getOperand(1).getValueType())
            return N1.getOperand(1);
          else
            return getSExtOrTrunc(N1.getOperand(1), DL, VT);
        }

        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
      }
    }
    break;
  case ISD::EXTRACT_ELEMENT:
    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
    assert(!N1.getValueType().isVector() && !VT.isVector() &&
           (N1.getValueType().isInteger() == VT.isInteger()) &&
           N1.getValueType() != VT &&
           "Wrong types for EXTRACT_ELEMENT!");

    // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is
    // expanding 64-bit integers into 32-bit parts.  Instead of building the
    // extract of the BUILD_PAIR, only to have legalize rip it apart, just do
    // it now.
    if (N1.getOpcode() == ISD::BUILD_PAIR)
      return N1.getOperand(N2C->getZExtValue());

    // EXTRACT_ELEMENT of a constant int is also very common.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      unsigned ElementSize = VT.getSizeInBits();
      unsigned Shift = ElementSize * N2C->getZExtValue();
      APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
      return getConstant(ShiftedVal.trunc(ElementSize), VT);
    }
    break;
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue Index = N2;
    if (VT.isSimple() && N1.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             "Extract subvector VTs must be vectors!");
      assert(VT.getVectorElementType() ==
             N1.getValueType().getVectorElementType() &&
             "Extract subvector VTs must have the same element type!");
      assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
             "Extract subvector must be from larger vector to smaller vector!");

      if (isa<ConstantSDNode>(Index.getNode())) {
        assert((VT.getVectorNumElements() +
                cast<ConstantSDNode>(Index.getNode())->getZExtValue()
                <= N1.getValueType().getVectorNumElements()) &&
               "Extract subvector overflow!");
      }

      // Trivial extraction.
      if (VT.getSimpleVT() == N1.getSimpleValueType())
        return N1;
    }
    break;
  }
  }

  // Perform trivial constant folding.
  SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
  if (SV.getNode()) return SV;

  // Canonicalize constant to RHS if commutative.
  if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
    std::swap(N1C, N2C);
    std::swap(N1, N2);
  }

  // Constant fold FP operations.
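  // E.g. (fadd 2.0, 3.0) folds to 5.0 here; an operation whose APFloat status
  // reports invalid-op (or, for division and remainder, divide-by-zero) is
  // left alone when the target honors floating-point exceptions.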
  bool HasFPExceptions = TLI->hasFloatingPointExceptions();
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
  ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
  if (N1CFP) {
    if (!N2CFP && isCommutativeBinOp(Opcode)) {
      // Canonicalize constant to RHS if commutative.
      std::swap(N1CFP, N2CFP);
      std::swap(N1, N2);
    } else if (N2CFP) {
      APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
      APFloat::opStatus s;
      switch (Opcode) {
      case ISD::FADD:
        s = V1.add(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FSUB:
        s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FMUL:
        s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FDIV:
        s = V1.divide(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || (s != APFloat::opInvalidOp &&
                                 s != APFloat::opDivByZero)) {
          return getConstantFP(V1, VT);
        }
        break;
      case ISD::FREM:
        s = V1.mod(V2, APFloat::rmNearestTiesToEven);
        if (!HasFPExceptions || (s != APFloat::opInvalidOp &&
                                 s != APFloat::opDivByZero)) {
          return getConstantFP(V1, VT);
        }
        break;
      case ISD::FCOPYSIGN:
        V1.copySign(V2);
        return getConstantFP(V1, VT);
      default: break;
      }
    }

    if (Opcode == ISD::FP_ROUND) {
      APFloat V = N1CFP->getValueAPF();  // make copy
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME: need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, VT);
    }
  }

  // Canonicalize an UNDEF to the RHS, even over a constant.
  if (N1.getOpcode() == ISD::UNDEF) {
    if (isCommutativeBinOp(Opcode)) {
      std::swap(N1, N2);
    } else {
      switch (Opcode) {
      case ISD::FP_ROUND_INREG:
      case ISD::SIGN_EXTEND_INREG:
      case ISD::SUB:
      case ISD::FSUB:
      case ISD::FDIV:
      case ISD::FREM:
      case ISD::SRA:
        return N1;     // fold op(undef, arg2) -> undef
      case ISD::UDIV:
      case ISD::SDIV:
      case ISD::UREM:
      case ISD::SREM:
      case ISD::SRL:
      case ISD::SHL:
        if (!VT.isVector())
          return getConstant(0, VT);  // fold op(undef, arg2) -> 0
        // For vectors, we can't easily build an all zero vector, just return
        // the other operand.
        return N2;
      }
    }
  }

  // Fold a bunch of operators when the RHS is undef.
  if (N2.getOpcode() == ISD::UNDEF) {
    switch (Opcode) {
    case ISD::XOR:
      if (N1.getOpcode() == ISD::UNDEF)
        // Handle undef ^ undef -> 0 special case. This is a common
        // idiom (misuse).
        return getConstant(0, VT);
      // fallthrough
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::SUB:
    case ISD::UDIV:
    case ISD::SDIV:
    case ISD::UREM:
    case ISD::SREM:
      return N2;       // fold op(arg1, undef) -> undef
    case ISD::FADD:
    case ISD::FSUB:
    case ISD::FMUL:
    case ISD::FDIV:
    case ISD::FREM:
      if (getTarget().Options.UnsafeFPMath)
        return N2;
      break;
    case ISD::MUL:
    case ISD::AND:
    case ISD::SRL:
    case ISD::SHL:
      if (!VT.isVector())
        return getConstant(0, VT);  // fold op(arg1, undef) -> 0
      // For vectors, we can't easily build an all zero vector, just return
      // the LHS.
      return N1;
    case ISD::OR:
      if (!VT.isVector())
        return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
      // For vectors, we can't easily build an all one vector, just return
      // the LHS.
      return N1;
    case ISD::SRA:
      return N1;
    }
  }

  // Memoize this node if possible.
  BinarySDNode *N;
  SDVTList VTs = getVTList(VT);
  const bool BinOpHasFlags = isBinOpWithFlags(Opcode);
  if (VT != MVT::Glue) {
    SDValue Ops[] = {N1, N2};
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    if (BinOpHasFlags)
      AddBinaryNodeIDCustom(ID, Opcode, nuw, nsw, exact);
    void *IP = nullptr;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);

    CSEMap.InsertNode(N, IP);
  } else {
    N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
  }

  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3) {
  // Perform various simplifications.
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  switch (Opcode) {
  case ISD::FMA: {
    ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
    ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
    ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
    if (N1CFP && N2CFP && N3CFP) {
      APFloat V1 = N1CFP->getValueAPF();
      const APFloat &V2 = N2CFP->getValueAPF();
      const APFloat &V3 = N3CFP->getValueAPF();
      APFloat::opStatus s =
        V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
      if (s != APFloat::opInvalidOp)
        return getConstantFP(V1, VT);
    }
    break;
  }
  case ISD::CONCAT_VECTORS:
    // A CONCAT_VECTORS with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR &&
        N3.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, Elts);
    }
    break;
  case ISD::SETCC: {
    // Use FoldSetCC to simplify SETCC's.
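    // (E.g. a SETCC of two integer constants, such as (setcc 1, 2, setlt),
    // folds to a constant true/false value here.)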
    SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
    if (Simp.getNode()) return Simp;
    break;
  }
  case ISD::SELECT:
    if (N1C) {
      if (N1C->getZExtValue())
        return N2;             // select true, X, Y -> X
      return N3;               // select false, X, Y -> Y
    }

    if (N2 == N3) return N2;   // select C, X, X -> X
    break;
  case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
  case ISD::INSERT_SUBVECTOR: {
    SDValue Index = N3;
    if (VT.isSimple() && N1.getValueType().isSimple()
        && N2.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             N2.getValueType().isVector() &&
             "Insert subvector VTs must be vectors");
      assert(VT == N1.getValueType() &&
             "Dest and insert subvector source types must match!");
      assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
             "Insert subvector must be from smaller vector to larger vector!");
      if (isa<ConstantSDNode>(Index.getNode())) {
        assert((N2.getValueType().getVectorNumElements() +
                cast<ConstantSDNode>(Index.getNode())->getZExtValue()
                <= VT.getVectorNumElements()) &&
               "Insert subvector overflow!");
      }

      // Trivial insertion.
      if (VT.getSimpleVT() == N2.getSimpleValueType())
        return N2;
    }
    break;
  }
  case ISD::BITCAST:
    // Fold bit_convert nodes from a type to themselves.
    if (N1.getValueType() == VT)
      return N1;
    break;
  }

  // Memoize node if it doesn't produce a flag.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) {
    SDValue Ops[] = { N1, N2, N3 };
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
                                          DL.getDebugLoc(), VTs, N1, N2, N3);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
                                          DL.getDebugLoc(), VTs, N1, N2, N3);
  }

  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VT, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              SDValue N4, SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VT, Ops);
}

/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
/// the incoming stack arguments to be loaded from the stack.
SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
  SmallVector<SDValue, 8> ArgChains;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument.
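  // (Incoming stack arguments are loads from fixed stack objects, which are
  // assigned negative frame indices; the FI->getIndex() < 0 test below keys
  // on that.)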
  for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
       UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0)
          ArgChains.push_back(SDValue(L, 1));

  // Build a tokenfactor for all the chains.
  return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
                              SDLoc dl) {
  assert(Value.getOpcode() != ISD::UNDEF);

  unsigned NumBits = VT.getScalarType().getSizeInBits();
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
    assert(C->getAPIntValue().getBitWidth() == 8);
    APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
    if (VT.isInteger())
      return DAG.getConstant(Val, VT);
    return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
  }

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
  if (NumBits > 8) {
    // Use a multiplication with 0x010101... to extend the input to the
    // required length.
    APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
    Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
  }

  return Value;
}

/// getMemsetStringVal - Similar to getMemsetValue, except this is only used
/// when a memcpy is turned into a memset because the source is a constant
/// string pointer.
static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
                                  const TargetLowering &TLI, StringRef Str) {
  // Handle vector with all elements zero.
  if (Str.empty()) {
    if (VT.isInteger())
      return DAG.getConstant(0, VT);
    else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
      return DAG.getConstantFP(0.0, VT);
    else if (VT.isVector()) {
      unsigned NumElts = VT.getVectorNumElements();
      MVT EltVT = (VT.getVectorElementType() == MVT::f32) ?
        MVT::i32 : MVT::i64;
      return DAG.getNode(ISD::BITCAST, dl, VT,
                         DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
                                                             EltVT, NumElts)));
    } else
      llvm_unreachable("Expected type!");
  }

  assert(!VT.isVector() && "Can't handle vector type here!");
  unsigned NumVTBits = VT.getSizeInBits();
  unsigned NumVTBytes = NumVTBits / 8;
  unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));

  APInt Val(NumVTBits, 0);
  if (TLI.isLittleEndian()) {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Str[i] << i*8;
  } else {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
  }

  // If the "cost" of materializing the integer immediate is less than the
  // cost of a load, then it is cost effective to turn the load into the
  // immediate.
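  // (E.g. on a little-endian target, the first four bytes of "abcd" form the
  // i32 immediate 0x64636261, turning the load into a cheap constant.)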
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
    return DAG.getConstant(Val, VT);
  return SDValue(nullptr, 0);
}

/// getMemBasePlusOffset - Returns Base plus Offset as an ADD node of the
/// base pointer's value type.
static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
                                    SelectionDAG &DAG) {
  EVT VT = Base.getValueType();
  return DAG.getNode(ISD::ADD, dl,
                     VT, Base, DAG.getConstant(Offset, VT));
}

/// isMemSrcFromString - Returns true if memcpy source is a string constant.
///
static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
  unsigned SrcDelta = 0;
  GlobalAddressSDNode *G = nullptr;
  if (Src.getOpcode() == ISD::GlobalAddress)
    G = cast<GlobalAddressSDNode>(Src);
  else if (Src.getOpcode() == ISD::ADD &&
           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
           Src.getOperand(1).getOpcode() == ISD::Constant) {
    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
  }
  if (!G)
    return false;

  return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
}

/// FindOptimalMemOpLowering - Determines the optimal series of memory ops
/// to replace the memset / memcpy. Return true if the number of memory ops
/// is below the threshold. It returns the types of the sequence of
/// memory ops to perform memset / memcpy by reference.
static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
                                     unsigned Limit, uint64_t Size,
                                     unsigned DstAlign, unsigned SrcAlign,
                                     bool IsMemset,
                                     bool ZeroMemset,
                                     bool MemcpyStrSrc,
                                     bool AllowOverlap,
                                     SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
         "Expecting memcpy / memset source to meet alignment requirement!");
  // If 'SrcAlign' is zero, that means the memory operation does not need to
  // load the value, i.e. memset or memcpy from constant string. Otherwise,
  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
  // is the specified alignment of the memory operation. If it is zero, that
  // means it's possible to change the alignment of the destination.
  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  // not need to be loaded.
  EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
                                   IsMemset, ZeroMemset, MemcpyStrSrc,
                                   DAG.getMachineFunction());

  if (VT == MVT::Other) {
    unsigned AS = 0;
    if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
        TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) {
      VT = TLI.getPointerTy();
    } else {
      switch (DstAlign & 7) {
      case 0:  VT = MVT::i64; break;
      case 4:  VT = MVT::i32; break;
      case 2:  VT = MVT::i16; break;
      default: VT = MVT::i8;  break;
      }
    }

    MVT LVT = MVT::i64;
    while (!TLI.isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  while (Size != 0) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
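      // E.g. if 3 bytes remain and i32 was the operation type, step down
      // through i16 and finish with i16 + i8 operations (or, below, a single
      // overlapping unaligned access when the target says that is fast).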
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            TLI.isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 TLI.isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing one (or a pair of) unaligned and overlapping load / store
      // operations.
      // FIXME: Only do this for 64-bit or more since we don't have a proper
      // cost model for unaligned load / store.
      bool Fast;
      unsigned AS = 0;
      if (NumMemOps && AllowOverlap &&
          VTSize >= 8 && NewVTSize < Size &&
          TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign, &Fast) && Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
                                       SDValue Chain, SDValue Dst,
                                       SDValue Src, uint64_t Size,
                                       unsigned Align, bool isVol,
                                       bool AlwaysInline,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Turn a memcpy of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)
    return Chain;

  // Expand memcpy to a series of load and store ops if the size operand falls
  // below a certain threshold.
  // TODO: In the AlwaysInline case, if the size is big then generate a loop
  // rather than maybe a humongous number of loads and stores.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize =
    MF.getFunction()->getAttributes().
      hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  StringRef Str;
  bool CopyFromStr = isMemSrcFromString(Src, Str);
  bool isZeroStr = CopyFromStr && Str.empty();
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);

  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align),
                                (isZeroStr ? 0 : SrcAlign),
                                false, false, CopyFromStr, true, DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned)TLI.getDataLayout()->getABITypeAlignment(Ty);

    // Don't promote to an alignment that would require dynamic stack
    // realignment.
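    // (E.g. avoid promoting a destination alloca to 16-byte alignment on a
    // target whose stack is only guaranteed 8-byte aligned, which would
    // otherwise force dynamic realignment of the whole frame.)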
    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    if (!TRI->needsStackRealignment(MF))
      while (NewAlign > Align &&
             TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
        NewAlign /= 2;

    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  uint64_t SrcOff = 0, DstOff = 0;
  for (unsigned i = 0; i != NumMemOps; ++i) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the
      // previous pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      SrcOff -= VTSize - Size;
      DstOff -= VTSize - Size;
    }

    if (CopyFromStr &&
        (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
      // It's unlikely a store of a vector immediate can be done in a single
      // instruction. It would require a load from a constant pool first.
      // We only handle zero vectors here.
      // FIXME: Handle other cases where store of vector immediate is done in
      // a single instruction.
      Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
      if (Value.getNode())
        Store = DAG.getStore(Chain, dl, Value,
                             getMemBasePlusOffset(Dst, DstOff, dl, DAG),
                             DstPtrInfo.getWithOffset(DstOff), isVol,
                             false, Align);
    }

    if (!Store.getNode()) {
      // The type might not be legal for the target.  This should only happen
      // if the type is smaller than a legal type, as on PPC, so the right
      // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify
      // to Load/Store if NVT==VT.
      // FIXME: does the case above also need this?
      EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
      assert(NVT.bitsGE(VT));
      Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
                             getMemBasePlusOffset(Src, SrcOff, dl, DAG),
                             SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
                             false, MinAlign(SrcAlign, SrcOff));
      Store = DAG.getTruncStore(Chain, dl, Value,
                                getMemBasePlusOffset(Dst, DstOff, dl, DAG),
                                DstPtrInfo.getWithOffset(DstOff), VT, isVol,
                                false, Align);
    }
    OutChains.push_back(Store);
    SrcOff += VTSize;
    DstOff += VTSize;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
                                        SDValue Chain, SDValue Dst,
                                        SDValue Src, uint64_t Size,
                                        unsigned Align, bool isVol,
                                        bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
  // Turn a memmove of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)
    return Chain;

  // Expand memmove to a series of load and store ops if the size operand
  // falls below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize = MF.getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);

  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align), SrcAlign,
                                false, false, false, false, DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned)TLI.getDataLayout()->getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  uint64_t SrcOff = 0, DstOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value;

    Value = DAG.getLoad(VT, dl, Chain,
                        getMemBasePlusOffset(Src, SrcOff, dl, DAG),
                        SrcPtrInfo.getWithOffset(SrcOff), isVol,
                        false, false, SrcAlign);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
    SrcOff += VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  OutChains.clear();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Store;

    Store = DAG.getStore(Chain, dl, LoadValues[i],
                         getMemBasePlusOffset(Dst, DstOff, dl, DAG),
                         DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
    OutChains.push_back(Store);
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

/// \brief Lower the call to 'memset' intrinsic function into a series of store
/// operations.
///
/// \param DAG Selection DAG where lowered code is placed.
/// \param dl Link to corresponding IR location.
/// \param Chain Control flow dependency.
/// \param Dst Pointer to destination memory location.
/// \param Src Value of byte to write into the memory.
/// \param Size Number of bytes to write.
/// \param Align Alignment of the destination in bytes.
/// \param isVol True if destination is volatile.
/// \param DstPtrInfo IR information on the memory pointer.
/// \returns New head in the control flow, if lowering was successful, empty
/// SDValue otherwise.
///
/// The function tries to replace 'llvm.memset' intrinsic with several store
/// operations and value calculation code. This is usually profitable for small
/// memory size.
static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
                               SDValue Chain, SDValue Dst,
                               SDValue Src, uint64_t Size,
                               unsigned Align, bool isVol,
                               MachinePointerInfo DstPtrInfo) {
  // Turn a memset of undef to nop.
  if (Src.getOpcode() == ISD::UNDEF)
    return Chain;

  // Expand memset to a series of store ops if the size operand falls below a
  // certain threshold.
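  // E.g. memset(p, 0, 16) on a target with legal i64 typically becomes two
  // i64 stores of the constant 0 rather than a library call.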
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool OptSize = MF.getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  bool IsZeroVal =
    isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
  if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
                                Size, (DstAlignCanChange ? 0 : Align), 0,
                                true, IsZeroVal, false, true, DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned)TLI.getDataLayout()->getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI->setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  uint64_t DstOff = 0;
  unsigned NumMemOps = MemOps.size();

  // Find the largest store and generate the bit pattern for it.
  EVT LargestVT = MemOps[0];
  for (unsigned i = 1; i < NumMemOps; i++)
    if (MemOps[i].bitsGT(LargestVT))
      LargestVT = MemOps[i];
  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);

  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the
      // previous pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      DstOff -= VTSize - Size;
    }

    // If this store is smaller than the largest store, see whether we can get
    // the smaller value for free with a truncate.
    SDValue Value = MemSetValue;
    if (VT.bitsLT(LargestVT)) {
      if (!LargestVT.isVector() && !VT.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
      else
        Value = getMemsetValue(Src, VT, DAG, dl);
    }
    assert(Value.getValueType() == VT && "Value with wrong type.");
    SDValue Store = DAG.getStore(Chain, dl, Value,
                                 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
                                 DstPtrInfo.getWithOffset(DstOff),
                                 isVol, false, Align);
    OutChains.push_back(Store);
    DstOff += VT.getSizeInBits() / 8;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
                                SDValue Src, SDValue Size,
                                unsigned Align, bool isVol, bool AlwaysInline,
                                MachinePointerInfo DstPtrInfo,
                                MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memcpy to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memcpy with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                             ConstantSize->getZExtValue(),
                                             Align, isVol, false,
                                             DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memcpy with target-specific
  // code. If the target chooses to do this, this is the next best.
  SDValue Result =
    TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
                                isVol, AlwaysInline,
                                DstPtrInfo, SrcPtrInfo);
  if (Result.getNode())
    return Result;

  // If we really need inline code and the target declined to provide it,
  // use a (potentially long) sequence of loads and stores.
  if (AlwaysInline) {
    assert(ConstantSize && "AlwaysInline requires a constant size!");
    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                   ConstantSize->getZExtValue(), Align, isVol,
                                   true, DstPtrInfo, SrcPtrInfo);
  }

  // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
  // memcpy is not guaranteed to be safe. libc memcpys aren't required to
  // respect volatile, so they may do things like read or write memory
  // beyond the given memory regions. But fixing this isn't easy, and most
  // people don't care.

  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
               Type::getVoidTy(*getContext()),
               getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
                                 TLI->getPointerTy()), std::move(Args), 0)
    .setDiscardResult();
  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);

  return CallResult.second;
}

SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
                                 SDValue Src, SDValue Size,
                                 unsigned Align, bool isVol,
                                 MachinePointerInfo DstPtrInfo,
                                 MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memmove to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memmove with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
      getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
                               ConstantSize->getZExtValue(), Align, isVol,
                               false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memmove with target-specific
  // code. If the target chooses to do this, this is the next best.
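  // (E.g. a target with wide vector registers may emit a tuned inline
  // sequence from its EmitTargetCodeForMemmove hook instead of the generic
  // expansion above.)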
  SDValue Result =
    TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align,
                                 isVol, DstPtrInfo, SrcPtrInfo);
  if (Result.getNode())
    return Result;

  // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
  // not be safe. See memcpy above for more details.

  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
               Type::getVoidTy(*getContext()),
               getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
                                 TLI->getPointerTy()), std::move(Args), 0)
    .setDiscardResult();
  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);

  return CallResult.second;
}

SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
                                SDValue Src, SDValue Size,
                                unsigned Align, bool isVol,
                                MachinePointerInfo DstPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memset to stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memset with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
      getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
                      Align, isVol, DstPtrInfo);

    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memset with target-specific
  // code. If the target chooses to do this, this is the next best.
  SDValue Result =
    TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
                                DstPtrInfo);
  if (Result.getNode())
    return Result;

  // Emit a library call.
  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
  Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Dst; Entry.Ty = IntPtrTy;
  Args.push_back(Entry);
  // Extend or truncate the argument to be an i32 value for the call.
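  // (The C library's memset takes its fill value as an int, so the byte is
  // passed as an i32 below regardless of the original operand type.)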
  if (Src.getValueType().bitsGT(MVT::i32))
    Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
  else
    Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
  Entry.Node = Src;
  Entry.Ty = Type::getInt32Ty(*getContext());
  Entry.isSExt = true;
  Args.push_back(Entry);
  Entry.Node = Size;
  Entry.Ty = IntPtrTy;
  Entry.isSExt = false;
  Args.push_back(Entry);

  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
               Type::getVoidTy(*getContext()),
               getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
                                 TLI->getPointerTy()), std::move(Args), 0)
    .setDiscardResult();

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO,
                                AtomicOrdering SuccessOrdering,
                                AtomicOrdering FailureOrdering,
                                SynchronizationScope SynchScope) {
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  AddNodeIDNode(ID, Opcode, VTList, Ops);
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void* IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  // Allocate the operands array for the node out of the BumpPtrAllocator,
  // since SDNode doesn't have access to it. This memory will be "leaked"
  // when the node is deallocated, but recovered when the allocator is
  // released. If the number of operands is less than 5 we use AtomicSDNode's
  // internal storage.
  unsigned NumOps = Ops.size();
  SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps)
                             : nullptr;

  SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
                                               dl.getDebugLoc(), VTList, MemVT,
                                               Ops.data(), DynOps, NumOps, MMO,
                                               SuccessOrdering, FailureOrdering,
                                               SynchScope);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  return getAtomic(Opcode, dl, MemVT, VTList, Ops, MMO, Ordering,
                   Ordering, SynchScope);
}

SDValue SelectionDAG::getAtomicCmpSwap(
    unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs, SDValue Chain,
    SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
    unsigned Alignment, AtomicOrdering SuccessOrdering,
    AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();

  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  unsigned Flags = MachineMemOperand::MOVolatile;
  Flags |= MachineMemOperand::MOLoad;
  Flags |= MachineMemOperand::MOStore;

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);

  return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO,
                          SuccessOrdering, FailureOrdering, SynchScope);
}

SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT,
                                       SDVTList VTs, SDValue Chain, SDValue Ptr,
                                       SDValue Cmp, SDValue Swp,
                                       MachineMemOperand *MMO,
                                       AtomicOrdering SuccessOrdering,
                                       AtomicOrdering FailureOrdering,
                                       SynchronizationScope SynchScope) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO,
                   SuccessOrdering, FailureOrdering, SynchScope);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDValue Chain,
                                SDValue Ptr, SDValue Val,
                                const Value* PtrVal,
                                unsigned Alignment,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  // An atomic store does not load. An atomic load does not store.
  // (An atomicrmw obviously both loads and stores.)
  // For now, atomics are considered to be volatile always, and they are
  // chained as such.
  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  unsigned Flags = MachineMemOperand::MOVolatile;
  if (Opcode != ISD::ATOMIC_STORE)
    Flags |= MachineMemOperand::MOLoad;
  if (Opcode != ISD::ATOMIC_LOAD)
    Flags |= MachineMemOperand::MOStore;

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
                            MemVT.getStoreSize(), Alignment);

  return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
                   Ordering, SynchScope);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                SDValue Chain,
                                SDValue Ptr, SDValue Val,
                                MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
          Opcode == ISD::ATOMIC_LOAD_SUB ||
          Opcode == ISD::ATOMIC_LOAD_AND ||
          Opcode == ISD::ATOMIC_LOAD_OR ||
          Opcode == ISD::ATOMIC_LOAD_XOR ||
          Opcode == ISD::ATOMIC_LOAD_NAND ||
          Opcode == ISD::ATOMIC_LOAD_MIN ||
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");

  EVT VT = Val.getValueType();

  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other)
                                             : getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Val};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
                                EVT VT, SDValue Chain,
                                SDValue Ptr,
                                MachineMemOperand *MMO,
                                AtomicOrdering Ordering,
                                SynchronizationScope SynchScope) {
  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO, Ordering, SynchScope);
}

/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl) {
  if (Ops.size() == 1)
    return Ops[0];

  SmallVector<EVT, 4> VTs;
  VTs.reserve(Ops.size());
  for (unsigned i = 0; i < Ops.size(); ++i)
    VTs.push_back(Ops[i].getValueType());
  return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
}

SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
                                  ArrayRef<SDValue> Ops,
                                  EVT MemVT, MachinePointerInfo PtrInfo,
                                  unsigned Align, bool Vol,
                                  bool ReadMem, bool WriteMem) {
  if (Align == 0)  // Ensure that codegen never sees alignment 0
    Align = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  unsigned Flags = 0;
  if (WriteMem)
    Flags |= MachineMemOperand::MOStore;
  if (ReadMem)
    Flags |= MachineMemOperand::MOLoad;
  if (Vol)
    Flags |= MachineMemOperand::MOVolatile;
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);

  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}

SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
                                  ArrayRef<SDValue> Ops, EVT MemVT,
                                  MachineMemOperand *MMO) {
  assert((Opcode == ISD::INTRINSIC_VOID ||
          Opcode == ISD::INTRINSIC_W_CHAIN ||
          Opcode == ISD::PREFETCH ||
          Opcode == ISD::LIFETIME_START ||
          Opcode == ISD::LIFETIME_END ||
          (Opcode <= INT_MAX &&
           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
         "Opcode is not a memory-accessing opcode!");

  // Memoize the node unless it returns a flag.
  MemIntrinsicSDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
    void *IP = nullptr;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      return SDValue(E, 0);
    }

    N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
                                               dl.getDebugLoc(), VTList, Ops,
                                               MemVT, MMO);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
                                               dl.getDebugLoc(), VTList, Ops,
                                               MemVT, MMO);
  }
  InsertNode(N);
  return SDValue(N, 0);
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it.  This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4630 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) { 4631 // If this is FI+Offset, we can model it. 4632 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 4633 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset); 4634 4635 // If this is (FI+Offset1)+Offset2, we can model it. 4636 if (Ptr.getOpcode() != ISD::ADD || 4637 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 4638 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 4639 return MachinePointerInfo(); 4640 4641 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4642 return MachinePointerInfo::getFixedStack(FI, Offset+ 4643 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 4644 } 4645 4646 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 4647 /// MachinePointerInfo record from it. This is particularly useful because the 4648 /// code generator has many cases where it doesn't bother passing in a 4649 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 4650 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) { 4651 // If the 'Offset' value isn't a constant, we can't handle this. 4652 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 4653 return InferPointerInfo(Ptr, OffsetNode->getSExtValue()); 4654 if (OffsetOp.getOpcode() == ISD::UNDEF) 4655 return InferPointerInfo(Ptr); 4656 return MachinePointerInfo(); 4657 } 4658 4659 4660 SDValue 4661 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 4662 EVT VT, SDLoc dl, SDValue Chain, 4663 SDValue Ptr, SDValue Offset, 4664 MachinePointerInfo PtrInfo, EVT MemVT, 4665 bool isVolatile, bool isNonTemporal, bool isInvariant, 4666 unsigned Alignment, const AAMDNodes &AAInfo, 4667 const MDNode *Ranges) { 4668 assert(Chain.getValueType() == MVT::Other && 4669 "Invalid chain type"); 4670 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4671 Alignment = getEVTAlignment(VT); 4672 4673 unsigned Flags = MachineMemOperand::MOLoad; 4674 if (isVolatile) 4675 Flags |= MachineMemOperand::MOVolatile; 4676 if (isNonTemporal) 4677 Flags |= MachineMemOperand::MONonTemporal; 4678 if (isInvariant) 4679 Flags |= MachineMemOperand::MOInvariant; 4680 4681 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 4682 // clients. 4683 if (PtrInfo.V.isNull()) 4684 PtrInfo = InferPointerInfo(Ptr, Offset); 4685 4686 MachineFunction &MF = getMachineFunction(); 4687 MachineMemOperand *MMO = 4688 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, 4689 AAInfo, Ranges); 4690 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 4691 } 4692 4693 SDValue 4694 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 4695 EVT VT, SDLoc dl, SDValue Chain, 4696 SDValue Ptr, SDValue Offset, EVT MemVT, 4697 MachineMemOperand *MMO) { 4698 if (VT == MemVT) { 4699 ExtType = ISD::NON_EXTLOAD; 4700 } else if (ExtType == ISD::NON_EXTLOAD) { 4701 assert(VT == MemVT && "Non-extending load from different memory type!"); 4702 } else { 4703 // Extending load. 
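  // NOTE: illustrative sketch, not part of the original file. An extending
  // load widens a narrow in-memory value into a wider result register.
  // Assuming 'DAG', 'dl', 'Chain', and 'Ptr' are in scope (and that the
  // trailing AAInfo argument is defaulted in the header), zero-extending an
  // i8 in memory to an i32 result could look like:
  //
  //   SDValue L = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain, Ptr,
  //                              MachinePointerInfo(), MVT::i8,
  //                              /*isVolatile=*/false, /*isNonTemporal=*/false,
  //                              /*isInvariant=*/false, /*Alignment=*/0);
  //
  // The asserts that follow check exactly this shape: MemVT must be strictly
  // narrower than VT, and the two must agree on integer/FP and vector-ness.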
    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
           "Should only be an extending load, not truncating!");
    assert(VT.isInteger() == MemVT.isInteger() &&
           "Cannot convert from FP to Int or Int -> FP!");
    assert(VT.isVector() == MemVT.isVector() &&
           "Cannot use an extending load to convert to or from a vector!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
           "Cannot use an extending load to change the number of vector elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
         "Unindexed load with an offset!");

  SDVTList VTs = Indexed ?
    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
                                     MMO->isNonTemporal(),
                                     MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
                                             dl.getDebugLoc(), VTs, AM, ExtType,
                                             MemVT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachinePointerInfo PtrInfo,
                              bool isVolatile, bool isNonTemporal,
                              bool isInvariant, unsigned Alignment,
                              const AAMDNodes &AAInfo,
                              const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
                 AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
                              SDValue Chain, SDValue Ptr,
                              MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 bool isVolatile, bool isNonTemporal,
                                 bool isInvariant, unsigned Alignment,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, MemVT, isVolatile, isNonTemporal, isInvariant,
                 Alignment, AAInfo);
}


SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
                                 SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue
SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
                             SDValue Offset, ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
         "Load is already an indexed load!");
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->isVolatile(),
LD->isNonTemporal(), 4793 false, LD->getAlignment()); 4794 } 4795 4796 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val, 4797 SDValue Ptr, MachinePointerInfo PtrInfo, 4798 bool isVolatile, bool isNonTemporal, 4799 unsigned Alignment, const AAMDNodes &AAInfo) { 4800 assert(Chain.getValueType() == MVT::Other && 4801 "Invalid chain type"); 4802 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4803 Alignment = getEVTAlignment(Val.getValueType()); 4804 4805 unsigned Flags = MachineMemOperand::MOStore; 4806 if (isVolatile) 4807 Flags |= MachineMemOperand::MOVolatile; 4808 if (isNonTemporal) 4809 Flags |= MachineMemOperand::MONonTemporal; 4810 4811 if (PtrInfo.V.isNull()) 4812 PtrInfo = InferPointerInfo(Ptr); 4813 4814 MachineFunction &MF = getMachineFunction(); 4815 MachineMemOperand *MMO = 4816 MF.getMachineMemOperand(PtrInfo, Flags, 4817 Val.getValueType().getStoreSize(), Alignment, 4818 AAInfo); 4819 4820 return getStore(Chain, dl, Val, Ptr, MMO); 4821 } 4822 4823 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val, 4824 SDValue Ptr, MachineMemOperand *MMO) { 4825 assert(Chain.getValueType() == MVT::Other && 4826 "Invalid chain type"); 4827 EVT VT = Val.getValueType(); 4828 SDVTList VTs = getVTList(MVT::Other); 4829 SDValue Undef = getUNDEF(Ptr.getValueType()); 4830 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 4831 FoldingSetNodeID ID; 4832 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 4833 ID.AddInteger(VT.getRawBits()); 4834 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(), 4835 MMO->isNonTemporal(), MMO->isInvariant())); 4836 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4837 void *IP = nullptr; 4838 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 4839 cast<StoreSDNode>(E)->refineAlignment(MMO); 4840 return SDValue(E, 0); 4841 } 4842 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(), 4843 dl.getDebugLoc(), VTs, 4844 ISD::UNINDEXED, false, VT, MMO); 4845 CSEMap.InsertNode(N, IP); 4846 InsertNode(N); 4847 return SDValue(N, 0); 4848 } 4849 4850 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, 4851 SDValue Ptr, MachinePointerInfo PtrInfo, 4852 EVT SVT,bool isVolatile, bool isNonTemporal, 4853 unsigned Alignment, 4854 const AAMDNodes &AAInfo) { 4855 assert(Chain.getValueType() == MVT::Other && 4856 "Invalid chain type"); 4857 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4858 Alignment = getEVTAlignment(SVT); 4859 4860 unsigned Flags = MachineMemOperand::MOStore; 4861 if (isVolatile) 4862 Flags |= MachineMemOperand::MOVolatile; 4863 if (isNonTemporal) 4864 Flags |= MachineMemOperand::MONonTemporal; 4865 4866 if (PtrInfo.V.isNull()) 4867 PtrInfo = InferPointerInfo(Ptr); 4868 4869 MachineFunction &MF = getMachineFunction(); 4870 MachineMemOperand *MMO = 4871 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment, 4872 AAInfo); 4873 4874 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 4875 } 4876 4877 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, 4878 SDValue Ptr, EVT SVT, 4879 MachineMemOperand *MMO) { 4880 EVT VT = Val.getValueType(); 4881 4882 assert(Chain.getValueType() == MVT::Other && 4883 "Invalid chain type"); 4884 if (VT == SVT) 4885 return getStore(Chain, dl, Val, Ptr, MMO); 4886 4887 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 4888 "Should only be a truncating store, not extending!"); 4889 assert(VT.isInteger() == SVT.isInteger() && 4890 "Can't do FP-INT conversion!"); 4891 
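  // NOTE: illustrative sketch, not part of the original file. A truncating
  // store is the mirror image of an extending load. Assuming 'DAG', 'dl',
  // 'Chain', 'Ptr', and an i32 'Val' are in scope (and that the trailing
  // AAInfo argument is defaulted in the header), storing only the low 8 bits
  // of Val could look like:
  //
  //   SDValue S = DAG.getTruncStore(Chain, dl, Val, Ptr,
  //                                 MachinePointerInfo(), MVT::i8,
  //                                 /*isVolatile=*/false,
  //                                 /*isNonTemporal=*/false, /*Alignment=*/0);
  //
  // The surrounding asserts enforce that SVT is strictly narrower than Val's
  // type and agrees with it on integer/FP and vector-ness.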
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
                                     MMO->isNonTemporal(), MMO->isInvariant()));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
                                              dl.getDebugLoc(), VTs,
                                              ISD::UNINDEXED, true, SVT, MMO);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue
SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
                              SDValue Offset, ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
         "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
                                              dl.getDebugLoc(), VTs, AM,
                                              ST->isTruncatingStore(),
                                              ST->getMemoryVT(),
                                              ST->getMemOperand());
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
                               SDValue Chain, SDValue Ptr,
                               SDValue SV,
                               unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
4966 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 4967 return getNode(Opcode, DL, VT, NewOps); 4968 } 4969 4970 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, 4971 ArrayRef<SDValue> Ops) { 4972 unsigned NumOps = Ops.size(); 4973 switch (NumOps) { 4974 case 0: return getNode(Opcode, DL, VT); 4975 case 1: return getNode(Opcode, DL, VT, Ops[0]); 4976 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 4977 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 4978 default: break; 4979 } 4980 4981 switch (Opcode) { 4982 default: break; 4983 case ISD::SELECT_CC: { 4984 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 4985 assert(Ops[0].getValueType() == Ops[1].getValueType() && 4986 "LHS and RHS of condition must have same type!"); 4987 assert(Ops[2].getValueType() == Ops[3].getValueType() && 4988 "True and False arms of SelectCC must have same type!"); 4989 assert(Ops[2].getValueType() == VT && 4990 "select_cc node must be of same type as true and false value!"); 4991 break; 4992 } 4993 case ISD::BR_CC: { 4994 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 4995 assert(Ops[2].getValueType() == Ops[3].getValueType() && 4996 "LHS/RHS of comparison should match types!"); 4997 break; 4998 } 4999 } 5000 5001 // Memoize nodes. 5002 SDNode *N; 5003 SDVTList VTs = getVTList(VT); 5004 5005 if (VT != MVT::Glue) { 5006 FoldingSetNodeID ID; 5007 AddNodeIDNode(ID, Opcode, VTs, Ops); 5008 void *IP = nullptr; 5009 5010 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 5011 return SDValue(E, 0); 5012 5013 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 5014 VTs, Ops); 5015 CSEMap.InsertNode(N, IP); 5016 } else { 5017 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 5018 VTs, Ops); 5019 } 5020 5021 InsertNode(N); 5022 return SDValue(N, 0); 5023 } 5024 5025 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, 5026 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 5027 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 5028 } 5029 5030 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5031 ArrayRef<SDValue> Ops) { 5032 if (VTList.NumVTs == 1) 5033 return getNode(Opcode, DL, VTList.VTs[0], Ops); 5034 5035 #if 0 5036 switch (Opcode) { 5037 // FIXME: figure out how to safely handle things like 5038 // int foo(int x) { return 1 << (x & 255); } 5039 // int bar() { return foo(256); } 5040 case ISD::SRA_PARTS: 5041 case ISD::SRL_PARTS: 5042 case ISD::SHL_PARTS: 5043 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 5044 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 5045 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 5046 else if (N3.getOpcode() == ISD::AND) 5047 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 5048 // If the and is only masking out bits that cannot effect the shift, 5049 // eliminate the and. 5050 unsigned NumBits = VT.getScalarType().getSizeInBits()*2; 5051 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 5052 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 5053 } 5054 break; 5055 } 5056 #endif 5057 5058 // Memoize the node unless it returns a flag. 
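  // NOTE: illustrative elaboration, not part of the original file. A node
  // whose last result type is MVT::Glue is intentionally kept out of the CSE
  // map: glue results express physical-register and scheduling coupling, so
  // each use needs its own node. For ordinary nodes, building the same thing
  // twice yields the same node, e.g. (assuming 'DAG', 'DL', 'X', and 'Y' are
  // in scope):
  //
  //   SDValue A = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
  //   SDValue B = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
  //   assert(A.getNode() == B.getNode() && "CSE returns the existing node");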
5059 SDNode *N; 5060 unsigned NumOps = Ops.size(); 5061 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 5062 FoldingSetNodeID ID; 5063 AddNodeIDNode(ID, Opcode, VTList, Ops); 5064 void *IP = nullptr; 5065 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 5066 return SDValue(E, 0); 5067 5068 if (NumOps == 1) { 5069 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), 5070 DL.getDebugLoc(), VTList, Ops[0]); 5071 } else if (NumOps == 2) { 5072 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), 5073 DL.getDebugLoc(), VTList, Ops[0], 5074 Ops[1]); 5075 } else if (NumOps == 3) { 5076 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), 5077 DL.getDebugLoc(), VTList, Ops[0], 5078 Ops[1], Ops[2]); 5079 } else { 5080 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 5081 VTList, Ops); 5082 } 5083 CSEMap.InsertNode(N, IP); 5084 } else { 5085 if (NumOps == 1) { 5086 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), 5087 DL.getDebugLoc(), VTList, Ops[0]); 5088 } else if (NumOps == 2) { 5089 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), 5090 DL.getDebugLoc(), VTList, Ops[0], 5091 Ops[1]); 5092 } else if (NumOps == 3) { 5093 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), 5094 DL.getDebugLoc(), VTList, Ops[0], 5095 Ops[1], Ops[2]); 5096 } else { 5097 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), 5098 VTList, Ops); 5099 } 5100 } 5101 InsertNode(N); 5102 return SDValue(N, 0); 5103 } 5104 5105 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) { 5106 return getNode(Opcode, DL, VTList, ArrayRef<SDValue>()); 5107 } 5108 5109 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5110 SDValue N1) { 5111 SDValue Ops[] = { N1 }; 5112 return getNode(Opcode, DL, VTList, Ops); 5113 } 5114 5115 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5116 SDValue N1, SDValue N2) { 5117 SDValue Ops[] = { N1, N2 }; 5118 return getNode(Opcode, DL, VTList, Ops); 5119 } 5120 5121 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5122 SDValue N1, SDValue N2, SDValue N3) { 5123 SDValue Ops[] = { N1, N2, N3 }; 5124 return getNode(Opcode, DL, VTList, Ops); 5125 } 5126 5127 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5128 SDValue N1, SDValue N2, SDValue N3, 5129 SDValue N4) { 5130 SDValue Ops[] = { N1, N2, N3, N4 }; 5131 return getNode(Opcode, DL, VTList, Ops); 5132 } 5133 5134 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 5135 SDValue N1, SDValue N2, SDValue N3, 5136 SDValue N4, SDValue N5) { 5137 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5138 return getNode(Opcode, DL, VTList, Ops); 5139 } 5140 5141 SDVTList SelectionDAG::getVTList(EVT VT) { 5142 return makeVTList(SDNode::getValueTypeList(VT), 1); 5143 } 5144 5145 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 5146 FoldingSetNodeID ID; 5147 ID.AddInteger(2U); 5148 ID.AddInteger(VT1.getRawBits()); 5149 ID.AddInteger(VT2.getRawBits()); 5150 5151 void *IP = nullptr; 5152 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5153 if (!Result) { 5154 EVT *Array = Allocator.Allocate<EVT>(2); 5155 Array[0] = VT1; 5156 Array[1] = VT2; 5157 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 5158 VTListMap.InsertNode(Result, IP); 5159 } 5160 return Result->getSDVTList(); 5161 } 5162 5163 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 5164 FoldingSetNodeID ID; 5165 
ID.AddInteger(3U); 5166 ID.AddInteger(VT1.getRawBits()); 5167 ID.AddInteger(VT2.getRawBits()); 5168 ID.AddInteger(VT3.getRawBits()); 5169 5170 void *IP = nullptr; 5171 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5172 if (!Result) { 5173 EVT *Array = Allocator.Allocate<EVT>(3); 5174 Array[0] = VT1; 5175 Array[1] = VT2; 5176 Array[2] = VT3; 5177 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 5178 VTListMap.InsertNode(Result, IP); 5179 } 5180 return Result->getSDVTList(); 5181 } 5182 5183 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 5184 FoldingSetNodeID ID; 5185 ID.AddInteger(4U); 5186 ID.AddInteger(VT1.getRawBits()); 5187 ID.AddInteger(VT2.getRawBits()); 5188 ID.AddInteger(VT3.getRawBits()); 5189 ID.AddInteger(VT4.getRawBits()); 5190 5191 void *IP = nullptr; 5192 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5193 if (!Result) { 5194 EVT *Array = Allocator.Allocate<EVT>(4); 5195 Array[0] = VT1; 5196 Array[1] = VT2; 5197 Array[2] = VT3; 5198 Array[3] = VT4; 5199 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 5200 VTListMap.InsertNode(Result, IP); 5201 } 5202 return Result->getSDVTList(); 5203 } 5204 5205 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 5206 unsigned NumVTs = VTs.size(); 5207 FoldingSetNodeID ID; 5208 ID.AddInteger(NumVTs); 5209 for (unsigned index = 0; index < NumVTs; index++) { 5210 ID.AddInteger(VTs[index].getRawBits()); 5211 } 5212 5213 void *IP = nullptr; 5214 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5215 if (!Result) { 5216 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 5217 std::copy(VTs.begin(), VTs.end(), Array); 5218 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 5219 VTListMap.InsertNode(Result, IP); 5220 } 5221 return Result->getSDVTList(); 5222 } 5223 5224 5225 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 5226 /// specified operands. If the resultant node already exists in the DAG, 5227 /// this does not modify the specified node, instead it returns the node that 5228 /// already exists. If the resultant node does not exist in the DAG, the 5229 /// input node is returned. As a degenerate case, if you specify the same 5230 /// input operands as the node already has, the input node is returned. 5231 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 5232 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 5233 5234 // Check to see if there is no change. 5235 if (Op == N->getOperand(0)) return N; 5236 5237 // See if the modified node already exists. 5238 void *InsertPos = nullptr; 5239 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 5240 return Existing; 5241 5242 // Nope it doesn't. Remove the node from its current place in the maps. 5243 if (InsertPos) 5244 if (!RemoveNodeFromCSEMaps(N)) 5245 InsertPos = nullptr; 5246 5247 // Now we update the operands. 5248 N->OperandList[0].set(Op); 5249 5250 // If this gets put into a CSE map, add it. 5251 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 5252 return N; 5253 } 5254 5255 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 5256 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 5257 5258 // Check to see if there is no change. 5259 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 5260 return N; // No operands changed, just return the input node. 5261 5262 // See if the modified node already exists. 
5263 void *InsertPos = nullptr; 5264 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 5265 return Existing; 5266 5267 // Nope it doesn't. Remove the node from its current place in the maps. 5268 if (InsertPos) 5269 if (!RemoveNodeFromCSEMaps(N)) 5270 InsertPos = nullptr; 5271 5272 // Now we update the operands. 5273 if (N->OperandList[0] != Op1) 5274 N->OperandList[0].set(Op1); 5275 if (N->OperandList[1] != Op2) 5276 N->OperandList[1].set(Op2); 5277 5278 // If this gets put into a CSE map, add it. 5279 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 5280 return N; 5281 } 5282 5283 SDNode *SelectionDAG:: 5284 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 5285 SDValue Ops[] = { Op1, Op2, Op3 }; 5286 return UpdateNodeOperands(N, Ops); 5287 } 5288 5289 SDNode *SelectionDAG:: 5290 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 5291 SDValue Op3, SDValue Op4) { 5292 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 5293 return UpdateNodeOperands(N, Ops); 5294 } 5295 5296 SDNode *SelectionDAG:: 5297 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 5298 SDValue Op3, SDValue Op4, SDValue Op5) { 5299 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 5300 return UpdateNodeOperands(N, Ops); 5301 } 5302 5303 SDNode *SelectionDAG:: 5304 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 5305 unsigned NumOps = Ops.size(); 5306 assert(N->getNumOperands() == NumOps && 5307 "Update with wrong number of operands"); 5308 5309 // Check to see if there is no change. 5310 bool AnyChange = false; 5311 for (unsigned i = 0; i != NumOps; ++i) { 5312 if (Ops[i] != N->getOperand(i)) { 5313 AnyChange = true; 5314 break; 5315 } 5316 } 5317 5318 // No operands changed, just return the input node. 5319 if (!AnyChange) return N; 5320 5321 // See if the modified node already exists. 5322 void *InsertPos = nullptr; 5323 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 5324 return Existing; 5325 5326 // Nope it doesn't. Remove the node from its current place in the maps. 5327 if (InsertPos) 5328 if (!RemoveNodeFromCSEMaps(N)) 5329 InsertPos = nullptr; 5330 5331 // Now we update the operands. 5332 for (unsigned i = 0; i != NumOps; ++i) 5333 if (N->OperandList[i] != Ops[i]) 5334 N->OperandList[i].set(Ops[i]); 5335 5336 // If this gets put into a CSE map, add it. 5337 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 5338 return N; 5339 } 5340 5341 /// DropOperands - Release the operands and set this node to have 5342 /// zero operands. 5343 void SDNode::DropOperands() { 5344 // Unlike the code in MorphNodeTo that does this, we don't need to 5345 // watch for dead nodes here. 5346 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 5347 SDUse &Use = *I++; 5348 Use.set(SDValue()); 5349 } 5350 } 5351 5352 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 5353 /// machine opcode. 
5354 /// 5355 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5356 EVT VT) { 5357 SDVTList VTs = getVTList(VT); 5358 return SelectNodeTo(N, MachineOpc, VTs, None); 5359 } 5360 5361 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5362 EVT VT, SDValue Op1) { 5363 SDVTList VTs = getVTList(VT); 5364 SDValue Ops[] = { Op1 }; 5365 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5366 } 5367 5368 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5369 EVT VT, SDValue Op1, 5370 SDValue Op2) { 5371 SDVTList VTs = getVTList(VT); 5372 SDValue Ops[] = { Op1, Op2 }; 5373 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5374 } 5375 5376 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5377 EVT VT, SDValue Op1, 5378 SDValue Op2, SDValue Op3) { 5379 SDVTList VTs = getVTList(VT); 5380 SDValue Ops[] = { Op1, Op2, Op3 }; 5381 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5382 } 5383 5384 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5385 EVT VT, ArrayRef<SDValue> Ops) { 5386 SDVTList VTs = getVTList(VT); 5387 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5388 } 5389 5390 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5391 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 5392 SDVTList VTs = getVTList(VT1, VT2); 5393 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5394 } 5395 5396 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5397 EVT VT1, EVT VT2) { 5398 SDVTList VTs = getVTList(VT1, VT2); 5399 return SelectNodeTo(N, MachineOpc, VTs, None); 5400 } 5401 5402 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5403 EVT VT1, EVT VT2, EVT VT3, 5404 ArrayRef<SDValue> Ops) { 5405 SDVTList VTs = getVTList(VT1, VT2, VT3); 5406 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5407 } 5408 5409 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5410 EVT VT1, EVT VT2, EVT VT3, EVT VT4, 5411 ArrayRef<SDValue> Ops) { 5412 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4); 5413 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5414 } 5415 5416 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5417 EVT VT1, EVT VT2, 5418 SDValue Op1) { 5419 SDVTList VTs = getVTList(VT1, VT2); 5420 SDValue Ops[] = { Op1 }; 5421 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5422 } 5423 5424 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5425 EVT VT1, EVT VT2, 5426 SDValue Op1, SDValue Op2) { 5427 SDVTList VTs = getVTList(VT1, VT2); 5428 SDValue Ops[] = { Op1, Op2 }; 5429 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5430 } 5431 5432 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5433 EVT VT1, EVT VT2, 5434 SDValue Op1, SDValue Op2, 5435 SDValue Op3) { 5436 SDVTList VTs = getVTList(VT1, VT2); 5437 SDValue Ops[] = { Op1, Op2, Op3 }; 5438 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5439 } 5440 5441 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5442 EVT VT1, EVT VT2, EVT VT3, 5443 SDValue Op1, SDValue Op2, 5444 SDValue Op3) { 5445 SDVTList VTs = getVTList(VT1, VT2, VT3); 5446 SDValue Ops[] = { Op1, Op2, Op3 }; 5447 return SelectNodeTo(N, MachineOpc, VTs, Ops); 5448 } 5449 5450 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5451 SDVTList VTs,ArrayRef<SDValue> Ops) { 5452 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 5453 // Reset the NodeID to -1. 
  N->setNodeId(-1);
  return N;
}

/// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that an operation is associated with multiple
/// lines. This makes the debugger work better at -O0, where there is a higher
/// probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
      (OLoc.getDebugLoc() != NLoc)) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one. Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner
/// or the legalizer, which maintain worklists that would need to be updated
/// when deleting things.
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
      return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list. Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
    // Initialize the memory references information.
    MN->setMemRefs(nullptr, nullptr);
    // If NumOps is larger than the # of operands we can have in a
    // MachineSDNode, reallocate the operand list.
5528 if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) { 5529 if (MN->OperandsNeedDelete) 5530 delete[] MN->OperandList; 5531 if (NumOps > array_lengthof(MN->LocalOperands)) 5532 // We're creating a final node that will live unmorphed for the 5533 // remainder of the current SelectionDAG iteration, so we can allocate 5534 // the operands directly out of a pool with no recycling metadata. 5535 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps), 5536 Ops.data(), NumOps); 5537 else 5538 MN->InitOperands(MN->LocalOperands, Ops.data(), NumOps); 5539 MN->OperandsNeedDelete = false; 5540 } else 5541 MN->InitOperands(MN->OperandList, Ops.data(), NumOps); 5542 } else { 5543 // If NumOps is larger than the # of operands we currently have, reallocate 5544 // the operand list. 5545 if (NumOps > N->NumOperands) { 5546 if (N->OperandsNeedDelete) 5547 delete[] N->OperandList; 5548 N->InitOperands(new SDUse[NumOps], Ops.data(), NumOps); 5549 N->OperandsNeedDelete = true; 5550 } else 5551 N->InitOperands(N->OperandList, Ops.data(), NumOps); 5552 } 5553 5554 // Delete any nodes that are still dead after adding the uses for the 5555 // new operands. 5556 if (!DeadNodeSet.empty()) { 5557 SmallVector<SDNode *, 16> DeadNodes; 5558 for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(), 5559 E = DeadNodeSet.end(); I != E; ++I) 5560 if ((*I)->use_empty()) 5561 DeadNodes.push_back(*I); 5562 RemoveDeadNodes(DeadNodes); 5563 } 5564 5565 if (IP) 5566 CSEMap.InsertNode(N, IP); // Memoize the new node. 5567 return N; 5568 } 5569 5570 5571 /// getMachineNode - These are used for target selectors to create a new node 5572 /// with specified return type(s), MachineInstr opcode, and operands. 5573 /// 5574 /// Note that getMachineNode returns the resultant node. If there is already a 5575 /// node of the specified opcode and operands, it returns that node instead of 5576 /// the current one. 
5577 MachineSDNode * 5578 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) { 5579 SDVTList VTs = getVTList(VT); 5580 return getMachineNode(Opcode, dl, VTs, None); 5581 } 5582 5583 MachineSDNode * 5584 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) { 5585 SDVTList VTs = getVTList(VT); 5586 SDValue Ops[] = { Op1 }; 5587 return getMachineNode(Opcode, dl, VTs, Ops); 5588 } 5589 5590 MachineSDNode * 5591 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5592 SDValue Op1, SDValue Op2) { 5593 SDVTList VTs = getVTList(VT); 5594 SDValue Ops[] = { Op1, Op2 }; 5595 return getMachineNode(Opcode, dl, VTs, Ops); 5596 } 5597 5598 MachineSDNode * 5599 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5600 SDValue Op1, SDValue Op2, SDValue Op3) { 5601 SDVTList VTs = getVTList(VT); 5602 SDValue Ops[] = { Op1, Op2, Op3 }; 5603 return getMachineNode(Opcode, dl, VTs, Ops); 5604 } 5605 5606 MachineSDNode * 5607 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5608 ArrayRef<SDValue> Ops) { 5609 SDVTList VTs = getVTList(VT); 5610 return getMachineNode(Opcode, dl, VTs, Ops); 5611 } 5612 5613 MachineSDNode * 5614 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) { 5615 SDVTList VTs = getVTList(VT1, VT2); 5616 return getMachineNode(Opcode, dl, VTs, None); 5617 } 5618 5619 MachineSDNode * 5620 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5621 EVT VT1, EVT VT2, SDValue Op1) { 5622 SDVTList VTs = getVTList(VT1, VT2); 5623 SDValue Ops[] = { Op1 }; 5624 return getMachineNode(Opcode, dl, VTs, Ops); 5625 } 5626 5627 MachineSDNode * 5628 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5629 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) { 5630 SDVTList VTs = getVTList(VT1, VT2); 5631 SDValue Ops[] = { Op1, Op2 }; 5632 return getMachineNode(Opcode, dl, VTs, Ops); 5633 } 5634 5635 MachineSDNode * 5636 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5637 EVT VT1, EVT VT2, SDValue Op1, 5638 SDValue Op2, SDValue Op3) { 5639 SDVTList VTs = getVTList(VT1, VT2); 5640 SDValue Ops[] = { Op1, Op2, Op3 }; 5641 return getMachineNode(Opcode, dl, VTs, Ops); 5642 } 5643 5644 MachineSDNode * 5645 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5646 EVT VT1, EVT VT2, 5647 ArrayRef<SDValue> Ops) { 5648 SDVTList VTs = getVTList(VT1, VT2); 5649 return getMachineNode(Opcode, dl, VTs, Ops); 5650 } 5651 5652 MachineSDNode * 5653 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5654 EVT VT1, EVT VT2, EVT VT3, 5655 SDValue Op1, SDValue Op2) { 5656 SDVTList VTs = getVTList(VT1, VT2, VT3); 5657 SDValue Ops[] = { Op1, Op2 }; 5658 return getMachineNode(Opcode, dl, VTs, Ops); 5659 } 5660 5661 MachineSDNode * 5662 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5663 EVT VT1, EVT VT2, EVT VT3, 5664 SDValue Op1, SDValue Op2, SDValue Op3) { 5665 SDVTList VTs = getVTList(VT1, VT2, VT3); 5666 SDValue Ops[] = { Op1, Op2, Op3 }; 5667 return getMachineNode(Opcode, dl, VTs, Ops); 5668 } 5669 5670 MachineSDNode * 5671 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5672 EVT VT1, EVT VT2, EVT VT3, 5673 ArrayRef<SDValue> Ops) { 5674 SDVTList VTs = getVTList(VT1, VT2, VT3); 5675 return getMachineNode(Opcode, dl, VTs, Ops); 5676 } 5677 5678 MachineSDNode * 5679 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, 5680 EVT VT2, EVT VT3, EVT VT4, 5681 ArrayRef<SDValue> Ops) { 5682 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4); 5683 return getMachineNode(Opcode, dl, VTs, Ops); 5684 } 5685 5686 
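// NOTE: illustrative usage sketch, not part of the original file. Instruction
// selectors call these overloads with target instruction opcodes; assuming
// 'DAG' and 'dl' are in scope, a generic IMPLICIT_DEF producing an i32 would
// be created with:
//
//   MachineSDNode *Def =
//       DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i32);
//
// Like getNode, getMachineNode CSEs through the folding set (unless the last
// result is MVT::Glue), so requesting an identical opcode/type/operand
// combination returns the already-existing node.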
MachineSDNode * 5687 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5688 ArrayRef<EVT> ResultTys, 5689 ArrayRef<SDValue> Ops) { 5690 SDVTList VTs = getVTList(ResultTys); 5691 return getMachineNode(Opcode, dl, VTs, Ops); 5692 } 5693 5694 MachineSDNode * 5695 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs, 5696 ArrayRef<SDValue> OpsArray) { 5697 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 5698 MachineSDNode *N; 5699 void *IP = nullptr; 5700 const SDValue *Ops = OpsArray.data(); 5701 unsigned NumOps = OpsArray.size(); 5702 5703 if (DoCSE) { 5704 FoldingSetNodeID ID; 5705 AddNodeIDNode(ID, ~Opcode, VTs, OpsArray); 5706 IP = nullptr; 5707 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 5708 return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL)); 5709 } 5710 } 5711 5712 // Allocate a new MachineSDNode. 5713 N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(), 5714 DL.getDebugLoc(), VTs); 5715 5716 // Initialize the operands list. 5717 if (NumOps > array_lengthof(N->LocalOperands)) 5718 // We're creating a final node that will live unmorphed for the 5719 // remainder of the current SelectionDAG iteration, so we can allocate 5720 // the operands directly out of a pool with no recycling metadata. 5721 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps), 5722 Ops, NumOps); 5723 else 5724 N->InitOperands(N->LocalOperands, Ops, NumOps); 5725 N->OperandsNeedDelete = false; 5726 5727 if (DoCSE) 5728 CSEMap.InsertNode(N, IP); 5729 5730 InsertNode(N); 5731 return N; 5732 } 5733 5734 /// getTargetExtractSubreg - A convenience function for creating 5735 /// TargetOpcode::EXTRACT_SUBREG nodes. 5736 SDValue 5737 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT, 5738 SDValue Operand) { 5739 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32); 5740 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 5741 VT, Operand, SRIdxVal); 5742 return SDValue(Subreg, 0); 5743 } 5744 5745 /// getTargetInsertSubreg - A convenience function for creating 5746 /// TargetOpcode::INSERT_SUBREG nodes. 5747 SDValue 5748 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT, 5749 SDValue Operand, SDValue Subreg) { 5750 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32); 5751 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 5752 VT, Operand, Subreg, SRIdxVal); 5753 return SDValue(Result, 0); 5754 } 5755 5756 /// getNodeIfExists - Get the specified node if it's already available, or 5757 /// else return NULL. 5758 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 5759 ArrayRef<SDValue> Ops, bool nuw, bool nsw, 5760 bool exact) { 5761 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 5762 FoldingSetNodeID ID; 5763 AddNodeIDNode(ID, Opcode, VTList, Ops); 5764 if (isBinOpWithFlags(Opcode)) 5765 AddBinaryNodeIDCustom(ID, nuw, nsw, exact); 5766 void *IP = nullptr; 5767 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 5768 return E; 5769 } 5770 return nullptr; 5771 } 5772 5773 /// getDbgValue - Creates a SDDbgValue node. 
///
/// SDNode
SDDbgValue *
SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R,
                          bool IsIndirect, uint64_t Off,
                          DebugLoc DL, unsigned O) {
  return new (Allocator) SDDbgValue(MDPtr, N, R, IsIndirect, Off, DL, O);
}

/// Constant
SDDbgValue *
SelectionDAG::getConstantDbgValue(MDNode *MDPtr, const Value *C,
                                  uint64_t Off,
                                  DebugLoc DL, unsigned O) {
  return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
}

/// FrameIndex
SDDbgValue *
SelectionDAG::getFrameIndexDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
                                    DebugLoc DL, unsigned O) {
  return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
}

namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses of a value with itself");

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: if an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // as well. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5870 /// This can cause recursive merging of nodes in the DAG. 5871 /// 5872 /// This version assumes that for each value of From, there is a 5873 /// corresponding value in To in the same position with the same type. 5874 /// 5875 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) { 5876 #ifndef NDEBUG 5877 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 5878 assert((!From->hasAnyUseOfValue(i) || 5879 From->getValueType(i) == To->getValueType(i)) && 5880 "Cannot use this version of ReplaceAllUsesWith!"); 5881 #endif 5882 5883 // Handle the trivial case. 5884 if (From == To) 5885 return; 5886 5887 // Iterate over just the existing users of From. See the comments in 5888 // the ReplaceAllUsesWith above. 5889 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 5890 RAUWUpdateListener Listener(*this, UI, UE); 5891 while (UI != UE) { 5892 SDNode *User = *UI; 5893 5894 // This node is about to morph, remove its old self from the CSE maps. 5895 RemoveNodeFromCSEMaps(User); 5896 5897 // A user can appear in a use list multiple times, and when this 5898 // happens the uses are usually next to each other in the list. 5899 // To help reduce the number of CSE recomputations, process all 5900 // the uses of this user that we can find this way. 5901 do { 5902 SDUse &Use = UI.getUse(); 5903 ++UI; 5904 Use.setNode(To); 5905 } while (UI != UE && *UI == User); 5906 5907 // Now that we have modified User, add it back to the CSE maps. If it 5908 // already exists there, recursively merge the results together. 5909 AddModifiedNodeToCSEMaps(User); 5910 } 5911 5912 // If we just RAUW'd the root, take note. 5913 if (From == getRoot().getNode()) 5914 setRoot(SDValue(To, getRoot().getResNo())); 5915 } 5916 5917 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 5918 /// This can cause recursive merging of nodes in the DAG. 5919 /// 5920 /// This version can replace From with any result values. To must match the 5921 /// number and types of values returned by From. 5922 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 5923 if (From->getNumValues() == 1) // Handle the simple case efficiently. 5924 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 5925 5926 // Iterate over just the existing users of From. See the comments in 5927 // the ReplaceAllUsesWith above. 5928 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 5929 RAUWUpdateListener Listener(*this, UI, UE); 5930 while (UI != UE) { 5931 SDNode *User = *UI; 5932 5933 // This node is about to morph, remove its old self from the CSE maps. 5934 RemoveNodeFromCSEMaps(User); 5935 5936 // A user can appear in a use list multiple times, and when this 5937 // happens the uses are usually next to each other in the list. 5938 // To help reduce the number of CSE recomputations, process all 5939 // the uses of this user that we can find this way. 5940 do { 5941 SDUse &Use = UI.getUse(); 5942 const SDValue &ToOp = To[Use.getResNo()]; 5943 ++UI; 5944 Use.set(ToOp); 5945 } while (UI != UE && *UI == User); 5946 5947 // Now that we have modified User, add it back to the CSE maps. If it 5948 // already exists there, recursively merge the results together. 5949 AddModifiedNodeToCSEMaps(User); 5950 } 5951 5952 // If we just RAUW'd the root, take note. 
5953 if (From == getRoot().getNode()) 5954 setRoot(SDValue(To[getRoot().getResNo()])); 5955 } 5956 5957 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 5958 /// uses of other values produced by From.getNode() alone. The Deleted 5959 /// vector is handled the same way as for ReplaceAllUsesWith. 5960 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 5961 // Handle the really simple, really trivial case efficiently. 5962 if (From == To) return; 5963 5964 // Handle the simple, trivial, case efficiently. 5965 if (From.getNode()->getNumValues() == 1) { 5966 ReplaceAllUsesWith(From, To); 5967 return; 5968 } 5969 5970 // Iterate over just the existing users of From. See the comments in 5971 // the ReplaceAllUsesWith above. 5972 SDNode::use_iterator UI = From.getNode()->use_begin(), 5973 UE = From.getNode()->use_end(); 5974 RAUWUpdateListener Listener(*this, UI, UE); 5975 while (UI != UE) { 5976 SDNode *User = *UI; 5977 bool UserRemovedFromCSEMaps = false; 5978 5979 // A user can appear in a use list multiple times, and when this 5980 // happens the uses are usually next to each other in the list. 5981 // To help reduce the number of CSE recomputations, process all 5982 // the uses of this user that we can find this way. 5983 do { 5984 SDUse &Use = UI.getUse(); 5985 5986 // Skip uses of different values from the same node. 5987 if (Use.getResNo() != From.getResNo()) { 5988 ++UI; 5989 continue; 5990 } 5991 5992 // If this node hasn't been modified yet, it's still in the CSE maps, 5993 // so remove its old self from the CSE maps. 5994 if (!UserRemovedFromCSEMaps) { 5995 RemoveNodeFromCSEMaps(User); 5996 UserRemovedFromCSEMaps = true; 5997 } 5998 5999 ++UI; 6000 Use.set(To); 6001 } while (UI != UE && *UI == User); 6002 6003 // We are iterating over all uses of the From node, so if a use 6004 // doesn't use the specific value, no changes are made. 6005 if (!UserRemovedFromCSEMaps) 6006 continue; 6007 6008 // Now that we have modified User, add it back to the CSE maps. If it 6009 // already exists there, recursively merge the results together. 6010 AddModifiedNodeToCSEMaps(User); 6011 } 6012 6013 // If we just RAUW'd the root, take note. 6014 if (From == getRoot()) 6015 setRoot(To); 6016 } 6017 6018 namespace { 6019 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 6020 /// to record information about a use. 6021 struct UseMemo { 6022 SDNode *User; 6023 unsigned Index; 6024 SDUse *Use; 6025 }; 6026 6027 /// operator< - Sort Memos by User. 6028 bool operator<(const UseMemo &L, const UseMemo &R) { 6029 return (intptr_t)L.User < (intptr_t)R.User; 6030 } 6031 } 6032 6033 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving 6034 /// uses of other values produced by From.getNode() alone. The same value 6035 /// may appear in both the From and To list. The Deleted vector is 6036 /// handled the same way as for ReplaceAllUsesWith. 6037 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From, 6038 const SDValue *To, 6039 unsigned Num){ 6040 // Handle the simple, trivial case efficiently. 6041 if (Num == 1) 6042 return ReplaceAllUsesOfValueWith(*From, *To); 6043 6044 // Read up all the uses and make records of them. This helps 6045 // processing new uses that are introduced during the 6046 // replacement process. 
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
         E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  std::sort(Uses.begin(), Uses.end());

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From. If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. It returns the number of ids assigned;
/// as a side effect, the DAG's node list is left sorted in topological order.
unsigned SelectionDAG::AssignTopologicalOrder() {

  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values. After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = I++;
    checkForCycles(N, this);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q = N;
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I) {
    SDNode *N = I;
    checkForCycles(N, this);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (I == SortedPos) {
#ifndef NDEBUG
      SDNode *S = ++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull(this); dbgs() << "\n";
      dbgs() << "Checking if this is due to cycles\n";
      checkForCycles(this, true);
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  DbgInfo->add(DB, SD, isParameter);
  if (SD)
    SD->setHasDebugValue(true);
}

/// TransferDbgValues - Transfer SDDbgValues.
void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
  if (From == To || !From.getNode()->getHasDebugValue())
    return;
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
       I != E; ++I) {
    SDDbgValue *Dbg = *I;
    if (Dbg->getKind() == SDDbgValue::SDNODE) {
      SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
                                      Dbg->isIndirect(),
                                      Dbg->getOffset(), Dbg->getDebugLoc(),
                                      Dbg->getOrder());
      ClonedDVs.push_back(Clone);
    }
  }
  for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
       E = ClonedDVs.end(); I != E; ++I)
    AddDbgValue(*I, ToNode, false);
}

//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//

HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         DebugLoc DL, const GlobalValue *GA,
                                         EVT VT, int64_t o, unsigned char TF)
  : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
                                         SDValue X, unsigned SrcAS,
                                         unsigned DestAS)
  : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
    SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
                     EVT memvt, MachineMemOperand *mmo)
  : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
                                      MMO->isNonTemporal(), MMO->isInvariant());
  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
  assert(isNonTemporal() == MMO->isNonTemporal() &&
         "Non-temporal encoding error!");
  // We check here that the size of the memory operand fits within the size of
  // the MMO.  This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
                     ArrayRef<SDValue> Ops, EVT memvt, MachineMemOperand *mmo)
  : SDNode(Opc, Order, dl, VTs, Ops),
    MemoryVT(memvt), MMO(mmo) {
  SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
                                      MMO->isNonTemporal(), MMO->isInvariant());
  assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

/// Profile - Gather unique data for the node.
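/// The FoldingSetNodeID filled in here is what the CSE maps key on, so nodes
/// that profile identically are candidates for folding into a single node.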
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {
  struct EVTArray {
    std::vector<EVT> VTs;

    EVTArray() {
      VTs.reserve(MVT::LAST_VALUETYPE);
      for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
        VTs.push_back(MVT((MVT::SimpleValueType)i));
    }
  };
}

static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true> > VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value.  This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node.
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there is any use of the indicated
/// value.  This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only user of N.
///
bool SDNode::isOnlyUserOf(SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
///
bool SDValue::isOperandOf(SDNode *N) const {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (*this == N->getOperand(i))
      return true;
  return false;
}

bool SDNode::isOperandOf(SDNode *N) const {
  for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
    if (this == N->OperandList[i].getNode())
      return true;
  return false;
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path.  In practice, this looks
/// through token factors and non-volatile loads.  In order to remain
/// efficient, it only looks a couple of nodes in; it does not do an
/// exhaustive search.
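///
/// For example, if Dest is some chain and *this is the chain result of a
/// non-volatile load whose chain operand is Dest, the load is looked through
/// and the walk succeeds; a volatile load or a store along the path makes
/// this return false.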
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply; we just want to be able to see through
  // TokenFactors etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel.  If
  // any of the operands of the TF does not reach dest, then we cannot do the
  // transformation.
  if (getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
        return false;
    return true;
  }

  // Loads don't have side effects; look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
  }
  return false;
}

/// hasPredecessor - Return true if N is a predecessor of this node.
/// N is either an operand of this node, or can be reached by recursively
/// traversing up the operands.
/// NOTE: This is an expensive method.  Use it carefully.
bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  return hasPredecessorHelper(N, Visited, Worklist);
}

bool
SDNode::hasPredecessorHelper(const SDNode *N,
                             SmallPtrSet<const SDNode *, 32> &Visited,
                             SmallVectorImpl<const SDNode *> &Worklist) const {
  if (Visited.empty()) {
    Worklist.push_back(this);
  } else {
    // Take a look in the visited set.  If we've already encountered this node
    // we needn't search further.
    if (Visited.count(N))
      return true;
  }

  // Haven't visited N yet.  Continue the search.
  while (!Worklist.empty()) {
    const SDNode *M = Worklist.pop_back_val();
    for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
      SDNode *Op = M->getOperand(i).getNode();
      if (Visited.insert(Op))
        Worklist.push_back(Op);
      if (Op == N)
        return true;
    }
  }

  return false;
}

uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  assert(Num < NumOperands && "Invalid child # of SDNode!");
  return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
}

SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
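        // For lane i this builds, e.g., (extract_vector_elt Operand, i),
        // with the index constant in the target's preferred vector-index
        // type.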
        const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                              OperandEltVT,
                              Operand,
                              getConstant(i, TLI->getVectorIdxTy()));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands));
      break;
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  return getNode(ISD::BUILD_VECTOR, dl,
                 EVT::getVectorVT(*getContext(), EltVT, ResNE), Scalars);
}

/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
/// location that is 'Dist' units away from the location that the 'Base' load
/// is loading from.
bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
                                     unsigned Bytes, int Dist) const {
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  // Handle X+C.
  if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
      cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
    return true;

  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
  bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address.  Return 0
/// if it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
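  // The alignment comes from value tracking: if the low AlignBits bits of the
  // global's address are known to be zero, the address is (1 << AlignBits)-
  // byte aligned, and MinAlign folds the constant offset back in.  For
  // instance, a 16-byte-aligned global plus an offset of 4 yields a usable
  // alignment of 4.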
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
    APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
    llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
                           TLI->getDataLayout());
    unsigned AlignBits = KnownZero.countTrailingOnes();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = 1 << 31;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst.
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != (1 << 31)) {
    const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/high parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector()) {
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  } else {
    unsigned NumElements = VT.getVectorNumElements();
    assert(!(NumElements & 1) && "Splitting vector, but not in half!");
    LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                   NumElements/2);
  }
  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high parts.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
         N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, TLI->getVectorIdxTy()));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), TLI->getVectorIdxTy()));
  return std::make_pair(Lo, Hi);
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy();
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, IdxTy)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
                                        APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool isBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned sz = VT.getSizeInBits();
  if (MinSplatBits > sz)
    return false;

  SplatValue = APInt(sz, 0);
  SplatUndef = APInt(sz, 0);

  // Get the bits.  Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue.  If any of the values are not constant, give up and return
  // false.
  unsigned nOps = getNumOperands();
  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();

  for (unsigned j = 0; j < nOps; ++j) {
    unsigned i = isBigEndian ? nOps-1-j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltBitSize;

    if (OpVal.getOpcode() == ISD::UNDEF)
      SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
                    zextOrTrunc(sz) << BitPos;
    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
    else
      return false;
  }

  // The build_vector is all constants or undefs.  Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);
  while (sz > 8) {
    unsigned HalfSize = sz / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue  = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef  = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
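    // Worked example: the 32-bit pattern 0x01010101 halves to 0x0101 vs.
    // 0x0101 (match), then to 0x01 vs. 0x01 (match), and the loop stops at
    // sz == 8, reporting an 8-bit splat of 0x01.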
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    sz = HalfSize;
  }

  SplatBitSize = sz;
  return true;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SDValue Op = getOperand(i);
    if (Op.getOpcode() == ISD::UNDEF) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    assert(getOperand(0).getOpcode() == ISD::UNDEF &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(0);
  }

  return Splatted;
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(UndefElements).getNode());
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(UndefElements).getNode());
}

bool BuildVectorSDNode::isConstant() const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    unsigned Opc = getOperand(i).getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the
  // first non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSet<const SDNode*, 32> &Visited,
                                 SmallPtrSet<const SDNode*, 32> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
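  // Here Visited holds only the nodes on the current DFS path (each node is
  // erased again on the way back out below), while Checked accumulates nodes
  // whose entire operand subgraph has already been proven acyclic.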
  if (!Visited.insert(N)) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef XDEBUG
  check = true;
#endif  // XDEBUG
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif  // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}