//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

#define DEBUG_TYPE "selectiondag"

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  DEBUG(
    dbgs() << Msg;
    V.getNode()->dump(G);
  );
}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
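  // Illustrative example (hypothetical shapes, not tied to one target): after
  // promoting v4i8 elements to i32, an all-ones splat may be built from i32
  // constants of 0x000000FF; countTrailingOnes() == 8 still covers the 8
  // element bits, so the vector counts as all-ones even though each i32
  // constant, taken on its own, is not.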
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (!Op.isUndef())
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if it is an unsigned operation. Return zero if the operation does not
/// depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
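
// Worked example of the bit twiddling above (the CondCode bit layout is
// inferred from these helpers: E = 1, G = 2, L = 4, U = 8, with integer
// codes additionally carrying a high bit; treat the raw values as
// illustrative):
//   getSetCCSwappedOperands(SETUGT): U|G (0b1010) becomes U|L (0b1100),
//   i.e. SETULT, since "x u> y" holds exactly when "y u< x".
//   getSetCCOrOperation(SETGT, SETLT, /*IsInteger=*/true): ORing the G and
//   L bits yields G|L, i.e. SETNE, since x > y || x < y <=> x != y.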

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
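  // Note (illustrative): the memory cases below also fold the address space
  // into the ID, so two otherwise-identical loads or stores from different
  // address spaces are never CSE'd into a single node.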
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
//                          SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}
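
// For instance, a node whose second result type is MVT::Glue (as in many
// flag-producing target sequences) is rejected by the loop above and is
// therefore never entered into the CSE maps.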

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This could
    // happen if replacing a node causes a node previously added to the worklist
    // to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}
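
// A small worked example of the worklist scheme above (hypothetical nodes):
// deleting a dead (add X, Y) clears its uses of X and Y; if that add was Y's
// only user, Y becomes use_empty() and is pushed onto DeadNodes, so an
// entire dead expression tree is reclaimed in a single pass.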

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG
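
// Note on the BUILD_VECTOR checks above (illustrative): after type
// legalization a v8i8 BUILD_VECTOR may legitimately carry i32 operands, so
// the assertions only require integer operands at least as wide as the
// element type, not an exact type match.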

/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it.  Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}
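
// Sketch of the merge performed above (hypothetical nodes): if (add X, Y)
// has been morphed in place into (sub X, Y) and an identical (sub X, Y)
// already sits in the CSE map, every user of the morphed node is transferred
// to the pre-existing node and the morphed node itself is deleted.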

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}
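
// e.g. MVT::iPTR has no IR type of its own, so getEVTAlignment above falls
// back to the ABI alignment of i8* in address space 0; every other EVT is
// mapped to its IR type first.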

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  Context = &MF->getFunction().getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location.  Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}
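
// Illustrative uses of the ext-or-trunc helpers below: getSExtOrTrunc from
// i16 to i32 emits SIGN_EXTEND and from i64 to i32 emits TRUNCATE; when the
// types already match, the resulting no-op TRUNCATE is assumed to be folded
// away by getNode() (an assumption about getNode's folding, not something
// enforced here).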

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType().getScalarType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                              EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}
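
// Illustrative: an ANY_EXTEND_VECTOR_INREG from v4i32 to v2i64 widens the
// low two i32 lanes into i64 lanes; the asserts above guarantee that the
// total bit width (128 in this example) is preserved while the lane count
// shrinks.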

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue TrueValue;
  switch (TLI->getBooleanContents(VT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    TrueValue = getConstant(1, DL, VT);
    break;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
                            VT);
    break;
  }
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
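    // Worked example (mirroring the v2i64-on-MIPS32 case mentioned above):
    // EltVT = i64 expands to ViaEltVT = i32, so ViaVecNumElts = 128 / 32 = 4
    // and ViaVecVT = v4i32; each i64 element is then emitted as two i32
    // parts below.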
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // from the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
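  // For example, +0.0 and -0.0 compare equal as doubles but have distinct
  // bit patterns (0x0000000000000000 vs 0x8000000000000000), so they map to
  // distinct ConstantFP nodes here.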
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = MF->getFunction().optForSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
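
// Note (illustrative): Alignment == 0 above means "use the target default":
// the ABI alignment of the constant's type when optimizing for size,
// otherwise the (possibly larger) preferred alignment.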

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}
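
// e.g. (hypothetical operands): shuffle A, B, <0,5,2,7> commutes to
// shuffle B, A, <4,1,6,3>; indices below NElts move to the other operand by
// adding NElts, and indices at or above NElts move back by subtracting it.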

SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  // If shuffling a splat, try to blend the splat instead. We do this here so
  // that even when this arises during lowering we don't have to re-handle it.
  auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
    BitVector UndefElements;
    SDValue Splat = BV->getSplatValue(&UndefElements);
    if (!Splat)
      return;

    for (int i = 0; i < NElts; ++i) {
      if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
        continue;

      // If this input comes from undef, mark it as such.
      if (UndefElements[MaskVec[i] - Offset]) {
        MaskVec[i] = -1;
        continue;
      }

      // If we can blend a non-undef lane, use that instead.
      if (!UndefElements[i])
        MaskVec[i] = i + Offset;
    }
  };
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    BlendSplat(N1BV, 0);
  if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
    BlendSplat(N2BV, NElts);

  // Canonicalize all indices into lhs, -> shuffle lhs, undef
  // Canonicalize all indices into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;
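
  // (Illustrative: with NElts = 4, a mask of <0,1,-1,3> is still treated as
  // the identity above, because undef (-1) lanes may match any position.)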
We check that these don't change the number 1576 // (and size) of elements and just changes their types. 1577 while (V.getOpcode() == ISD::BITCAST) 1578 V = V->getOperand(0); 1579 1580 // A splat should always show up as a build vector node. 1581 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 1582 BitVector UndefElements; 1583 SDValue Splat = BV->getSplatValue(&UndefElements); 1584 // If this is a splat of an undef, shuffling it is also undef. 1585 if (Splat && Splat.isUndef()) 1586 return getUNDEF(VT); 1587 1588 bool SameNumElts = 1589 V.getValueType().getVectorNumElements() == VT.getVectorNumElements(); 1590 1591 // We only have a splat which can skip shuffles if there is a splatted 1592 // value and no undef lanes rearranged by the shuffle. 1593 if (Splat && UndefElements.none()) { 1594 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the 1595 // number of elements match or the value splatted is a zero constant. 1596 if (SameNumElts) 1597 return N1; 1598 if (auto *C = dyn_cast<ConstantSDNode>(Splat)) 1599 if (C->isNullValue()) 1600 return N1; 1601 } 1602 1603 // If the shuffle itself creates a splat, build the vector directly. 1604 if (AllSame && SameNumElts) { 1605 EVT BuildVT = BV->getValueType(0); 1606 const SDValue &Splatted = BV->getOperand(MaskVec[0]); 1607 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted); 1608 1609 // We may have jumped through bitcasts, so the type of the 1610 // BUILD_VECTOR may not match the type of the shuffle. 1611 if (BuildVT != VT) 1612 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV); 1613 return NewBV; 1614 } 1615 } 1616 } 1617 1618 FoldingSetNodeID ID; 1619 SDValue Ops[2] = { N1, N2 }; 1620 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops); 1621 for (int i = 0; i != NElts; ++i) 1622 ID.AddInteger(MaskVec[i]); 1623 1624 void* IP = nullptr; 1625 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1626 return SDValue(E, 0); 1627 1628 // Allocate the mask array for the node out of the BumpPtrAllocator, since 1629 // SDNode doesn't have access to it. This memory will be "leaked" when 1630 // the node is deallocated, but recovered when the NodeAllocator is released. 
1631 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1632 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc); 1633 1634 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1635 dl.getDebugLoc(), MaskAlloc); 1636 createOperands(N, Ops); 1637 1638 CSEMap.InsertNode(N, IP); 1639 InsertNode(N); 1640 SDValue V = SDValue(N, 0); 1641 NewSDValueDbgMsg(V, "Creating new node: ", this); 1642 return V; 1643 } 1644 1645 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1646 MVT VT = SV.getSimpleValueType(0); 1647 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1648 ShuffleVectorSDNode::commuteMask(MaskVec); 1649 1650 SDValue Op0 = SV.getOperand(0); 1651 SDValue Op1 = SV.getOperand(1); 1652 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1653 } 1654 1655 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1656 FoldingSetNodeID ID; 1657 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1658 ID.AddInteger(RegNo); 1659 void *IP = nullptr; 1660 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1661 return SDValue(E, 0); 1662 1663 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1664 CSEMap.InsertNode(N, IP); 1665 InsertNode(N); 1666 return SDValue(N, 0); 1667 } 1668 1669 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1670 FoldingSetNodeID ID; 1671 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1672 ID.AddPointer(RegMask); 1673 void *IP = nullptr; 1674 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1675 return SDValue(E, 0); 1676 1677 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1678 CSEMap.InsertNode(N, IP); 1679 InsertNode(N); 1680 return SDValue(N, 0); 1681 } 1682 1683 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1684 MCSymbol *Label) { 1685 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1686 } 1687 1688 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1689 SDValue Root, MCSymbol *Label) { 1690 FoldingSetNodeID ID; 1691 SDValue Ops[] = { Root }; 1692 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1693 ID.AddPointer(Label); 1694 void *IP = nullptr; 1695 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1696 return SDValue(E, 0); 1697 1698 auto *N = newSDNode<LabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label); 1699 createOperands(N, Ops); 1700 1701 CSEMap.InsertNode(N, IP); 1702 InsertNode(N); 1703 return SDValue(N, 0); 1704 } 1705 1706 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1707 int64_t Offset, 1708 bool isTarget, 1709 unsigned char TargetFlags) { 1710 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1711 1712 FoldingSetNodeID ID; 1713 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1714 ID.AddPointer(BA); 1715 ID.AddInteger(Offset); 1716 ID.AddInteger(TargetFlags); 1717 void *IP = nullptr; 1718 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1719 return SDValue(E, 0); 1720 1721 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1722 CSEMap.InsertNode(N, IP); 1723 InsertNode(N); 1724 return SDValue(N, 0); 1725 } 1726 1727 SDValue SelectionDAG::getSrcValue(const Value *V) { 1728 assert((!V || V->getType()->isPointerTy()) && 1729 "SrcValue is not a pointer?"); 1730 1731 FoldingSetNodeID ID; 1732 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1733 ID.AddPointer(V); 1734 1735 void *IP = nullptr; 1736 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1737 return SDValue(E, 0); 1738 1739 auto *N = newSDNode<SrcValueSDNode>(V); 1740 CSEMap.InsertNode(N, IP); 1741 InsertNode(N); 1742 return SDValue(N, 0); 1743 } 1744 1745 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1746 FoldingSetNodeID ID; 1747 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1748 ID.AddPointer(MD); 1749 1750 void *IP = nullptr; 1751 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1752 return SDValue(E, 0); 1753 1754 auto *N = newSDNode<MDNodeSDNode>(MD); 1755 CSEMap.InsertNode(N, IP); 1756 InsertNode(N); 1757 return SDValue(N, 0); 1758 } 1759 1760 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1761 if (VT == V.getValueType()) 1762 return V; 1763 1764 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1765 } 1766 1767 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1768 unsigned SrcAS, unsigned DestAS) { 1769 SDValue Ops[] = {Ptr}; 1770 FoldingSetNodeID ID; 1771 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1772 ID.AddInteger(SrcAS); 1773 ID.AddInteger(DestAS); 1774 1775 void *IP = nullptr; 1776 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1777 return SDValue(E, 0); 1778 1779 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1780 VT, SrcAS, DestAS); 1781 createOperands(N, Ops); 1782 1783 CSEMap.InsertNode(N, IP); 1784 InsertNode(N); 1785 return SDValue(N, 0); 1786 } 1787 1788 /// getShiftAmountOperand - Return the specified value casted to 1789 /// the target's desired shift amount type. 
1790 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1791 EVT OpTy = Op.getValueType(); 1792 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1793 if (OpTy == ShTy || OpTy.isVector()) return Op; 1794 1795 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1796 } 1797 1798 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1799 SDLoc dl(Node); 1800 const TargetLowering &TLI = getTargetLoweringInfo(); 1801 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1802 EVT VT = Node->getValueType(0); 1803 SDValue Tmp1 = Node->getOperand(0); 1804 SDValue Tmp2 = Node->getOperand(1); 1805 unsigned Align = Node->getConstantOperandVal(3); 1806 1807 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1808 Tmp2, MachinePointerInfo(V)); 1809 SDValue VAList = VAListLoad; 1810 1811 if (Align > TLI.getMinStackArgumentAlignment()) { 1812 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 1813 1814 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1815 getConstant(Align - 1, dl, VAList.getValueType())); 1816 1817 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1818 getConstant(-(int64_t)Align, dl, VAList.getValueType())); 1819 } 1820 1821 // Increment the pointer, VAList, to the next vaarg 1822 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1823 getConstant(getDataLayout().getTypeAllocSize( 1824 VT.getTypeForEVT(*getContext())), 1825 dl, VAList.getValueType())); 1826 // Store the incremented VAList to the legalized pointer 1827 Tmp1 = 1828 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1829 // Load the actual argument out of the pointer VAList 1830 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1831 } 1832 1833 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1834 SDLoc dl(Node); 1835 const TargetLowering &TLI = getTargetLoweringInfo(); 1836 // This defaults to loading a pointer from the input and storing it to the 1837 // output, returning the chain. 
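// Roughly the C analogue of this default expansion (an illustrative sketch
// only; real va_list layouts are target-specific):
//   void va_copy_default(char **dst, char **src) { *dst = *src; }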
1838 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1839 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1840 SDValue Tmp1 = 1841 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1842 Node->getOperand(2), MachinePointerInfo(VS)); 1843 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1844 MachinePointerInfo(VD)); 1845 } 1846 1847 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1848 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1849 unsigned ByteSize = VT.getStoreSize(); 1850 Type *Ty = VT.getTypeForEVT(*getContext()); 1851 unsigned StackAlign = 1852 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1853 1854 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1855 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1856 } 1857 1858 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1859 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 1860 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1861 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1862 const DataLayout &DL = getDataLayout(); 1863 unsigned Align = 1864 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1865 1866 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1867 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 1868 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1869 } 1870 1871 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 1872 ISD::CondCode Cond, const SDLoc &dl) { 1873 // These setcc operations always fold. 1874 switch (Cond) { 1875 default: break; 1876 case ISD::SETFALSE: 1877 case ISD::SETFALSE2: return getConstant(0, dl, VT); 1878 case ISD::SETTRUE: 1879 case ISD::SETTRUE2: { 1880 TargetLowering::BooleanContent Cnt = 1881 TLI->getBooleanContents(N1->getValueType(0)); 1882 return getConstant( 1883 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? 
-1ULL : 1, dl, 1884 VT); 1885 } 1886 1887 case ISD::SETOEQ: 1888 case ISD::SETOGT: 1889 case ISD::SETOGE: 1890 case ISD::SETOLT: 1891 case ISD::SETOLE: 1892 case ISD::SETONE: 1893 case ISD::SETO: 1894 case ISD::SETUO: 1895 case ISD::SETUEQ: 1896 case ISD::SETUNE: 1897 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!"); 1898 break; 1899 } 1900 1901 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 1902 const APInt &C2 = N2C->getAPIntValue(); 1903 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 1904 const APInt &C1 = N1C->getAPIntValue(); 1905 1906 switch (Cond) { 1907 default: llvm_unreachable("Unknown integer setcc!"); 1908 case ISD::SETEQ: return getConstant(C1 == C2, dl, VT); 1909 case ISD::SETNE: return getConstant(C1 != C2, dl, VT); 1910 case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT); 1911 case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT); 1912 case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT); 1913 case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT); 1914 case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT); 1915 case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT); 1916 case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT); 1917 case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT); 1918 } 1919 } 1920 } 1921 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) { 1922 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) { 1923 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); 1924 switch (Cond) { 1925 default: break; 1926 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 1927 return getUNDEF(VT); 1928 LLVM_FALLTHROUGH; 1929 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT); 1930 case ISD::SETNE: if (R==APFloat::cmpUnordered) 1931 return getUNDEF(VT); 1932 LLVM_FALLTHROUGH; 1933 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan || 1934 R==APFloat::cmpLessThan, dl, VT); 1935 case ISD::SETLT: if (R==APFloat::cmpUnordered) 1936 return getUNDEF(VT); 1937 LLVM_FALLTHROUGH; 1938 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT); 1939 case ISD::SETGT: if (R==APFloat::cmpUnordered) 1940 return getUNDEF(VT); 1941 LLVM_FALLTHROUGH; 1942 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT); 1943 case ISD::SETLE: if (R==APFloat::cmpUnordered) 1944 return getUNDEF(VT); 1945 LLVM_FALLTHROUGH; 1946 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan || 1947 R==APFloat::cmpEqual, dl, VT); 1948 case ISD::SETGE: if (R==APFloat::cmpUnordered) 1949 return getUNDEF(VT); 1950 LLVM_FALLTHROUGH; 1951 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan || 1952 R==APFloat::cmpEqual, dl, VT); 1953 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT); 1954 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT); 1955 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered || 1956 R==APFloat::cmpEqual, dl, VT); 1957 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT); 1958 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered || 1959 R==APFloat::cmpLessThan, dl, VT); 1960 case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan || 1961 R==APFloat::cmpUnordered, dl, VT); 1962 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT); 1963 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT); 1964 } 1965 } else { 1966 // Ensure that the constant occurs on the RHS. 
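// For example (illustrative): (setcc 5, X, setlt) can be rewritten as
// (setcc X, 5, setgt), provided the swapped condition code is legal for the
// comparison type.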
1967 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 1968 MVT CompVT = N1.getValueType().getSimpleVT(); 1969 if (!TLI->isCondCodeLegal(SwappedCond, CompVT)) 1970 return SDValue(); 1971 1972 return getSetCC(dl, VT, N2, N1, SwappedCond); 1973 } 1974 } 1975 1976 // Could not fold it. 1977 return SDValue(); 1978 } 1979 1980 /// See if the specified operand can be simplified with the knowledge that only 1981 /// the bits specified by Mask are used. 1982 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &Mask) { 1983 switch (V.getOpcode()) { 1984 default: 1985 break; 1986 case ISD::Constant: { 1987 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode()); 1988 assert(CV && "Const value should be ConstSDNode."); 1989 const APInt &CVal = CV->getAPIntValue(); 1990 APInt NewVal = CVal & Mask; 1991 if (NewVal != CVal) 1992 return getConstant(NewVal, SDLoc(V), V.getValueType()); 1993 break; 1994 } 1995 case ISD::OR: 1996 case ISD::XOR: 1997 // If the LHS or RHS don't contribute bits to the or, drop them. 1998 if (MaskedValueIsZero(V.getOperand(0), Mask)) 1999 return V.getOperand(1); 2000 if (MaskedValueIsZero(V.getOperand(1), Mask)) 2001 return V.getOperand(0); 2002 break; 2003 case ISD::SRL: 2004 // Only look at single-use SRLs. 2005 if (!V.getNode()->hasOneUse()) 2006 break; 2007 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 2008 // See if we can recursively simplify the LHS. 2009 unsigned Amt = RHSC->getZExtValue(); 2010 2011 // Watch out for shift count overflow though. 2012 if (Amt >= Mask.getBitWidth()) 2013 break; 2014 APInt NewMask = Mask << Amt; 2015 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask)) 2016 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS, 2017 V.getOperand(1)); 2018 } 2019 break; 2020 case ISD::AND: { 2021 // X & -1 -> X (ignoring bits which aren't demanded). 2022 ConstantSDNode *AndVal = isConstOrConstSplat(V.getOperand(1)); 2023 if (AndVal && Mask.isSubsetOf(AndVal->getAPIntValue())) 2024 return V.getOperand(0); 2025 break; 2026 } 2027 case ISD::ANY_EXTEND: { 2028 SDValue Src = V.getOperand(0); 2029 unsigned SrcBitWidth = Src.getScalarValueSizeInBits(); 2030 // Being conservative here - only peek through if we only demand bits in the 2031 // non-extended source (even though the extended bits are technically undef). 2032 if (Mask.getActiveBits() > SrcBitWidth) 2033 break; 2034 APInt SrcMask = Mask.trunc(SrcBitWidth); 2035 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcMask)) 2036 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc); 2037 break; 2038 } 2039 } 2040 return SDValue(); 2041 } 2042 2043 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2044 /// use this predicate to simplify operations downstream. 2045 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2046 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2047 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2048 } 2049 2050 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2051 /// this predicate to simplify operations downstream. Mask is known to be zero 2052 /// for bits that V cannot have. 
2053 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 2054 unsigned Depth) const { 2055 KnownBits Known; 2056 computeKnownBits(Op, Known, Depth); 2057 return Mask.isSubsetOf(Known.Zero); 2058 } 2059 2060 /// Helper function that checks to see if a node is a constant or a 2061 /// build vector of splat constants at least within the demanded elts. 2062 static ConstantSDNode *isConstOrDemandedConstSplat(SDValue N, 2063 const APInt &DemandedElts) { 2064 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 2065 return CN; 2066 if (N.getOpcode() != ISD::BUILD_VECTOR) 2067 return nullptr; 2068 EVT VT = N.getValueType(); 2069 ConstantSDNode *Cst = nullptr; 2070 unsigned NumElts = VT.getVectorNumElements(); 2071 assert(DemandedElts.getBitWidth() == NumElts && "Unexpected vector size"); 2072 for (unsigned i = 0; i != NumElts; ++i) { 2073 if (!DemandedElts[i]) 2074 continue; 2075 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(i)); 2076 if (!C || (Cst && Cst->getAPIntValue() != C->getAPIntValue()) || 2077 C->getValueType(0) != VT.getScalarType()) 2078 return nullptr; 2079 Cst = C; 2080 } 2081 return Cst; 2082 } 2083 2084 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 2085 /// is less than the element bit-width of the shift node, return it. 2086 static const APInt *getValidShiftAmountConstant(SDValue V) { 2087 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 2088 // Shifting more than the bitwidth is not valid. 2089 const APInt &ShAmt = SA->getAPIntValue(); 2090 if (ShAmt.ult(V.getScalarValueSizeInBits())) 2091 return &ShAmt; 2092 } 2093 return nullptr; 2094 } 2095 2096 /// Determine which bits of Op are known to be either zero or one and return 2097 /// them in Known. For vectors, the known bits are those that are shared by 2098 /// every vector element. 2099 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 2100 unsigned Depth) const { 2101 EVT VT = Op.getValueType(); 2102 APInt DemandedElts = VT.isVector() 2103 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2104 : APInt(1, 1); 2105 computeKnownBits(Op, Known, DemandedElts, Depth); 2106 } 2107 2108 /// Determine which bits of Op are known to be either zero or one and return 2109 /// them in Known. The DemandedElts argument allows us to only collect the known 2110 /// bits that are shared by the requested vector elements. 2111 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 2112 const APInt &DemandedElts, 2113 unsigned Depth) const { 2114 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2115 2116 Known = KnownBits(BitWidth); // Don't know anything. 2117 2118 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2119 // We know all of the bits for a constant! 2120 Known.One = C->getAPIntValue(); 2121 Known.Zero = ~Known.One; 2122 return; 2123 } 2124 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { 2125 // We know all of the bits for a constant fp! 2126 Known.One = C->getValueAPF().bitcastToAPInt(); 2127 Known.Zero = ~Known.One; 2128 return; 2129 } 2130 2131 if (Depth == 6) 2132 return; // Limit search depth. 2133 2134 KnownBits Known2; 2135 unsigned NumElts = DemandedElts.getBitWidth(); 2136 2137 if (!DemandedElts) 2138 return; // No demanded elts, better to assume we don't know anything. 2139 2140 unsigned Opcode = Op.getOpcode(); 2141 switch (Opcode) { 2142 case ISD::BUILD_VECTOR: 2143 // Collect the known bits that are shared by every demanded vector element. 
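// Worked example (illustrative): for a v2i8 BUILD_VECTOR <0x0F, 0x07>, bit 3
// is one in the first element but zero in the second, so the intersection
// below yields Known.One = 0x07 and Known.Zero = 0xF0.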
2144 assert(NumElts == Op.getValueType().getVectorNumElements() &&
2145 "Unexpected vector size");
2146 Known.Zero.setAllBits(); Known.One.setAllBits();
2147 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2148 if (!DemandedElts[i])
2149 continue;
2150
2151 SDValue SrcOp = Op.getOperand(i);
2152 computeKnownBits(SrcOp, Known2, Depth + 1);
2153
2154 // BUILD_VECTOR can implicitly truncate sources; we must handle this.
2155 if (SrcOp.getValueSizeInBits() != BitWidth) {
2156 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2157 "Expected BUILD_VECTOR implicit truncation");
2158 Known2 = Known2.trunc(BitWidth);
2159 }
2160
2161 // Known bits are the values that are shared by every demanded element.
2162 Known.One &= Known2.One;
2163 Known.Zero &= Known2.Zero;
2164
2165 // If we don't know any bits, early out.
2166 if (Known.isUnknown())
2167 break;
2168 }
2169 break;
2170 case ISD::VECTOR_SHUFFLE: {
2171 // Collect the known bits that are shared by every vector element referenced
2172 // by the shuffle.
2173 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2174 Known.Zero.setAllBits(); Known.One.setAllBits();
2175 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2176 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2177 for (unsigned i = 0; i != NumElts; ++i) {
2178 if (!DemandedElts[i])
2179 continue;
2180
2181 int M = SVN->getMaskElt(i);
2182 if (M < 0) {
2183 // For UNDEF elements, we don't know anything about the common state of
2184 // the shuffle result.
2185 Known.resetAll();
2186 DemandedLHS.clearAllBits();
2187 DemandedRHS.clearAllBits();
2188 break;
2189 }
2190
2191 if ((unsigned)M < NumElts)
2192 DemandedLHS.setBit((unsigned)M % NumElts);
2193 else
2194 DemandedRHS.setBit((unsigned)M % NumElts);
2195 }
2196 // Known bits are the values that are shared by every demanded element.
2197 if (!!DemandedLHS) {
2198 SDValue LHS = Op.getOperand(0);
2199 computeKnownBits(LHS, Known2, DemandedLHS, Depth + 1);
2200 Known.One &= Known2.One;
2201 Known.Zero &= Known2.Zero;
2202 }
2203 // If we don't know any bits, early out.
2204 if (Known.isUnknown())
2205 break;
2206 if (!!DemandedRHS) {
2207 SDValue RHS = Op.getOperand(1);
2208 computeKnownBits(RHS, Known2, DemandedRHS, Depth + 1);
2209 Known.One &= Known2.One;
2210 Known.Zero &= Known2.Zero;
2211 }
2212 break;
2213 }
2214 case ISD::CONCAT_VECTORS: {
2215 // Split DemandedElts and test each of the demanded subvectors.
2216 Known.Zero.setAllBits(); Known.One.setAllBits();
2217 EVT SubVectorVT = Op.getOperand(0).getValueType();
2218 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2219 unsigned NumSubVectors = Op.getNumOperands();
2220 for (unsigned i = 0; i != NumSubVectors; ++i) {
2221 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2222 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2223 if (!!DemandedSub) {
2224 SDValue Sub = Op.getOperand(i);
2225 computeKnownBits(Sub, Known2, DemandedSub, Depth + 1);
2226 Known.One &= Known2.One;
2227 Known.Zero &= Known2.Zero;
2228 }
2229 // If we don't know any bits, early out.
2230 if (Known.isUnknown())
2231 break;
2232 }
2233 break;
2234 }
2235 case ISD::INSERT_SUBVECTOR: {
2236 // If we know the element index, demand any elements from the subvector and
2237 // the remainder from the src it's inserted into, otherwise demand them all.
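// For example (illustrative): inserting a v2i32 subvector into a v8i32
// source at index 2 with all eight elements demanded sends demanded-elts
// mask 0b11 to the subvector and 0b11110011 to the source.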
2238 SDValue Src = Op.getOperand(0); 2239 SDValue Sub = Op.getOperand(1); 2240 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2241 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 2242 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) { 2243 Known.One.setAllBits(); 2244 Known.Zero.setAllBits(); 2245 uint64_t Idx = SubIdx->getZExtValue(); 2246 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 2247 if (!!DemandedSubElts) { 2248 computeKnownBits(Sub, Known, DemandedSubElts, Depth + 1); 2249 if (Known.isUnknown()) 2250 break; // early-out. 2251 } 2252 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts); 2253 APInt DemandedSrcElts = DemandedElts & ~SubMask; 2254 if (!!DemandedSrcElts) { 2255 computeKnownBits(Src, Known2, DemandedSrcElts, Depth + 1); 2256 Known.One &= Known2.One; 2257 Known.Zero &= Known2.Zero; 2258 } 2259 } else { 2260 computeKnownBits(Sub, Known, Depth + 1); 2261 if (Known.isUnknown()) 2262 break; // early-out. 2263 computeKnownBits(Src, Known2, Depth + 1); 2264 Known.One &= Known2.One; 2265 Known.Zero &= Known2.Zero; 2266 } 2267 break; 2268 } 2269 case ISD::EXTRACT_SUBVECTOR: { 2270 // If we know the element index, just demand that subvector elements, 2271 // otherwise demand them all. 2272 SDValue Src = Op.getOperand(0); 2273 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2274 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2275 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2276 // Offset the demanded elts by the subvector index. 2277 uint64_t Idx = SubIdx->getZExtValue(); 2278 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx); 2279 computeKnownBits(Src, Known, DemandedSrc, Depth + 1); 2280 } else { 2281 computeKnownBits(Src, Known, Depth + 1); 2282 } 2283 break; 2284 } 2285 case ISD::BITCAST: { 2286 SDValue N0 = Op.getOperand(0); 2287 EVT SubVT = N0.getValueType(); 2288 unsigned SubBitWidth = SubVT.getScalarSizeInBits(); 2289 2290 // Ignore bitcasts from unsupported types. 2291 if (!(SubVT.isInteger() || SubVT.isFloatingPoint())) 2292 break; 2293 2294 // Fast handling of 'identity' bitcasts. 2295 if (BitWidth == SubBitWidth) { 2296 computeKnownBits(N0, Known, DemandedElts, Depth + 1); 2297 break; 2298 } 2299 2300 // Support big-endian targets when it becomes useful. 2301 bool IsLE = getDataLayout().isLittleEndian(); 2302 if (!IsLE) 2303 break; 2304 2305 // Bitcast 'small element' vector to 'large element' scalar/vector. 2306 if ((BitWidth % SubBitWidth) == 0) { 2307 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2308 2309 // Collect known bits for the (larger) output by collecting the known 2310 // bits from each set of sub elements and shift these into place. 2311 // We need to separately call computeKnownBits for each set of 2312 // sub elements as the knownbits for each is likely to be different. 2313 unsigned SubScale = BitWidth / SubBitWidth; 2314 APInt SubDemandedElts(NumElts * SubScale, 0); 2315 for (unsigned i = 0; i != NumElts; ++i) 2316 if (DemandedElts[i]) 2317 SubDemandedElts.setBit(i * SubScale); 2318 2319 for (unsigned i = 0; i != SubScale; ++i) { 2320 computeKnownBits(N0, Known2, SubDemandedElts.shl(i), 2321 Depth + 1); 2322 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * i); 2323 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * i); 2324 } 2325 } 2326 2327 // Bitcast 'large element' scalar/vector to 'small element' vector. 
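// Worked example (illustrative, little-endian): for (v4i32 (bitcast v2i64 X))
// with only element 2 demanded, SubScale == 2, so we query i64 element 1 of X
// and keep bits [0,32) of its known bits (offset (2 % 2) * 32 == 0).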
2328 if ((SubBitWidth % BitWidth) == 0) {
2329 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2330
2331 // Collect known bits for the (smaller) output by collecting the known
2332 // bits from the overlapping larger input elements and extracting the
2333 // sub sections we actually care about.
2334 unsigned SubScale = SubBitWidth / BitWidth;
2335 APInt SubDemandedElts(NumElts / SubScale, 0);
2336 for (unsigned i = 0; i != NumElts; ++i)
2337 if (DemandedElts[i])
2338 SubDemandedElts.setBit(i / SubScale);
2339
2340 computeKnownBits(N0, Known2, SubDemandedElts, Depth + 1);
2341
2342 Known.Zero.setAllBits(); Known.One.setAllBits();
2343 for (unsigned i = 0; i != NumElts; ++i)
2344 if (DemandedElts[i]) {
2345 unsigned Offset = (i % SubScale) * BitWidth;
2346 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2347 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2348 // If we don't know any bits, early out.
2349 if (Known.isUnknown())
2350 break;
2351 }
2352 }
2353 break;
2354 }
2355 case ISD::AND:
2356 // If either the LHS or the RHS is zero, the result is zero.
2357 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
2358 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2359
2360 // Output known-1 bits are only known if set in both the LHS & RHS.
2361 Known.One &= Known2.One;
2362 // Output known-0 bits are known to be clear if zero in either the LHS or RHS.
2363 Known.Zero |= Known2.Zero;
2364 break;
2365 case ISD::OR:
2366 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
2367 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2368
2369 // Output known-0 bits are only known if clear in both the LHS & RHS.
2370 Known.Zero &= Known2.Zero;
2371 // Output known-1 bits are known to be set if set in either the LHS or RHS.
2372 Known.One |= Known2.One;
2373 break;
2374 case ISD::XOR: {
2375 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
2376 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2377
2378 // Output known-0 bits are known if clear or set in both the LHS & RHS.
2379 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
2380 // Output known-1 bits are known to be set if set in only one of the LHS, RHS.
2381 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
2382 Known.Zero = KnownZeroOut;
2383 break;
2384 }
2385 case ISD::MUL: {
2386 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
2387 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2388
2389 // If low bits are zero in either operand, output low known-0 bits.
2390 // Also compute a conservative estimate for high known-0 bits.
2391 // More trickiness is possible, but this is sufficient for the
2392 // interesting case of alignment computation.
2393 unsigned TrailZ = Known.countMinTrailingZeros() +
2394 Known2.countMinTrailingZeros();
2395 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
2396 Known2.countMinLeadingZeros(),
2397 BitWidth) - BitWidth;
2398
2399 Known.resetAll();
2400 Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
2401 Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
2402 break;
2403 }
2404 case ISD::UDIV: {
2405 // For the purposes of computing leading zeros we can conservatively
2406 // treat a udiv as a logical right shift by the power of 2 known to
2407 // be less than the denominator.
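// Worked example (illustrative): in a 32-bit udiv where the numerator has at
// least 10 leading zeros and the denominator has at most 24 (so it is at
// least 2^7), the quotient has at least 10 + (32 - 24 - 1) == 17 leading
// zeros.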
2408 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2409 unsigned LeadZ = Known2.countMinLeadingZeros(); 2410 2411 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2412 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2413 if (RHSMaxLeadingZeros != BitWidth) 2414 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2415 2416 Known.Zero.setHighBits(LeadZ); 2417 break; 2418 } 2419 case ISD::SELECT: 2420 case ISD::VSELECT: 2421 computeKnownBits(Op.getOperand(2), Known, DemandedElts, Depth+1); 2422 // If we don't know any bits, early out. 2423 if (Known.isUnknown()) 2424 break; 2425 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth+1); 2426 2427 // Only known if known in both the LHS and RHS. 2428 Known.One &= Known2.One; 2429 Known.Zero &= Known2.Zero; 2430 break; 2431 case ISD::SELECT_CC: 2432 computeKnownBits(Op.getOperand(3), Known, DemandedElts, Depth+1); 2433 // If we don't know any bits, early out. 2434 if (Known.isUnknown()) 2435 break; 2436 computeKnownBits(Op.getOperand(2), Known2, DemandedElts, Depth+1); 2437 2438 // Only known if known in both the LHS and RHS. 2439 Known.One &= Known2.One; 2440 Known.Zero &= Known2.Zero; 2441 break; 2442 case ISD::SMULO: 2443 case ISD::UMULO: 2444 if (Op.getResNo() != 1) 2445 break; 2446 // The boolean result conforms to getBooleanContents. 2447 // If we know the result of a setcc has the top bits zero, use this info. 2448 // We know that we have an integer-based boolean since these operations 2449 // are only available for integer. 2450 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2451 TargetLowering::ZeroOrOneBooleanContent && 2452 BitWidth > 1) 2453 Known.Zero.setBitsFrom(1); 2454 break; 2455 case ISD::SETCC: 2456 // If we know the result of a setcc has the top bits zero, use this info. 2457 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2458 TargetLowering::ZeroOrOneBooleanContent && 2459 BitWidth > 1) 2460 Known.Zero.setBitsFrom(1); 2461 break; 2462 case ISD::SHL: 2463 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2464 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2465 unsigned Shift = ShAmt->getZExtValue(); 2466 Known.Zero <<= Shift; 2467 Known.One <<= Shift; 2468 // Low bits are known zero. 2469 Known.Zero.setLowBits(Shift); 2470 } 2471 break; 2472 case ISD::SRL: 2473 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2474 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2475 unsigned Shift = ShAmt->getZExtValue(); 2476 Known.Zero.lshrInPlace(Shift); 2477 Known.One.lshrInPlace(Shift); 2478 // High bits are known zero. 2479 Known.Zero.setHighBits(Shift); 2480 } else if (auto *BV = dyn_cast<BuildVectorSDNode>(Op.getOperand(1))) { 2481 // If the shift amount is a vector of constants see if we can bound 2482 // the number of upper zero bits. 2483 unsigned ShiftAmountMin = BitWidth; 2484 for (unsigned i = 0; i != BV->getNumOperands(); ++i) { 2485 if (auto *C = dyn_cast<ConstantSDNode>(BV->getOperand(i))) { 2486 const APInt &ShAmt = C->getAPIntValue(); 2487 if (ShAmt.ult(BitWidth)) { 2488 ShiftAmountMin = std::min<unsigned>(ShiftAmountMin, 2489 ShAmt.getZExtValue()); 2490 continue; 2491 } 2492 } 2493 // Don't know anything. 
2494 ShiftAmountMin = 0; 2495 break; 2496 } 2497 2498 Known.Zero.setHighBits(ShiftAmountMin); 2499 } 2500 break; 2501 case ISD::SRA: 2502 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2503 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2504 unsigned Shift = ShAmt->getZExtValue(); 2505 // Sign extend known zero/one bit (else is unknown). 2506 Known.Zero.ashrInPlace(Shift); 2507 Known.One.ashrInPlace(Shift); 2508 } 2509 break; 2510 case ISD::SIGN_EXTEND_INREG: { 2511 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2512 unsigned EBits = EVT.getScalarSizeInBits(); 2513 2514 // Sign extension. Compute the demanded bits in the result that are not 2515 // present in the input. 2516 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2517 2518 APInt InSignMask = APInt::getSignMask(EBits); 2519 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2520 2521 // If the sign extended bits are demanded, we know that the sign 2522 // bit is demanded. 2523 InSignMask = InSignMask.zext(BitWidth); 2524 if (NewBits.getBoolValue()) 2525 InputDemandedBits |= InSignMask; 2526 2527 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2528 Known.One &= InputDemandedBits; 2529 Known.Zero &= InputDemandedBits; 2530 2531 // If the sign bit of the input is known set or clear, then we know the 2532 // top bits of the result. 2533 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear 2534 Known.Zero |= NewBits; 2535 Known.One &= ~NewBits; 2536 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set 2537 Known.One |= NewBits; 2538 Known.Zero &= ~NewBits; 2539 } else { // Input sign bit unknown 2540 Known.Zero &= ~NewBits; 2541 Known.One &= ~NewBits; 2542 } 2543 break; 2544 } 2545 case ISD::CTTZ: 2546 case ISD::CTTZ_ZERO_UNDEF: { 2547 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2548 // If we have a known 1, its position is our upper bound. 2549 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 2550 unsigned LowBits = Log2_32(PossibleTZ) + 1; 2551 Known.Zero.setBitsFrom(LowBits); 2552 break; 2553 } 2554 case ISD::CTLZ: 2555 case ISD::CTLZ_ZERO_UNDEF: { 2556 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2557 // If we have a known 1, its position is our upper bound. 2558 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 2559 unsigned LowBits = Log2_32(PossibleLZ) + 1; 2560 Known.Zero.setBitsFrom(LowBits); 2561 break; 2562 } 2563 case ISD::CTPOP: { 2564 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2565 // If we know some of the bits are zero, they can't be one. 2566 unsigned PossibleOnes = Known2.countMaxPopulation(); 2567 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 2568 break; 2569 } 2570 case ISD::LOAD: { 2571 LoadSDNode *LD = cast<LoadSDNode>(Op); 2572 // If this is a ZEXTLoad and we are looking at the loaded value. 
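// For example (illustrative): a load that zero-extends an i8 in memory to an
// i32 result has its top 24 bits known zero, so Known.Zero covers bits
// [8,32).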
2573 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2574 EVT VT = LD->getMemoryVT();
2575 unsigned MemBits = VT.getScalarSizeInBits();
2576 Known.Zero.setBitsFrom(MemBits);
2577 } else if (const MDNode *Ranges = LD->getRanges()) {
2578 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
2579 computeKnownBitsFromRangeMetadata(*Ranges, Known);
2580 }
2581 break;
2582 }
2583 case ISD::ZERO_EXTEND_VECTOR_INREG: {
2584 EVT InVT = Op.getOperand(0).getValueType();
2585 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
2586 computeKnownBits(Op.getOperand(0), Known, InDemandedElts, Depth + 1);
2587 Known = Known.zext(BitWidth);
2588 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
2589 break;
2590 }
2591 case ISD::ZERO_EXTEND: {
2592 EVT InVT = Op.getOperand(0).getValueType();
2593 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2594 Known = Known.zext(BitWidth);
2595 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
2596 break;
2597 }
2598 // TODO ISD::SIGN_EXTEND_VECTOR_INREG
2599 case ISD::SIGN_EXTEND: {
2600 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2601 // If the sign bit is known to be zero or one, then sext will extend
2602 // it to the top bits, else it will just zext.
2603 Known = Known.sext(BitWidth);
2604 break;
2605 }
2606 case ISD::ANY_EXTEND: {
2607 computeKnownBits(Op.getOperand(0), Known, Depth+1);
2608 Known = Known.zext(BitWidth);
2609 break;
2610 }
2611 case ISD::TRUNCATE: {
2612 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2613 Known = Known.trunc(BitWidth);
2614 break;
2615 }
2616 case ISD::AssertZext: {
2617 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2618 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2619 computeKnownBits(Op.getOperand(0), Known, Depth+1);
2620 Known.Zero |= (~InMask);
2621 Known.One &= (~Known.Zero);
2622 break;
2623 }
2624 case ISD::FGETSIGN:
2625 // All bits are zero except the low bit.
2626 Known.Zero.setBitsFrom(1);
2627 break;
2628 case ISD::USUBO:
2629 case ISD::SSUBO:
2630 if (Op.getResNo() == 1) {
2631 // If we know the result of a setcc has the top bits zero, use this info.
2632 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2633 TargetLowering::ZeroOrOneBooleanContent &&
2634 BitWidth > 1)
2635 Known.Zero.setBitsFrom(1);
2636 break;
2637 }
2638 LLVM_FALLTHROUGH;
2639 case ISD::SUB:
2640 case ISD::SUBC: {
2641 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) {
2642 // We know that the top bits of C-X are clear if X contains fewer bits
2643 // than C (i.e. no wrap-around can happen). For example, 20-X is
2644 // positive if we can prove that X is >= 0 and < 16.
2645 if (CLHS->getAPIntValue().isNonNegative()) {
2646 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2647 // NLZ can't be BitWidth with no sign bit
2648 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2649 computeKnownBits(Op.getOperand(1), Known2, DemandedElts,
2650 Depth + 1);
2651
2652 // If all of the MaskV bits are known to be zero, then we know the
2653 // output top bits are zero, because we now know that the output is
2654 // from [0-C].
2655 if ((Known2.Zero & MaskV) == MaskV) {
2656 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2657 // Top bits known zero.
2658 Known.Zero.setHighBits(NLZ2);
2659 }
2660 }
2661 }
2662
2663 // If low bits are known to be zero in both operands, then we know they are
2664 // going to be 0 in the result. Both addition and complement operations
2665 // preserve the low zero bits.
2666 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2667 unsigned KnownZeroLow = Known2.countMinTrailingZeros();
2668 if (KnownZeroLow == 0)
2669 break;
2670
2671 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2672 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
2673 Known.Zero.setLowBits(KnownZeroLow);
2674 break;
2675 }
2676 case ISD::UADDO:
2677 case ISD::SADDO:
2678 case ISD::ADDCARRY:
2679 if (Op.getResNo() == 1) {
2680 // If we know the result of a setcc has the top bits zero, use this info.
2681 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2682 TargetLowering::ZeroOrOneBooleanContent &&
2683 BitWidth > 1)
2684 Known.Zero.setBitsFrom(1);
2685 break;
2686 }
2687 LLVM_FALLTHROUGH;
2688 case ISD::ADD:
2689 case ISD::ADDC:
2690 case ISD::ADDE: {
2691 // Output low known-0 bits are the low bits known to be clear in both
2692 // the LHS and RHS. For example, 8+(X<<3) is known to have the
2693 // low 3 bits clear.
2694 // Output known-0 bits are also known if the top bits of each input are
2695 // known to be clear. For example, if one input has the top 10 bits clear
2696 // and the other has the top 8 bits clear, we know the top 7 bits of the
2697 // output must be clear.
2698 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2699 unsigned KnownZeroHigh = Known2.countMinLeadingZeros();
2700 unsigned KnownZeroLow = Known2.countMinTrailingZeros();
2701
2702 computeKnownBits(Op.getOperand(1), Known2, DemandedElts,
2703 Depth + 1);
2704 KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros());
2705 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
2706
2707 if (Opcode == ISD::ADDE || Opcode == ISD::ADDCARRY) {
2708 // With ADDE and ADDCARRY, a carry bit may be added in, so we can only
2709 // use this information if we know (at least) that the low two bits are
2710 // clear. We then return to the caller that the low bit is unknown but
2711 // that other bits are known zero.
2712 if (KnownZeroLow >= 2)
2713 Known.Zero.setBits(1, KnownZeroLow);
2714 break;
2715 }
2716
2717 Known.Zero.setLowBits(KnownZeroLow);
2718 if (KnownZeroHigh > 1)
2719 Known.Zero.setHighBits(KnownZeroHigh - 1);
2720 break;
2721 }
2722 case ISD::SREM:
2723 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2724 const APInt &RA = Rem->getAPIntValue().abs();
2725 if (RA.isPowerOf2()) {
2726 APInt LowBits = RA - 1;
2727 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2728
2729 // The low bits of the first operand are unchanged by the srem.
2730 Known.Zero = Known2.Zero & LowBits;
2731 Known.One = Known2.One & LowBits;
2732
2733 // If the first operand is non-negative or has all low bits zero, then
2734 // the upper bits are all zero.
2735 if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits))
2736 Known.Zero |= ~LowBits;
2737
2738 // If the first operand is negative and not all low bits are zero, then
2739 // the upper bits are all one.
2740 if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0))
2741 Known.One |= ~LowBits;
2742 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2743 }
2744 }
2745 break;
2746 case ISD::UREM: {
2747 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2748 const APInt &RA = Rem->getAPIntValue();
2749 if (RA.isPowerOf2()) {
2750 APInt LowBits = (RA - 1);
2751 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2752
2753 // The upper bits are all zero, the lower ones are unchanged.
2754 Known.Zero = Known2.Zero | ~LowBits;
2755 Known.One = Known2.One & LowBits;
2756 break;
2757 }
2758 }
2759
2760 // Since the result is less than or equal to either operand, any leading
2761 // zero bits in either operand must also exist in the result.
2762 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2763 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2764
2765 uint32_t Leaders =
2766 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
2767 Known.resetAll();
2768 Known.Zero.setHighBits(Leaders);
2769 break;
2770 }
2771 case ISD::EXTRACT_ELEMENT: {
2772 computeKnownBits(Op.getOperand(0), Known, Depth+1);
2773 const unsigned Index = Op.getConstantOperandVal(1);
2774 const unsigned BitWidth = Op.getValueSizeInBits();
2775
2776 // Remove the low part of the known-bits mask.
2777 Known.Zero = Known.Zero.getHiBits(Known.Zero.getBitWidth() - Index * BitWidth);
2778 Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth);
2779
2780 // Remove the high part of the known-bits mask.
2781 Known = Known.trunc(BitWidth);
2782 break;
2783 }
2784 case ISD::EXTRACT_VECTOR_ELT: {
2785 SDValue InVec = Op.getOperand(0);
2786 SDValue EltNo = Op.getOperand(1);
2787 EVT VecVT = InVec.getValueType();
2788 const unsigned BitWidth = Op.getValueSizeInBits();
2789 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
2790 const unsigned NumSrcElts = VecVT.getVectorNumElements();
2791 // If BitWidth > EltBitWidth, the value is implicitly any-extended, so we
2792 // do not know anything about the extended bits.
2793 if (BitWidth > EltBitWidth)
2794 Known = Known.trunc(EltBitWidth);
2795 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
2796 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
2797 // If we know the element index, just demand that vector element.
2798 unsigned Idx = ConstEltNo->getZExtValue();
2799 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
2800 computeKnownBits(InVec, Known, DemandedElt, Depth + 1);
2801 } else {
2802 // Unknown element index, so ignore DemandedElts and demand them all.
2803 computeKnownBits(InVec, Known, Depth + 1);
2804 }
2805 if (BitWidth > EltBitWidth)
2806 Known = Known.zext(BitWidth);
2807 break;
2808 }
2809 case ISD::INSERT_VECTOR_ELT: {
2810 SDValue InVec = Op.getOperand(0);
2811 SDValue InVal = Op.getOperand(1);
2812 SDValue EltNo = Op.getOperand(2);
2813
2814 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
2815 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
2816 // If we know the element index, split the demand between the
2817 // source vector and the inserted element.
2818 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
2819 unsigned EltIdx = CEltNo->getZExtValue();
2820
2821 // If we demand the inserted element then add its common known bits.
2822 if (DemandedElts[EltIdx]) {
2823 computeKnownBits(InVal, Known2, Depth + 1);
2824 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
2825 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
2826 }
2827
2828 // If we demand the source vector then add its common known bits, ensuring
2829 // that we don't demand the inserted element.
2830 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
2831 if (!!VectorElts) {
2832 computeKnownBits(InVec, Known2, VectorElts, Depth + 1);
2833 Known.One &= Known2.One;
2834 Known.Zero &= Known2.Zero;
2835 }
2836 } else {
2837 // Unknown element index, so ignore DemandedElts and demand them all.
2838 computeKnownBits(InVec, Known, Depth + 1);
2839 computeKnownBits(InVal, Known2, Depth + 1);
2840 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
2841 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
2842 }
2843 break;
2844 }
2845 case ISD::BITREVERSE: {
2846 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2847 Known.Zero = Known2.Zero.reverseBits();
2848 Known.One = Known2.One.reverseBits();
2849 break;
2850 }
2851 case ISD::BSWAP: {
2852 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2853 Known.Zero = Known2.Zero.byteSwap();
2854 Known.One = Known2.One.byteSwap();
2855 break;
2856 }
2857 case ISD::ABS: {
2858 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2859
2860 // If the source's MSB is zero then we know the rest of the bits already.
2861 if (Known2.isNonNegative()) {
2862 Known.Zero = Known2.Zero;
2863 Known.One = Known2.One;
2864 break;
2865 }
2866
2867 // We only know that the absolute value's MSB will be zero if there is
2868 // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
2869 Known2.One.clearSignBit();
2870 if (Known2.One.getBoolValue()) {
2871 Known.Zero = APInt::getSignMask(BitWidth);
2872 break;
2873 }
2874 break;
2875 }
2876 case ISD::UMIN: {
2877 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2878 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2879
2880 // UMIN - we know that the result will have the maximum of the
2881 // known leading zero bits of the inputs.
2882 unsigned LeadZero = Known.countMinLeadingZeros();
2883 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
2884
2885 Known.Zero &= Known2.Zero;
2886 Known.One &= Known2.One;
2887 Known.Zero.setHighBits(LeadZero);
2888 break;
2889 }
2890 case ISD::UMAX: {
2891 computeKnownBits(Op.getOperand(0), Known, DemandedElts,
2892 Depth + 1);
2893 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2894
2895 // UMAX - we know that the result will have the maximum of the
2896 // known leading one bits of the inputs.
2897 unsigned LeadOne = Known.countMinLeadingOnes();
2898 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
2899
2900 Known.Zero &= Known2.Zero;
2901 Known.One &= Known2.One;
2902 Known.One.setHighBits(LeadOne);
2903 break;
2904 }
2905 case ISD::SMIN:
2906 case ISD::SMAX: {
2907 computeKnownBits(Op.getOperand(0), Known, DemandedElts,
2908 Depth + 1);
2909 // If we don't know any bits, early out.
2910 if (Known.isUnknown())
2911 break;
2912 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2913 Known.Zero &= Known2.Zero;
2914 Known.One &= Known2.One;
2915 break;
2916 }
2917 case ISD::FrameIndex:
2918 case ISD::TargetFrameIndex:
2919 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
2920 break;
2921
2922 default:
2923 if (Opcode < ISD::BUILTIN_OP_END)
2924 break;
2925 LLVM_FALLTHROUGH;
2926 case ISD::INTRINSIC_WO_CHAIN:
2927 case ISD::INTRINSIC_W_CHAIN:
2928 case ISD::INTRINSIC_VOID:
2929 // Allow the target to implement this method for its nodes.
2930 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
2931 break;
2932 }
2933
2934 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
2935 }
2936
2937 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
2938 SDValue N1) const {
2939 // X + 0 never overflows.
2940 if (isNullConstant(N1))
2941 return OFK_Never;
2942
2943 KnownBits N1Known;
2944 computeKnownBits(N1, N1Known);
2945 if (N1Known.Zero.getBoolValue()) {
2946 KnownBits N0Known;
2947 computeKnownBits(N0, N0Known);
2948
2949 bool overflow;
2950 (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow);
2951 if (!overflow)
2952 return OFK_Never;
2953 }
2954
2955 // mulhi + 1 never overflows.
2956 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
2957 (~N1Known.Zero & 0x01) == ~N1Known.Zero)
2958 return OFK_Never;
2959
2960 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
2961 KnownBits N0Known;
2962 computeKnownBits(N0, N0Known);
2963
2964 if ((~N0Known.Zero & 0x01) == ~N0Known.Zero)
2965 return OFK_Never;
2966 }
2967
2968 return OFK_Sometime;
2969 }
2970
2971 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
2972 EVT OpVT = Val.getValueType();
2973 unsigned BitWidth = OpVT.getScalarSizeInBits();
2974
2975 // Is the constant a known power of 2?
2976 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
2977 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
2978
2979 // A left-shift of a constant one will have exactly one bit set because
2980 // shifting the bit off the end is undefined.
2981 if (Val.getOpcode() == ISD::SHL) {
2982 auto *C = isConstOrConstSplat(Val.getOperand(0));
2983 if (C && C->getAPIntValue() == 1)
2984 return true;
2985 }
2986
2987 // Similarly, a logical right-shift of a constant sign-bit will have exactly
2988 // one bit set.
2989 if (Val.getOpcode() == ISD::SRL) {
2990 auto *C = isConstOrConstSplat(Val.getOperand(0));
2991 if (C && C->getAPIntValue().isSignMask())
2992 return true;
2993 }
2994
2995 // Are all operands of a build vector constant powers of two?
2996 if (Val.getOpcode() == ISD::BUILD_VECTOR)
2997 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
2998 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
2999 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3000 return false;
3001 }))
3002 return true;
3003
3004 // More could be done here, though the above checks are enough
3005 // to handle some common cases.
3006
3007 // Fall back to computeKnownBits to catch other known cases.
3008 KnownBits Known;
3009 computeKnownBits(Val, Known);
3010 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3011 }
3012
3013 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3014 EVT VT = Op.getValueType();
3015 APInt DemandedElts = VT.isVector()
3016 ?
APInt::getAllOnesValue(VT.getVectorNumElements())
3017 : APInt(1, 1);
3018 return ComputeNumSignBits(Op, DemandedElts, Depth);
3019 }
3020
3021 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3022 unsigned Depth) const {
3023 EVT VT = Op.getValueType();
3024 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3025 unsigned VTBits = VT.getScalarSizeInBits();
3026 unsigned NumElts = DemandedElts.getBitWidth();
3027 unsigned Tmp, Tmp2;
3028 unsigned FirstAnswer = 1;
3029
3030 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3031 const APInt &Val = C->getAPIntValue();
3032 return Val.getNumSignBits();
3033 }
3034
3035 if (Depth == 6)
3036 return 1; // Limit search depth.
3037
3038 if (!DemandedElts)
3039 return 1; // No demanded elts, better to assume we don't know anything.
3040
3041 switch (Op.getOpcode()) {
3042 default: break;
3043 case ISD::AssertSext:
3044 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3045 return VTBits-Tmp+1;
3046 case ISD::AssertZext:
3047 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3048 return VTBits-Tmp;
3049
3050 case ISD::BUILD_VECTOR:
3051 Tmp = VTBits;
3052 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3053 if (!DemandedElts[i])
3054 continue;
3055
3056 SDValue SrcOp = Op.getOperand(i);
3057 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1);
3058
3059 // BUILD_VECTOR can implicitly truncate sources; we must handle this.
3060 if (SrcOp.getValueSizeInBits() != VTBits) {
3061 assert(SrcOp.getValueSizeInBits() > VTBits &&
3062 "Expected BUILD_VECTOR implicit truncation");
3063 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3064 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3065 }
3066 Tmp = std::min(Tmp, Tmp2);
3067 }
3068 return Tmp;
3069
3070 case ISD::VECTOR_SHUFFLE: {
3071 // Collect the minimum number of sign bits that are shared by every vector
3072 // element referenced by the shuffle.
3073 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3074 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3075 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3076 for (unsigned i = 0; i != NumElts; ++i) {
3077 int M = SVN->getMaskElt(i);
3078 if (!DemandedElts[i])
3079 continue;
3080 // For UNDEF elements, we don't know anything about the common state of
3081 // the shuffle result.
3082 if (M < 0)
3083 return 1;
3084 if ((unsigned)M < NumElts)
3085 DemandedLHS.setBit((unsigned)M % NumElts);
3086 else
3087 DemandedRHS.setBit((unsigned)M % NumElts);
3088 }
3089 Tmp = std::numeric_limits<unsigned>::max();
3090 if (!!DemandedLHS)
3091 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3092 if (!!DemandedRHS) {
3093 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3094 Tmp = std::min(Tmp, Tmp2);
3095 }
3096 // If we don't know anything, early out and try computeKnownBits fall-back.
3097 if (Tmp == 1)
3098 break;
3099 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3100 return Tmp;
3101 }
3102
3103 case ISD::BITCAST: {
3104 SDValue N0 = Op.getOperand(0);
3105 EVT SrcVT = N0.getValueType();
3106 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3107
3108 // Ignore bitcasts from unsupported types.
3109 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3110 break;
3111
3112 // Fast handling of 'identity' bitcasts.
3113 if (VTBits == SrcBits) 3114 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3115 3116 // Bitcast 'large element' scalar/vector to 'small element' vector. 3117 // TODO: Handle cases other than 'sign splat' when we have a use case. 3118 // Requires handling of DemandedElts and Endianness. 3119 if ((SrcBits % VTBits) == 0) { 3120 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 3121 Tmp = ComputeNumSignBits(N0, Depth + 1); 3122 if (Tmp == SrcBits) 3123 return VTBits; 3124 } 3125 break; 3126 } 3127 3128 case ISD::SIGN_EXTEND: 3129 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3130 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3131 case ISD::SIGN_EXTEND_INREG: 3132 // Max of the input and what this extends. 3133 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3134 Tmp = VTBits-Tmp+1; 3135 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3136 return std::max(Tmp, Tmp2); 3137 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3138 SDValue Src = Op.getOperand(0); 3139 EVT SrcVT = Src.getValueType(); 3140 APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements()); 3141 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3142 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3143 } 3144 3145 case ISD::SRA: 3146 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3147 // SRA X, C -> adds C sign bits. 3148 if (ConstantSDNode *C = 3149 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) { 3150 APInt ShiftVal = C->getAPIntValue(); 3151 ShiftVal += Tmp; 3152 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); 3153 } 3154 return Tmp; 3155 case ISD::SHL: 3156 if (ConstantSDNode *C = 3157 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) { 3158 // shl destroys sign bits. 3159 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3160 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 3161 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 3162 return Tmp - C->getZExtValue(); 3163 } 3164 break; 3165 case ISD::AND: 3166 case ISD::OR: 3167 case ISD::XOR: // NOT is handled here. 3168 // Logical binary ops preserve the number of sign bits at the worst. 3169 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3170 if (Tmp != 1) { 3171 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3172 FirstAnswer = std::min(Tmp, Tmp2); 3173 // We computed what we know about the sign bits as our first 3174 // answer. Now proceed to the generic code that uses 3175 // computeKnownBits, and pick whichever answer is better. 3176 } 3177 break; 3178 3179 case ISD::SELECT: 3180 case ISD::VSELECT: 3181 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3182 if (Tmp == 1) return 1; // Early out. 3183 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3184 return std::min(Tmp, Tmp2); 3185 case ISD::SELECT_CC: 3186 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3187 if (Tmp == 1) return 1; // Early out. 3188 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3189 return std::min(Tmp, Tmp2); 3190 3191 case ISD::SMIN: 3192 case ISD::SMAX: 3193 case ISD::UMIN: 3194 case ISD::UMAX: 3195 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3196 if (Tmp == 1) 3197 return 1; // Early out. 
3198     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3199     return std::min(Tmp, Tmp2);
3200   case ISD::SADDO:
3201   case ISD::UADDO:
3202   case ISD::SSUBO:
3203   case ISD::USUBO:
3204   case ISD::SMULO:
3205   case ISD::UMULO:
3206     if (Op.getResNo() != 1)
3207       break;
3208     // The boolean result conforms to getBooleanContents.
3209     // If setcc returns 0/-1, all bits are sign bits.
3210     // We know that we have an integer-based boolean since these operations
3211     // are only available for integer types.
3212     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3213         TargetLowering::ZeroOrNegativeOneBooleanContent)
3214       return VTBits;
3215     break;
3216   case ISD::SETCC:
3217     // If setcc returns 0/-1, all bits are sign bits.
3218     if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3219         TargetLowering::ZeroOrNegativeOneBooleanContent)
3220       return VTBits;
3221     break;
3222   case ISD::ROTL:
3223   case ISD::ROTR:
3224     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3225       unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3226 
3227       // Handle rotate right by N like a rotate left by VTBits-N.
3228       if (Op.getOpcode() == ISD::ROTR)
3229         RotAmt = (VTBits - RotAmt) % VTBits;
3230 
3231       // If we aren't rotating out all of the known-in sign bits, return the
3232       // number that are left. This handles rotl(sext(x), 1) for example.
3233       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3234       if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3235     }
3236     break;
3237   case ISD::ADD:
3238   case ISD::ADDC:
3239     // Add can have at most one carry bit. Thus we know that the output
3240     // is, at worst, one more bit than the inputs.
3241     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3242     if (Tmp == 1) return 1; // Early out.
3243 
3244     // Special case decrementing a value (ADD X, -1):
3245     if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3246       if (CRHS->isAllOnesValue()) {
3247         KnownBits Known;
3248         computeKnownBits(Op.getOperand(0), Known, Depth+1);
3249 
3250         // If the input is known to be 0 or 1, the output is 0/-1, which is all
3251         // sign bits set.
3252         if ((Known.Zero | 1).isAllOnesValue())
3253           return VTBits;
3254 
3255         // If we are subtracting one from a positive number, there is no carry
3256         // out of the result.
3257         if (Known.isNonNegative())
3258           return Tmp;
3259       }
3260 
3261     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3262     if (Tmp2 == 1) return 1;
3263     return std::min(Tmp, Tmp2)-1;
3264 
3265   case ISD::SUB:
3266     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3267     if (Tmp2 == 1) return 1;
3268 
3269     // Handle NEG.
3270     if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3271       if (CLHS->isNullValue()) {
3272         KnownBits Known;
3273         computeKnownBits(Op.getOperand(1), Known, Depth+1);
3274         // If the input is known to be 0 or 1, the output is 0/-1, which is all
3275         // sign bits set.
3276         if ((Known.Zero | 1).isAllOnesValue())
3277           return VTBits;
3278 
3279         // If the input is known to be positive (the sign bit is known clear),
3280         // the output of the NEG has the same number of sign bits as the input.
3281         if (Known.isNonNegative())
3282           return Tmp2;
3283 
3284         // Otherwise, we treat this like a SUB.
3285       }
3286 
3287     // Sub can have at most one carry bit. Thus we know that the output
3288     // is, at worst, one more bit than the inputs.
3289     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3290     if (Tmp == 1) return 1; // Early out.
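    // The subtraction costs at most one known sign bit (the borrow). E.g.
    // two i8 values with 3 sign bits each lie in [-32, 31], so their
    // difference lies in [-63, 63] and still has at least 2 sign bits.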
3291     return std::min(Tmp, Tmp2)-1;
3292   case ISD::TRUNCATE: {
3293     // Check if the sign bits of source go down as far as the truncated value.
3294     unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3295     unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3296     if (NumSrcSignBits > (NumSrcBits - VTBits))
3297       return NumSrcSignBits - (NumSrcBits - VTBits);
3298     break;
3299   }
3300   case ISD::EXTRACT_ELEMENT: {
3301     const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3302     const int BitWidth = Op.getValueSizeInBits();
3303     const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3304 
3305     // Get the reverse index: operand 1 indexes elements from the little
3306     // end, but the sign bits live at the big end.
3307     const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3308 
3309     // If the sign portion reaches into our element, the subtraction gives
3310     // the correct result; otherwise it is either negative or exceeds the
3311     // bit width, so clamp the answer to [0, BitWidth].
3312     return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3313   }
3314   case ISD::INSERT_VECTOR_ELT: {
3315     SDValue InVec = Op.getOperand(0);
3316     SDValue InVal = Op.getOperand(1);
3317     SDValue EltNo = Op.getOperand(2);
3318     unsigned NumElts = InVec.getValueType().getVectorNumElements();
3319 
3320     ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3321     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3322       // If we know the element index, split the demand between the
3323       // source vector and the inserted element.
3324       unsigned EltIdx = CEltNo->getZExtValue();
3325 
3326       // If we demand the inserted element then get its sign bits.
3327       Tmp = std::numeric_limits<unsigned>::max();
3328       if (DemandedElts[EltIdx]) {
3329         // TODO - handle implicit truncation of inserted elements.
3330         if (InVal.getScalarValueSizeInBits() != VTBits)
3331           break;
3332         Tmp = ComputeNumSignBits(InVal, Depth + 1);
3333       }
3334 
3335       // If we demand the source vector then get its sign bits, and determine
3336       // the minimum.
3337       APInt VectorElts = DemandedElts;
3338       VectorElts.clearBit(EltIdx);
3339       if (!!VectorElts) {
3340         Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3341         Tmp = std::min(Tmp, Tmp2);
3342       }
3343     } else {
3344       // Unknown element index, so ignore DemandedElts and demand them all.
3345       Tmp = ComputeNumSignBits(InVec, Depth + 1);
3346       Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3347       Tmp = std::min(Tmp, Tmp2);
3348     }
3349     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3350     return Tmp;
3351   }
3352   case ISD::EXTRACT_VECTOR_ELT: {
3353     SDValue InVec = Op.getOperand(0);
3354     SDValue EltNo = Op.getOperand(1);
3355     EVT VecVT = InVec.getValueType();
3356     const unsigned BitWidth = Op.getValueSizeInBits();
3357     const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3358     const unsigned NumSrcElts = VecVT.getVectorNumElements();
3359 
3360     // If BitWidth > EltBitWidth the value is any-extended, and we do not
3361     // know anything about the sign bits. But if the sizes match we can
3362     // derive knowledge about the sign bits from the vector operand.
3363     if (BitWidth != EltBitWidth)
3364       break;
3365 
3366     // If we know the element index, just demand that vector element, else for
3367     // an unknown element index, ignore DemandedElts and demand them all.
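    // E.g. extracting lane 2 from a v4i32 source demands only bit 2
    // (0b0100) of the source element mask.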
3367     APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3368     ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3369     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3370       DemandedSrcElts =
3371           APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3372 
3373     return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3374   }
3375   case ISD::EXTRACT_SUBVECTOR: {
3376     // If we know the element index, just demand those subvector elements,
3377     // otherwise demand them all.
3378     SDValue Src = Op.getOperand(0);
3379     ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3380     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3381     if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3382       // Offset the demanded elts by the subvector index.
3383       uint64_t Idx = SubIdx->getZExtValue();
3384       APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
3385       return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3386     }
3387     return ComputeNumSignBits(Src, Depth + 1);
3388   }
3389   case ISD::CONCAT_VECTORS:
3390     // Determine the minimum number of sign bits across all demanded
3391     // elts of the input vectors. Early out if the result is already 1.
3392     Tmp = std::numeric_limits<unsigned>::max();
3393     EVT SubVectorVT = Op.getOperand(0).getValueType();
3394     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3395     unsigned NumSubVectors = Op.getNumOperands();
3396     for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3397       APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3398       DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3399       if (!DemandedSub)
3400         continue;
3401       Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3402       Tmp = std::min(Tmp, Tmp2);
3403     }
3404     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3405     return Tmp;
3406   }
3407 
3408   // If we are looking at the loaded value of the SDNode.
3409   if (Op.getResNo() == 0) {
3410     // Handle LOADX separately here. EXTLOAD case will fallthrough.
3411     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3412       unsigned ExtType = LD->getExtensionType();
3413       switch (ExtType) {
3414       default: break;
3415       case ISD::SEXTLOAD:    // e.g. i16->i32 = '17' bits known.
3416         Tmp = LD->getMemoryVT().getScalarSizeInBits();
3417         return VTBits-Tmp+1;
3418       case ISD::ZEXTLOAD:    // e.g. i16->i32 = '16' bits known.
3419         Tmp = LD->getMemoryVT().getScalarSizeInBits();
3420         return VTBits-Tmp;
3421       }
3422     }
3423   }
3424 
3425   // Allow the target to implement this method for its nodes.
3426   if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3427       Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3428       Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3429       Op.getOpcode() == ISD::INTRINSIC_VOID) {
3430     unsigned NumBits =
3431         TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
3432     if (NumBits > 1)
3433       FirstAnswer = std::max(FirstAnswer, NumBits);
3434   }
3435 
3436   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3437   // use this information.
3438   KnownBits Known;
3439   computeKnownBits(Op, Known, DemandedElts, Depth);
3440 
3441   APInt Mask;
3442   if (Known.isNonNegative()) {        // sign bit is 0
3443     Mask = Known.Zero;
3444   } else if (Known.isNegative()) {    // sign bit is 1
3445     Mask = Known.One;
3446   } else {
3447     // Nothing known.
3448     return FirstAnswer;
3449   }
3450 
3451   // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3452   // the number of identical bits in the top of the input value.
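  // E.g. for an i8 with Known.One = 0b11100000, the code below computes
  // ~Mask = 0b00011111, whose three leading zeros show at least three
  // known sign bits.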
3453   Mask = ~Mask;
3454   Mask <<= Mask.getBitWidth()-VTBits;
3455   // Return # leading zeros. We use 'min' here in case Val was zero before
3456   // shifting. We don't want to return '64' for an i32 "0".
3457   return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3458 }
3459 
3460 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3461   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3462       !isa<ConstantSDNode>(Op.getOperand(1)))
3463     return false;
3464 
3465   if (Op.getOpcode() == ISD::OR &&
3466       !MaskedValueIsZero(Op.getOperand(0),
3467                          cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3468     return false;
3469 
3470   return true;
3471 }
3472 
3473 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
3474   // If we're told that NaNs won't happen, assume they won't.
3475   if (getTarget().Options.NoNaNsFPMath)
3476     return true;
3477 
3478   if (Op->getFlags().hasNoNaNs())
3479     return true;
3480 
3481   // If the value is a constant, we can obviously see if it is a NaN or not.
3482   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3483     return !C->getValueAPF().isNaN();
3484 
3485   // TODO: Recognize more cases here.
3486 
3487   return false;
3488 }
3489 
3490 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3491   // If the value is a constant, we can obviously see if it is a zero or not.
3492   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3493     return !C->isZero();
3494 
3495   // TODO: Recognize more cases here.
3496   switch (Op.getOpcode()) {
3497   default: break;
3498   case ISD::OR:
3499     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3500       return !C->isNullValue();
3501     break;
3502   }
3503 
3504   return false;
3505 }
3506 
3507 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3508   // Check the obvious case.
3509   if (A == B) return true;
3510 
3511   // For negative and positive zero.
3512   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3513     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3514       if (CA->isZero() && CB->isZero()) return true;
3515 
3516   // Otherwise they may not be equal.
3517   return false;
3518 }
3519 
3520 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
3521   assert(A.getValueType() == B.getValueType() &&
3522          "Values must have the same type");
3523   KnownBits AKnown, BKnown;
3524   computeKnownBits(A, AKnown);
3525   computeKnownBits(B, BKnown);
3526   return (AKnown.Zero | BKnown.Zero).isAllOnesValue();
3527 }
3528 
3529 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
3530                                   ArrayRef<SDValue> Ops,
3531                                   SelectionDAG &DAG) {
3532   assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
3533   assert(llvm::all_of(Ops,
3534                       [Ops](SDValue Op) {
3535                         return Ops[0].getValueType() == Op.getValueType();
3536                       }) &&
3537          "Concatenation of vectors with inconsistent value types!");
3538   assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
3539              VT.getVectorNumElements() &&
3540          "Incorrect element count in vector concatenation!");
3541 
3542   if (Ops.size() == 1)
3543     return Ops[0];
3544 
3545   // Concat of UNDEFs is UNDEF.
3546   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3547     return DAG.getUNDEF(VT);
3548 
3549   // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
3550   // simplified to one big BUILD_VECTOR.
3551   // FIXME: Add support for SCALAR_TO_VECTOR as well.
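  // E.g. concat (build_vector a, b), (v2i32 undef) becomes
  // build_vector a, b, undef, undef below.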
3552   EVT SVT = VT.getScalarType();
3553   SmallVector<SDValue, 16> Elts;
3554   for (SDValue Op : Ops) {
3555     EVT OpVT = Op.getValueType();
3556     if (Op.isUndef())
3557       Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
3558     else if (Op.getOpcode() == ISD::BUILD_VECTOR)
3559       Elts.append(Op->op_begin(), Op->op_end());
3560     else
3561       return SDValue();
3562   }
3563 
3564   // BUILD_VECTOR requires all inputs to be of the same type; find the
3565   // maximum type and extend them all.
3566   for (SDValue Op : Elts)
3567     SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
3568 
3569   if (SVT.bitsGT(VT.getScalarType()))
3570     for (SDValue &Op : Elts)
3571       Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
3572                ? DAG.getZExtOrTrunc(Op, DL, SVT)
3573                : DAG.getSExtOrTrunc(Op, DL, SVT);
3574 
3575   SDValue V = DAG.getBuildVector(VT, DL, Elts);
3576   NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
3577   return V;
3578 }
3579 
3580 /// Gets or creates the specified node.
3581 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
3582   FoldingSetNodeID ID;
3583   AddNodeIDNode(ID, Opcode, getVTList(VT), None);
3584   void *IP = nullptr;
3585   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3586     return SDValue(E, 0);
3587 
3588   auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
3589                               getVTList(VT));
3590   CSEMap.InsertNode(N, IP);
3591 
3592   InsertNode(N);
3593   SDValue V = SDValue(N, 0);
3594   NewSDValueDbgMsg(V, "Creating new node: ", this);
3595   return V;
3596 }
3597 
3598 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3599                               SDValue Operand, const SDNodeFlags Flags) {
3600   // Constant fold unary operations with an integer constant operand. Even
3601   // opaque constants will be folded, because the folding of unary operations
3602   // doesn't create new constants with different values. Nevertheless, the
3603   // opaque flag is preserved during folding to prevent future folding with
3604   // other constants.
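  // E.g. sign_extend of an i8 constant 0x80 to i16 folds to 0xFF80 below;
  // if the source constant is opaque, the folded constant remains opaque.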
3605 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 3606 const APInt &Val = C->getAPIntValue(); 3607 switch (Opcode) { 3608 default: break; 3609 case ISD::SIGN_EXTEND: 3610 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 3611 C->isTargetOpcode(), C->isOpaque()); 3612 case ISD::ANY_EXTEND: 3613 case ISD::ZERO_EXTEND: 3614 case ISD::TRUNCATE: 3615 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 3616 C->isTargetOpcode(), C->isOpaque()); 3617 case ISD::UINT_TO_FP: 3618 case ISD::SINT_TO_FP: { 3619 APFloat apf(EVTToAPFloatSemantics(VT), 3620 APInt::getNullValue(VT.getSizeInBits())); 3621 (void)apf.convertFromAPInt(Val, 3622 Opcode==ISD::SINT_TO_FP, 3623 APFloat::rmNearestTiesToEven); 3624 return getConstantFP(apf, DL, VT); 3625 } 3626 case ISD::BITCAST: 3627 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 3628 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 3629 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 3630 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 3631 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 3632 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 3633 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 3634 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 3635 break; 3636 case ISD::ABS: 3637 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 3638 C->isOpaque()); 3639 case ISD::BITREVERSE: 3640 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 3641 C->isOpaque()); 3642 case ISD::BSWAP: 3643 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 3644 C->isOpaque()); 3645 case ISD::CTPOP: 3646 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 3647 C->isOpaque()); 3648 case ISD::CTLZ: 3649 case ISD::CTLZ_ZERO_UNDEF: 3650 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 3651 C->isOpaque()); 3652 case ISD::CTTZ: 3653 case ISD::CTTZ_ZERO_UNDEF: 3654 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 3655 C->isOpaque()); 3656 case ISD::FP16_TO_FP: { 3657 bool Ignored; 3658 APFloat FPV(APFloat::IEEEhalf(), 3659 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 3660 3661 // This can return overflow, underflow, or inexact; we don't care. 3662 // FIXME need to be more flexible about rounding mode. 3663 (void)FPV.convert(EVTToAPFloatSemantics(VT), 3664 APFloat::rmNearestTiesToEven, &Ignored); 3665 return getConstantFP(FPV, DL, VT); 3666 } 3667 } 3668 } 3669 3670 // Constant fold unary operations with a floating point constant operand. 
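  // E.g. fneg (ConstantFP 1.5) folds to ConstantFP -1.5; fabs, the rounding
  // operations (fceil/ffloor/ftrunc) and the FP conversions fold similarly
  // below.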
3671 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 3672 APFloat V = C->getValueAPF(); // make copy 3673 switch (Opcode) { 3674 case ISD::FNEG: 3675 V.changeSign(); 3676 return getConstantFP(V, DL, VT); 3677 case ISD::FABS: 3678 V.clearSign(); 3679 return getConstantFP(V, DL, VT); 3680 case ISD::FCEIL: { 3681 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 3682 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3683 return getConstantFP(V, DL, VT); 3684 break; 3685 } 3686 case ISD::FTRUNC: { 3687 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 3688 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3689 return getConstantFP(V, DL, VT); 3690 break; 3691 } 3692 case ISD::FFLOOR: { 3693 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 3694 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3695 return getConstantFP(V, DL, VT); 3696 break; 3697 } 3698 case ISD::FP_EXTEND: { 3699 bool ignored; 3700 // This can return overflow, underflow, or inexact; we don't care. 3701 // FIXME need to be more flexible about rounding mode. 3702 (void)V.convert(EVTToAPFloatSemantics(VT), 3703 APFloat::rmNearestTiesToEven, &ignored); 3704 return getConstantFP(V, DL, VT); 3705 } 3706 case ISD::FP_TO_SINT: 3707 case ISD::FP_TO_UINT: { 3708 bool ignored; 3709 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 3710 // FIXME need to be more flexible about rounding mode. 3711 APFloat::opStatus s = 3712 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 3713 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 3714 break; 3715 return getConstant(IntVal, DL, VT); 3716 } 3717 case ISD::BITCAST: 3718 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 3719 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3720 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 3721 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3722 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 3723 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 3724 break; 3725 case ISD::FP_TO_FP16: { 3726 bool Ignored; 3727 // This can return overflow, underflow, or inexact; we don't care. 3728 // FIXME need to be more flexible about rounding mode. 3729 (void)V.convert(APFloat::IEEEhalf(), 3730 APFloat::rmNearestTiesToEven, &Ignored); 3731 return getConstant(V.bitcastToAPInt(), DL, VT); 3732 } 3733 } 3734 } 3735 3736 // Constant fold unary operations with a vector integer or float operand. 3737 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 3738 if (BV->isConstant()) { 3739 switch (Opcode) { 3740 default: 3741 // FIXME: Entirely reasonable to perform folding of other unary 3742 // operations here as the need arises. 
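      // (E.g. FP16_TO_FP and FP_TO_FP16 are folded for scalar constants
      // above but do not appear in the vector list below yet.)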
3743 break; 3744 case ISD::FNEG: 3745 case ISD::FABS: 3746 case ISD::FCEIL: 3747 case ISD::FTRUNC: 3748 case ISD::FFLOOR: 3749 case ISD::FP_EXTEND: 3750 case ISD::FP_TO_SINT: 3751 case ISD::FP_TO_UINT: 3752 case ISD::TRUNCATE: 3753 case ISD::ANY_EXTEND: 3754 case ISD::ZERO_EXTEND: 3755 case ISD::SIGN_EXTEND: 3756 case ISD::UINT_TO_FP: 3757 case ISD::SINT_TO_FP: 3758 case ISD::ABS: 3759 case ISD::BITREVERSE: 3760 case ISD::BSWAP: 3761 case ISD::CTLZ: 3762 case ISD::CTLZ_ZERO_UNDEF: 3763 case ISD::CTTZ: 3764 case ISD::CTTZ_ZERO_UNDEF: 3765 case ISD::CTPOP: { 3766 SDValue Ops = { Operand }; 3767 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 3768 return Fold; 3769 } 3770 } 3771 } 3772 } 3773 3774 unsigned OpOpcode = Operand.getNode()->getOpcode(); 3775 switch (Opcode) { 3776 case ISD::TokenFactor: 3777 case ISD::MERGE_VALUES: 3778 case ISD::CONCAT_VECTORS: 3779 return Operand; // Factor, merge or concat of one node? No need. 3780 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 3781 case ISD::FP_EXTEND: 3782 assert(VT.isFloatingPoint() && 3783 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 3784 if (Operand.getValueType() == VT) return Operand; // noop conversion. 3785 assert((!VT.isVector() || 3786 VT.getVectorNumElements() == 3787 Operand.getValueType().getVectorNumElements()) && 3788 "Vector element count mismatch!"); 3789 assert(Operand.getValueType().bitsLT(VT) && 3790 "Invalid fpext node, dst < src!"); 3791 if (Operand.isUndef()) 3792 return getUNDEF(VT); 3793 break; 3794 case ISD::SIGN_EXTEND: 3795 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3796 "Invalid SIGN_EXTEND!"); 3797 if (Operand.getValueType() == VT) return Operand; // noop extension 3798 assert((!VT.isVector() || 3799 VT.getVectorNumElements() == 3800 Operand.getValueType().getVectorNumElements()) && 3801 "Vector element count mismatch!"); 3802 assert(Operand.getValueType().bitsLT(VT) && 3803 "Invalid sext node, dst < src!"); 3804 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 3805 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 3806 else if (OpOpcode == ISD::UNDEF) 3807 // sext(undef) = 0, because the top bits will all be the same. 3808 return getConstant(0, DL, VT); 3809 break; 3810 case ISD::ZERO_EXTEND: 3811 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3812 "Invalid ZERO_EXTEND!"); 3813 if (Operand.getValueType() == VT) return Operand; // noop extension 3814 assert((!VT.isVector() || 3815 VT.getVectorNumElements() == 3816 Operand.getValueType().getVectorNumElements()) && 3817 "Vector element count mismatch!"); 3818 assert(Operand.getValueType().bitsLT(VT) && 3819 "Invalid zext node, dst < src!"); 3820 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 3821 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0)); 3822 else if (OpOpcode == ISD::UNDEF) 3823 // zext(undef) = 0, because the top bits will be zero. 
3824       return getConstant(0, DL, VT);
3825     break;
3826   case ISD::ANY_EXTEND:
3827     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3828            "Invalid ANY_EXTEND!");
3829     if (Operand.getValueType() == VT) return Operand;   // noop extension
3830     assert((!VT.isVector() ||
3831             VT.getVectorNumElements() ==
3832             Operand.getValueType().getVectorNumElements()) &&
3833            "Vector element count mismatch!");
3834     assert(Operand.getValueType().bitsLT(VT) &&
3835            "Invalid anyext node, dst < src!");
3836 
3837     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3838         OpOpcode == ISD::ANY_EXTEND)
3839       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
3840       return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3841     else if (OpOpcode == ISD::UNDEF)
3842       return getUNDEF(VT);
3843 
3844     // (ext (trunc x)) -> x
3845     if (OpOpcode == ISD::TRUNCATE) {
3846       SDValue OpOp = Operand.getOperand(0);
3847       if (OpOp.getValueType() == VT)
3848         return OpOp;
3849     }
3850     break;
3851   case ISD::TRUNCATE:
3852     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3853            "Invalid TRUNCATE!");
3854     if (Operand.getValueType() == VT) return Operand;   // noop truncate
3855     assert((!VT.isVector() ||
3856             VT.getVectorNumElements() ==
3857             Operand.getValueType().getVectorNumElements()) &&
3858            "Vector element count mismatch!");
3859     assert(Operand.getValueType().bitsGT(VT) &&
3860            "Invalid truncate node, src < dst!");
3861     if (OpOpcode == ISD::TRUNCATE)
3862       return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3863     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3864         OpOpcode == ISD::ANY_EXTEND) {
3865       // If the source is smaller than the dest, we still need an extend.
3866       if (Operand.getOperand(0).getValueType().getScalarType()
3867               .bitsLT(VT.getScalarType()))
3868         return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3869       if (Operand.getOperand(0).getValueType().bitsGT(VT))
3870         return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3871       return Operand.getOperand(0);
3872     }
3873     if (OpOpcode == ISD::UNDEF)
3874       return getUNDEF(VT);
3875     break;
3876   case ISD::ABS:
3877     assert(VT.isInteger() && VT == Operand.getValueType() &&
3878            "Invalid ABS!");
3879     if (OpOpcode == ISD::UNDEF)
3880       return getUNDEF(VT);
3881     break;
3882   case ISD::BSWAP:
3883     assert(VT.isInteger() && VT == Operand.getValueType() &&
3884            "Invalid BSWAP!");
3885     assert((VT.getScalarSizeInBits() % 16 == 0) &&
3886            "BSWAP types must be a multiple of 16 bits!");
3887     if (OpOpcode == ISD::UNDEF)
3888       return getUNDEF(VT);
3889     break;
3890   case ISD::BITREVERSE:
3891     assert(VT.isInteger() && VT == Operand.getValueType() &&
3892            "Invalid BITREVERSE!");
3893     if (OpOpcode == ISD::UNDEF)
3894       return getUNDEF(VT);
3895     break;
3896   case ISD::BITCAST:
3897     // Basic sanity checking.
3898     assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
3899            "Cannot BITCAST between types of different sizes!");
3900     if (VT == Operand.getValueType()) return Operand; // noop conversion.
3901 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 3902 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 3903 if (OpOpcode == ISD::UNDEF) 3904 return getUNDEF(VT); 3905 break; 3906 case ISD::SCALAR_TO_VECTOR: 3907 assert(VT.isVector() && !Operand.getValueType().isVector() && 3908 (VT.getVectorElementType() == Operand.getValueType() || 3909 (VT.getVectorElementType().isInteger() && 3910 Operand.getValueType().isInteger() && 3911 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 3912 "Illegal SCALAR_TO_VECTOR node!"); 3913 if (OpOpcode == ISD::UNDEF) 3914 return getUNDEF(VT); 3915 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 3916 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 3917 isa<ConstantSDNode>(Operand.getOperand(1)) && 3918 Operand.getConstantOperandVal(1) == 0 && 3919 Operand.getOperand(0).getValueType() == VT) 3920 return Operand.getOperand(0); 3921 break; 3922 case ISD::FNEG: 3923 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 3924 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) 3925 // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags? 3926 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1), 3927 Operand.getOperand(0), Operand.getNode()->getFlags()); 3928 if (OpOpcode == ISD::FNEG) // --X -> X 3929 return Operand.getOperand(0); 3930 break; 3931 case ISD::FABS: 3932 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 3933 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 3934 break; 3935 } 3936 3937 SDNode *N; 3938 SDVTList VTs = getVTList(VT); 3939 SDValue Ops[] = {Operand}; 3940 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 3941 FoldingSetNodeID ID; 3942 AddNodeIDNode(ID, Opcode, VTs, Ops); 3943 void *IP = nullptr; 3944 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 3945 E->intersectFlagsWith(Flags); 3946 return SDValue(E, 0); 3947 } 3948 3949 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3950 N->setFlags(Flags); 3951 createOperands(N, Ops); 3952 CSEMap.InsertNode(N, IP); 3953 } else { 3954 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3955 createOperands(N, Ops); 3956 } 3957 3958 InsertNode(N); 3959 SDValue V = SDValue(N, 0); 3960 NewSDValueDbgMsg(V, "Creating new node: ", this); 3961 return V; 3962 } 3963 3964 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 3965 const APInt &C2) { 3966 switch (Opcode) { 3967 case ISD::ADD: return std::make_pair(C1 + C2, true); 3968 case ISD::SUB: return std::make_pair(C1 - C2, true); 3969 case ISD::MUL: return std::make_pair(C1 * C2, true); 3970 case ISD::AND: return std::make_pair(C1 & C2, true); 3971 case ISD::OR: return std::make_pair(C1 | C2, true); 3972 case ISD::XOR: return std::make_pair(C1 ^ C2, true); 3973 case ISD::SHL: return std::make_pair(C1 << C2, true); 3974 case ISD::SRL: return std::make_pair(C1.lshr(C2), true); 3975 case ISD::SRA: return std::make_pair(C1.ashr(C2), true); 3976 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true); 3977 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true); 3978 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true); 3979 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true); 3980 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true); 3981 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? 
C1 : C2, true); 3982 case ISD::UDIV: 3983 if (!C2.getBoolValue()) 3984 break; 3985 return std::make_pair(C1.udiv(C2), true); 3986 case ISD::UREM: 3987 if (!C2.getBoolValue()) 3988 break; 3989 return std::make_pair(C1.urem(C2), true); 3990 case ISD::SDIV: 3991 if (!C2.getBoolValue()) 3992 break; 3993 return std::make_pair(C1.sdiv(C2), true); 3994 case ISD::SREM: 3995 if (!C2.getBoolValue()) 3996 break; 3997 return std::make_pair(C1.srem(C2), true); 3998 } 3999 return std::make_pair(APInt(1, 0), false); 4000 } 4001 4002 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4003 EVT VT, const ConstantSDNode *Cst1, 4004 const ConstantSDNode *Cst2) { 4005 if (Cst1->isOpaque() || Cst2->isOpaque()) 4006 return SDValue(); 4007 4008 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(), 4009 Cst2->getAPIntValue()); 4010 if (!Folded.second) 4011 return SDValue(); 4012 return getConstant(Folded.first, DL, VT); 4013 } 4014 4015 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4016 const GlobalAddressSDNode *GA, 4017 const SDNode *N2) { 4018 if (GA->getOpcode() != ISD::GlobalAddress) 4019 return SDValue(); 4020 if (!TLI->isOffsetFoldingLegal(GA)) 4021 return SDValue(); 4022 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2); 4023 if (!Cst2) 4024 return SDValue(); 4025 int64_t Offset = Cst2->getSExtValue(); 4026 switch (Opcode) { 4027 case ISD::ADD: break; 4028 case ISD::SUB: Offset = -uint64_t(Offset); break; 4029 default: return SDValue(); 4030 } 4031 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT, 4032 GA->getOffset() + uint64_t(Offset)); 4033 } 4034 4035 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4036 switch (Opcode) { 4037 case ISD::SDIV: 4038 case ISD::UDIV: 4039 case ISD::SREM: 4040 case ISD::UREM: { 4041 // If a divisor is zero/undef or any element of a divisor vector is 4042 // zero/undef, the whole op is undef. 4043 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4044 SDValue Divisor = Ops[1]; 4045 if (Divisor.isUndef() || isNullConstant(Divisor)) 4046 return true; 4047 4048 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4049 llvm::any_of(Divisor->op_values(), 4050 [](SDValue V) { return V.isUndef() || 4051 isNullConstant(V); }); 4052 // TODO: Handle signed overflow. 4053 } 4054 // TODO: Handle oversized shifts. 4055 default: 4056 return false; 4057 } 4058 } 4059 4060 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4061 EVT VT, SDNode *Cst1, 4062 SDNode *Cst2) { 4063 // If the opcode is a target-specific ISD node, there's nothing we can 4064 // do here and the operand rules may not line up with the below, so 4065 // bail early. 4066 if (Opcode >= ISD::BUILTIN_OP_END) 4067 return SDValue(); 4068 4069 if (isUndef(Opcode, {SDValue(Cst1, 0), SDValue(Cst2, 0)})) 4070 return getUNDEF(VT); 4071 4072 // Handle the case of two scalars. 
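  // E.g. (add i32 2, i32 3) reaches this point and folds to an i32
  // constant 5 via FoldValue.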
4073   if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
4074     if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
4075       SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
4076       assert((!Folded || !VT.isVector()) &&
4077              "Can't fold vector ops with scalar operands");
4078       return Folded;
4079     }
4080   }
4081 
4082   // fold (add Sym, c) -> Sym+c
4083   if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
4084     return FoldSymbolOffset(Opcode, VT, GA, Cst2);
4085   if (TLI->isCommutativeBinOp(Opcode))
4086     if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
4087       return FoldSymbolOffset(Opcode, VT, GA, Cst1);
4088 
4089   // For vectors extract each constant element into Inputs so we can constant
4090   // fold them individually.
4091   BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
4092   BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
4093   if (!BV1 || !BV2)
4094     return SDValue();
4095 
4096   assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
4097 
4098   EVT SVT = VT.getScalarType();
4099   EVT LegalSVT = SVT;
4100   if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4101     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4102     if (LegalSVT.bitsLT(SVT))
4103       return SDValue();
4104   }
4105   SmallVector<SDValue, 4> Outputs;
4106   for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
4107     SDValue V1 = BV1->getOperand(I);
4108     SDValue V2 = BV2->getOperand(I);
4109 
4110     if (SVT.isInteger()) {
4111       if (V1->getValueType(0).bitsGT(SVT))
4112         V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4113       if (V2->getValueType(0).bitsGT(SVT))
4114         V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4115     }
4116 
4117     if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4118       return SDValue();
4119 
4120     // Fold one vector element.
4121     SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4122     if (LegalSVT != SVT)
4123       ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4124 
4125     // Scalar folding only succeeded if the result is a constant or UNDEF.
4126     if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4127         ScalarResult.getOpcode() != ISD::ConstantFP)
4128       return SDValue();
4129     Outputs.push_back(ScalarResult);
4130   }
4131 
4132   assert(VT.getVectorNumElements() == Outputs.size() &&
4133          "Vector size mismatch!");
4134 
4135   // We may have a vector type but a scalar result. Create a splat.
4136   Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4137 
4138   // Build a big vector out of the scalar elements we generated.
4139   return getBuildVector(VT, SDLoc(), Outputs);
4140 }
4141 
4142 // TODO: Merge with FoldConstantArithmetic
4143 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
4144                                                    const SDLoc &DL, EVT VT,
4145                                                    ArrayRef<SDValue> Ops,
4146                                                    const SDNodeFlags Flags) {
4147   // If the opcode is a target-specific ISD node, there's nothing we can
4148   // do here and the operand rules may not line up with the below, so
4149   // bail early.
4150   if (Opcode >= ISD::BUILTIN_OP_END)
4151     return SDValue();
4152 
4153   if (isUndef(Opcode, Ops))
4154     return getUNDEF(VT);
4155 
4156   // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
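  // Scalar requests bail out below; two-scalar cases are already handled
  // by the ConstantSDNode overload of FoldConstantArithmetic.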
4157   if (!VT.isVector())
4158     return SDValue();
4159 
4160   unsigned NumElts = VT.getVectorNumElements();
4161 
4162   auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
4163     return !Op.getValueType().isVector() ||
4164            Op.getValueType().getVectorNumElements() == NumElts;
4165   };
4166 
4167   auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
4168     BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
4169     return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
4170            (BV && BV->isConstant());
4171   };
4172 
4173   // All operands must be vector types with the same number of elements as
4174   // the result type and must be either UNDEF or a build vector of constant
4175   // or UNDEF scalars.
4176   if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
4177       !llvm::all_of(Ops, IsScalarOrSameVectorSize))
4178     return SDValue();
4179 
4180   // If we are comparing vectors, then the result needs to be an i1 boolean
4181   // that is then sign-extended back to the legal result type.
4182   EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
4183 
4184   // Find a legal integer scalar type for constant promotion and
4185   // ensure that its scalar size is at least as large as the source.
4186   EVT LegalSVT = VT.getScalarType();
4187   if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4188     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4189     if (LegalSVT.bitsLT(VT.getScalarType()))
4190       return SDValue();
4191   }
4192 
4193   // Constant fold each scalar lane separately.
4194   SmallVector<SDValue, 4> ScalarResults;
4195   for (unsigned i = 0; i != NumElts; i++) {
4196     SmallVector<SDValue, 4> ScalarOps;
4197     for (SDValue Op : Ops) {
4198       EVT InSVT = Op.getValueType().getScalarType();
4199       BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
4200       if (!InBV) {
4201         // We've checked that this is UNDEF or a constant of some kind.
4202         if (Op.isUndef())
4203           ScalarOps.push_back(getUNDEF(InSVT));
4204         else
4205           ScalarOps.push_back(Op);
4206         continue;
4207       }
4208 
4209       SDValue ScalarOp = InBV->getOperand(i);
4210       EVT ScalarVT = ScalarOp.getValueType();
4211 
4212       // Build vector (integer) scalar operands may need implicit
4213       // truncation - do this before constant folding.
4214       if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
4215         ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
4216 
4217       ScalarOps.push_back(ScalarOp);
4218     }
4219 
4220     // Constant fold the scalar operands.
4221     SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
4222 
4223     // Legalize the (integer) scalar constant if necessary.
4224     if (LegalSVT != SVT)
4225       ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4226 
4227     // Scalar folding only succeeded if the result is a constant or UNDEF.
4228     if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4229         ScalarResult.getOpcode() != ISD::ConstantFP)
4230       return SDValue();
4231     ScalarResults.push_back(ScalarResult);
4232   }
4233 
4234   SDValue V = getBuildVector(VT, DL, ScalarResults);
4235   NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
4236   return V;
4237 }
4238 
4239 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4240                               SDValue N1, SDValue N2, const SDNodeFlags Flags) {
4241   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4242   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
4243   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4244   ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4245 
4246   // Canonicalize constant to RHS if commutative.
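  // E.g. (add C, X) becomes (add X, C), so the folds below only need to
  // look for constants on the RHS.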
4247 if (TLI->isCommutativeBinOp(Opcode)) { 4248 if (N1C && !N2C) { 4249 std::swap(N1C, N2C); 4250 std::swap(N1, N2); 4251 } else if (N1CFP && !N2CFP) { 4252 std::swap(N1CFP, N2CFP); 4253 std::swap(N1, N2); 4254 } 4255 } 4256 4257 switch (Opcode) { 4258 default: break; 4259 case ISD::TokenFactor: 4260 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 4261 N2.getValueType() == MVT::Other && "Invalid token factor!"); 4262 // Fold trivial token factors. 4263 if (N1.getOpcode() == ISD::EntryToken) return N2; 4264 if (N2.getOpcode() == ISD::EntryToken) return N1; 4265 if (N1 == N2) return N1; 4266 break; 4267 case ISD::CONCAT_VECTORS: { 4268 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 4269 SDValue Ops[] = {N1, N2}; 4270 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 4271 return V; 4272 break; 4273 } 4274 case ISD::AND: 4275 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4276 assert(N1.getValueType() == N2.getValueType() && 4277 N1.getValueType() == VT && "Binary operator types must match!"); 4278 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 4279 // worth handling here. 4280 if (N2C && N2C->isNullValue()) 4281 return N2; 4282 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 4283 return N1; 4284 break; 4285 case ISD::OR: 4286 case ISD::XOR: 4287 case ISD::ADD: 4288 case ISD::SUB: 4289 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4290 assert(N1.getValueType() == N2.getValueType() && 4291 N1.getValueType() == VT && "Binary operator types must match!"); 4292 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 4293 // it's worth handling here. 4294 if (N2C && N2C->isNullValue()) 4295 return N1; 4296 break; 4297 case ISD::UDIV: 4298 case ISD::UREM: 4299 case ISD::MULHU: 4300 case ISD::MULHS: 4301 case ISD::MUL: 4302 case ISD::SDIV: 4303 case ISD::SREM: 4304 case ISD::SMIN: 4305 case ISD::SMAX: 4306 case ISD::UMIN: 4307 case ISD::UMAX: 4308 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4309 assert(N1.getValueType() == N2.getValueType() && 4310 N1.getValueType() == VT && "Binary operator types must match!"); 4311 break; 4312 case ISD::FADD: 4313 case ISD::FSUB: 4314 case ISD::FMUL: 4315 case ISD::FDIV: 4316 case ISD::FREM: 4317 if (getTarget().Options.UnsafeFPMath) { 4318 if (Opcode == ISD::FADD) { 4319 // x+0 --> x 4320 if (N2CFP && N2CFP->getValueAPF().isZero()) 4321 return N1; 4322 } else if (Opcode == ISD::FSUB) { 4323 // x-0 --> x 4324 if (N2CFP && N2CFP->getValueAPF().isZero()) 4325 return N1; 4326 } else if (Opcode == ISD::FMUL) { 4327 // x*0 --> 0 4328 if (N2CFP && N2CFP->isZero()) 4329 return N2; 4330 // x*1 --> x 4331 if (N2CFP && N2CFP->isExactlyValue(1.0)) 4332 return N1; 4333 } 4334 } 4335 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 4336 assert(N1.getValueType() == N2.getValueType() && 4337 N1.getValueType() == VT && "Binary operator types must match!"); 4338 break; 4339 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 
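    // E.g. fcopysign(f32 X, f64 Y) is valid: only the magnitude operand
    // constrains the result type; the sign operand just needs to be FP.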
4340     assert(N1.getValueType() == VT &&
4341            N1.getValueType().isFloatingPoint() &&
4342            N2.getValueType().isFloatingPoint() &&
4343            "Invalid FCOPYSIGN!");
4344     break;
4345   case ISD::SHL:
4346   case ISD::SRA:
4347   case ISD::SRL:
4348   case ISD::ROTL:
4349   case ISD::ROTR:
4350     assert(VT == N1.getValueType() &&
4351            "Shift operators' return type must be the same as their first arg");
4352     assert(VT.isInteger() && N2.getValueType().isInteger() &&
4353            "Shifts only work on integers");
4354     assert((!VT.isVector() || VT == N2.getValueType()) &&
4355            "Vector shift amounts must be the same type as their first arg");
4356     // Verify that the shift amount VT is big enough to hold valid shift
4357     // amounts. This catches things like trying to shift an i1024 value by an
4358     // i8, which is easy to fall into in generic code that uses
4359     // TLI.getShiftAmountTy().
4360     assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
4361            "Invalid use of small shift amount with oversized value!");
4362 
4363     // Always fold shifts of i1 values so the code generator doesn't need to
4364     // handle them. Since we know the size of the shift has to be less than the
4365     // size of the value, the shift/rotate count is guaranteed to be zero.
4366     if (VT == MVT::i1)
4367       return N1;
4368     if (N2C && N2C->isNullValue())
4369       return N1;
4370     break;
4371   case ISD::FP_ROUND_INREG: {
4372     EVT EVT = cast<VTSDNode>(N2)->getVT();
4373     assert(VT == N1.getValueType() && "Not an inreg round!");
4374     assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
4375            "Cannot FP_ROUND_INREG integer types");
4376     assert(EVT.isVector() == VT.isVector() &&
4377            "FP_ROUND_INREG type should be vector iff the operand "
4378            "type is vector!");
4379     assert((!EVT.isVector() ||
4380             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4381            "Vector element counts must match in FP_ROUND_INREG");
4382     assert(EVT.bitsLE(VT) && "Not rounding down!");
4383     (void)EVT;
4384     if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
4385     break;
4386   }
4387   case ISD::FP_ROUND:
4388     assert(VT.isFloatingPoint() &&
4389            N1.getValueType().isFloatingPoint() &&
4390            VT.bitsLE(N1.getValueType()) &&
4391            N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
4392            "Invalid FP_ROUND!");
4393     if (N1.getValueType() == VT) return N1;  // noop conversion.
4394     break;
4395   case ISD::AssertSext:
4396   case ISD::AssertZext: {
4397     EVT EVT = cast<VTSDNode>(N2)->getVT();
4398     assert(VT == N1.getValueType() && "Not an inreg extend!");
4399     assert(VT.isInteger() && EVT.isInteger() &&
4400            "Cannot *_EXTEND_INREG FP types");
4401     assert(!EVT.isVector() &&
4402            "AssertSExt/AssertZExt type should be the vector element type "
4403            "rather than the vector type!");
4404     assert(EVT.bitsLE(VT) && "Not extending!");
4405     if (VT == EVT) return N1; // noop assertion.
4406 break; 4407 } 4408 case ISD::SIGN_EXTEND_INREG: { 4409 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4410 assert(VT == N1.getValueType() && "Not an inreg extend!"); 4411 assert(VT.isInteger() && EVT.isInteger() && 4412 "Cannot *_EXTEND_INREG FP types"); 4413 assert(EVT.isVector() == VT.isVector() && 4414 "SIGN_EXTEND_INREG type should be vector iff the operand " 4415 "type is vector!"); 4416 assert((!EVT.isVector() || 4417 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 4418 "Vector element counts must match in SIGN_EXTEND_INREG"); 4419 assert(EVT.bitsLE(VT) && "Not extending!"); 4420 if (EVT == VT) return N1; // Not actually extending 4421 4422 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 4423 unsigned FromBits = EVT.getScalarSizeInBits(); 4424 Val <<= Val.getBitWidth() - FromBits; 4425 Val.ashrInPlace(Val.getBitWidth() - FromBits); 4426 return getConstant(Val, DL, ConstantVT); 4427 }; 4428 4429 if (N1C) { 4430 const APInt &Val = N1C->getAPIntValue(); 4431 return SignExtendInReg(Val, VT); 4432 } 4433 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 4434 SmallVector<SDValue, 8> Ops; 4435 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 4436 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 4437 SDValue Op = N1.getOperand(i); 4438 if (Op.isUndef()) { 4439 Ops.push_back(getUNDEF(OpVT)); 4440 continue; 4441 } 4442 ConstantSDNode *C = cast<ConstantSDNode>(Op); 4443 APInt Val = C->getAPIntValue(); 4444 Ops.push_back(SignExtendInReg(Val, OpVT)); 4445 } 4446 return getBuildVector(VT, DL, Ops); 4447 } 4448 break; 4449 } 4450 case ISD::EXTRACT_VECTOR_ELT: 4451 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 4452 if (N1.isUndef()) 4453 return getUNDEF(VT); 4454 4455 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 4456 if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 4457 return getUNDEF(VT); 4458 4459 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 4460 // expanding copies of large vectors from registers. 4461 if (N2C && 4462 N1.getOpcode() == ISD::CONCAT_VECTORS && 4463 N1.getNumOperands() > 0) { 4464 unsigned Factor = 4465 N1.getOperand(0).getValueType().getVectorNumElements(); 4466 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 4467 N1.getOperand(N2C->getZExtValue() / Factor), 4468 getConstant(N2C->getZExtValue() % Factor, DL, 4469 N2.getValueType())); 4470 } 4471 4472 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 4473 // expanding large vector constants. 4474 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { 4475 SDValue Elt = N1.getOperand(N2C->getZExtValue()); 4476 4477 if (VT != Elt.getValueType()) 4478 // If the vector element type is not legal, the BUILD_VECTOR operands 4479 // are promoted and implicitly truncated, and the result implicitly 4480 // extended. Make that explicit here. 4481 Elt = getAnyExtOrTrunc(Elt, DL, VT); 4482 4483 return Elt; 4484 } 4485 4486 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 4487 // operations are lowered to scalars. 4488 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 4489 // If the indices are the same, return the inserted element else 4490 // if the indices are known different, extract the element from 4491 // the original vector. 
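      // E.g. extractelt (insertelt V, X, 0), 0 -> X, while
      // extractelt (insertelt V, X, 0), 1 -> extractelt V, 1.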
4492       SDValue N1Op2 = N1.getOperand(2);
4493       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
4494 
4495       if (N1Op2C && N2C) {
4496         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
4497           if (VT == N1.getOperand(1).getValueType())
4498             return N1.getOperand(1);
4499           else
4500             return getSExtOrTrunc(N1.getOperand(1), DL, VT);
4501         }
4502 
4503         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
4504       }
4505     }
4506 
4507     // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
4508     // when vector types are scalarized and v1iX is legal.
4509     // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
4510     if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
4511         N1.getValueType().getVectorNumElements() == 1) {
4512       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
4513                      N1.getOperand(1));
4514     }
4515     break;
4516   case ISD::EXTRACT_ELEMENT:
4517     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
4518     assert(!N1.getValueType().isVector() && !VT.isVector() &&
4519            (N1.getValueType().isInteger() == VT.isInteger()) &&
4520            N1.getValueType() != VT &&
4521            "Wrong types for EXTRACT_ELEMENT!");
4522 
4523     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
4524     // 64-bit integers into 32-bit parts. Instead of building the extract of
4525     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
4526     if (N1.getOpcode() == ISD::BUILD_PAIR)
4527       return N1.getOperand(N2C->getZExtValue());
4528 
4529     // EXTRACT_ELEMENT of a constant int is also very common.
4530     if (N1C) {
4531       unsigned ElementSize = VT.getSizeInBits();
4532       unsigned Shift = ElementSize * N2C->getZExtValue();
4533       APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
4534       return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
4535     }
4536     break;
4537   case ISD::EXTRACT_SUBVECTOR:
4538     if (VT.isSimple() && N1.getValueType().isSimple()) {
4539       assert(VT.isVector() && N1.getValueType().isVector() &&
4540              "Extract subvector VTs must be vectors!");
4541       assert(VT.getVectorElementType() ==
4542              N1.getValueType().getVectorElementType() &&
4543              "Extract subvector VTs must have the same element type!");
4544       assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
4545              "Extract subvector must be from larger vector to smaller vector!");
4546 
4547       if (N2C) {
4548         assert((VT.getVectorNumElements() + N2C->getZExtValue()
4549                 <= N1.getValueType().getVectorNumElements())
4550                && "Extract subvector overflow!");
4551       }
4552 
4553       // Trivial extraction.
4554       if (VT.getSimpleVT() == N1.getSimpleValueType())
4555         return N1;
4556 
4557       // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
4558       if (N1.isUndef())
4559         return getUNDEF(VT);
4560 
4561       // EXTRACT_SUBVECTOR of CONCAT_VECTORS can be simplified if the pieces of
4562       // the concat have the same type as the extract.
4563       if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
4564           N1.getNumOperands() > 0 &&
4565           VT == N1.getOperand(0).getValueType()) {
4566         unsigned Factor = VT.getVectorNumElements();
4567         return N1.getOperand(N2C->getZExtValue() / Factor);
4568       }
4569 
4570       // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
4571       // during shuffle legalization.
4572       if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
4573           VT == N1.getOperand(1).getValueType())
4574         return N1.getOperand(1);
4575     }
4576     break;
4577   }
4578 
4579   // Perform trivial constant folding.
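  // E.g. (xor C1, C2) with two integer constant operands becomes a single
  // constant node here.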
4580 if (SDValue SV = 4581 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode())) 4582 return SV; 4583 4584 // Constant fold FP operations. 4585 bool HasFPExceptions = TLI->hasFloatingPointExceptions(); 4586 if (N1CFP) { 4587 if (N2CFP) { 4588 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF(); 4589 APFloat::opStatus s; 4590 switch (Opcode) { 4591 case ISD::FADD: 4592 s = V1.add(V2, APFloat::rmNearestTiesToEven); 4593 if (!HasFPExceptions || s != APFloat::opInvalidOp) 4594 return getConstantFP(V1, DL, VT); 4595 break; 4596 case ISD::FSUB: 4597 s = V1.subtract(V2, APFloat::rmNearestTiesToEven); 4598 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 4599 return getConstantFP(V1, DL, VT); 4600 break; 4601 case ISD::FMUL: 4602 s = V1.multiply(V2, APFloat::rmNearestTiesToEven); 4603 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 4604 return getConstantFP(V1, DL, VT); 4605 break; 4606 case ISD::FDIV: 4607 s = V1.divide(V2, APFloat::rmNearestTiesToEven); 4608 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 4609 s!=APFloat::opDivByZero)) { 4610 return getConstantFP(V1, DL, VT); 4611 } 4612 break; 4613 case ISD::FREM : 4614 s = V1.mod(V2); 4615 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 4616 s!=APFloat::opDivByZero)) { 4617 return getConstantFP(V1, DL, VT); 4618 } 4619 break; 4620 case ISD::FCOPYSIGN: 4621 V1.copySign(V2); 4622 return getConstantFP(V1, DL, VT); 4623 default: break; 4624 } 4625 } 4626 4627 if (Opcode == ISD::FP_ROUND) { 4628 APFloat V = N1CFP->getValueAPF(); // make copy 4629 bool ignored; 4630 // This can return overflow, underflow, or inexact; we don't care. 4631 // FIXME need to be more flexible about rounding mode. 4632 (void)V.convert(EVTToAPFloatSemantics(VT), 4633 APFloat::rmNearestTiesToEven, &ignored); 4634 return getConstantFP(V, DL, VT); 4635 } 4636 } 4637 4638 // Canonicalize an UNDEF to the RHS, even over a constant. 4639 if (N1.isUndef()) { 4640 if (TLI->isCommutativeBinOp(Opcode)) { 4641 std::swap(N1, N2); 4642 } else { 4643 switch (Opcode) { 4644 case ISD::FP_ROUND_INREG: 4645 case ISD::SIGN_EXTEND_INREG: 4646 case ISD::SUB: 4647 case ISD::FSUB: 4648 case ISD::FDIV: 4649 case ISD::FREM: 4650 case ISD::SRA: 4651 return N1; // fold op(undef, arg2) -> undef 4652 case ISD::UDIV: 4653 case ISD::SDIV: 4654 case ISD::UREM: 4655 case ISD::SREM: 4656 case ISD::SRL: 4657 case ISD::SHL: 4658 if (!VT.isVector()) 4659 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 4660 // For vectors, we can't easily build an all zero vector, just return 4661 // the LHS. 4662 return N2; 4663 } 4664 } 4665 } 4666 4667 // Fold a bunch of operators when the RHS is undef. 4668 if (N2.isUndef()) { 4669 switch (Opcode) { 4670 case ISD::XOR: 4671 if (N1.isUndef()) 4672 // Handle undef ^ undef -> 0 special case. This is a common 4673 // idiom (misuse). 4674 return getConstant(0, DL, VT); 4675 LLVM_FALLTHROUGH; 4676 case ISD::ADD: 4677 case ISD::ADDC: 4678 case ISD::ADDE: 4679 case ISD::SUB: 4680 case ISD::UDIV: 4681 case ISD::SDIV: 4682 case ISD::UREM: 4683 case ISD::SREM: 4684 return N2; // fold op(arg1, undef) -> undef 4685 case ISD::FADD: 4686 case ISD::FSUB: 4687 case ISD::FMUL: 4688 case ISD::FDIV: 4689 case ISD::FREM: 4690 if (getTarget().Options.UnsafeFPMath) 4691 return N2; 4692 break; 4693 case ISD::MUL: 4694 case ISD::AND: 4695 case ISD::SRL: 4696 case ISD::SHL: 4697 if (!VT.isVector()) 4698 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 4699 // For vectors, we can't easily build an all zero vector, just return 4700 // the LHS. 
4701 return N1; 4702 case ISD::OR: 4703 if (!VT.isVector()) 4704 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT); 4705 // For vectors, we can't easily build an all one vector, just return 4706 // the LHS. 4707 return N1; 4708 case ISD::SRA: 4709 return N1; 4710 } 4711 } 4712 4713 // Memoize this node if possible. 4714 SDNode *N; 4715 SDVTList VTs = getVTList(VT); 4716 SDValue Ops[] = {N1, N2}; 4717 if (VT != MVT::Glue) { 4718 FoldingSetNodeID ID; 4719 AddNodeIDNode(ID, Opcode, VTs, Ops); 4720 void *IP = nullptr; 4721 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4722 E->intersectFlagsWith(Flags); 4723 return SDValue(E, 0); 4724 } 4725 4726 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4727 N->setFlags(Flags); 4728 createOperands(N, Ops); 4729 CSEMap.InsertNode(N, IP); 4730 } else { 4731 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4732 createOperands(N, Ops); 4733 } 4734 4735 InsertNode(N); 4736 SDValue V = SDValue(N, 0); 4737 NewSDValueDbgMsg(V, "Creating new node: ", this); 4738 return V; 4739 } 4740 4741 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4742 SDValue N1, SDValue N2, SDValue N3) { 4743 // Perform various simplifications. 4744 switch (Opcode) { 4745 case ISD::FMA: { 4746 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 4747 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 4748 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 4749 if (N1CFP && N2CFP && N3CFP) { 4750 APFloat V1 = N1CFP->getValueAPF(); 4751 const APFloat &V2 = N2CFP->getValueAPF(); 4752 const APFloat &V3 = N3CFP->getValueAPF(); 4753 APFloat::opStatus s = 4754 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 4755 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp) 4756 return getConstantFP(V1, DL, VT); 4757 } 4758 break; 4759 } 4760 case ISD::CONCAT_VECTORS: { 4761 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 4762 SDValue Ops[] = {N1, N2, N3}; 4763 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 4764 return V; 4765 break; 4766 } 4767 case ISD::SETCC: { 4768 // Use FoldSetCC to simplify SETCC's. 4769 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 4770 return V; 4771 // Vector constant folding. 
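// Illustrative sketch: when both operands are constant BUILD_VECTORs, e.g. setcc eq, (build_vector 1, 2), (build_vector 1, 3), the comparison may fold element-wise to a constant boolean vector such as <-1, 0>; the encoding of the true/false lanes is target-dependent (TLI boolean contents).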
4772 SDValue Ops[] = {N1, N2, N3}; 4773 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 4774 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 4775 return V; 4776 } 4777 break; 4778 } 4779 case ISD::SELECT: 4780 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 4781 if (N1C->getZExtValue()) 4782 return N2; // select true, X, Y -> X 4783 return N3; // select false, X, Y -> Y 4784 } 4785 4786 if (N2 == N3) return N2; // select C, X, X -> X 4787 break; 4788 case ISD::VECTOR_SHUFFLE: 4789 llvm_unreachable("should use getVectorShuffle constructor!"); 4790 case ISD::INSERT_VECTOR_ELT: { 4791 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 4792 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF 4793 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 4794 return getUNDEF(VT); 4795 break; 4796 } 4797 case ISD::INSERT_SUBVECTOR: { 4798 SDValue Index = N3; 4799 if (VT.isSimple() && N1.getValueType().isSimple() 4800 && N2.getValueType().isSimple()) { 4801 assert(VT.isVector() && N1.getValueType().isVector() && 4802 N2.getValueType().isVector() && 4803 "Insert subvector VTs must both be vectors"); 4804 assert(VT == N1.getValueType() && 4805 "Dest and insert subvector source types must match!"); 4806 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() && 4807 "Insert subvector must be from smaller vector to larger vector!"); 4808 if (isa<ConstantSDNode>(Index)) { 4809 assert((N2.getValueType().getVectorNumElements() + 4810 cast<ConstantSDNode>(Index)->getZExtValue() 4811 <= VT.getVectorNumElements()) 4812 && "Insert subvector overflow!"); 4813 } 4814 4815 // Trivial insertion. 4816 if (VT.getSimpleVT() == N2.getSimpleValueType()) 4817 return N2; 4818 } 4819 break; 4820 } 4821 case ISD::BITCAST: 4822 // Fold bit_convert nodes from a type to themselves. 4823 if (N1.getValueType() == VT) 4824 return N1; 4825 break; 4826 } 4827 4828 // Memoize node if it doesn't produce a flag. 4829 SDNode *N; 4830 SDVTList VTs = getVTList(VT); 4831 SDValue Ops[] = {N1, N2, N3}; 4832 if (VT != MVT::Glue) { 4833 FoldingSetNodeID ID; 4834 AddNodeIDNode(ID, Opcode, VTs, Ops); 4835 void *IP = nullptr; 4836 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4837 return SDValue(E, 0); 4838 4839 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4840 createOperands(N, Ops); 4841 CSEMap.InsertNode(N, IP); 4842 } else { 4843 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4844 createOperands(N, Ops); 4845 } 4846 4847 InsertNode(N); 4848 SDValue V = SDValue(N, 0); 4849 NewSDValueDbgMsg(V, "Creating new node: ", this); 4850 return V; 4851 } 4852 4853 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4854 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 4855 SDValue Ops[] = { N1, N2, N3, N4 }; 4856 return getNode(Opcode, DL, VT, Ops); 4857 } 4858 4859 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4860 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 4861 SDValue N5) { 4862 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 4863 return getNode(Opcode, DL, VT, Ops); 4864 } 4865 4866 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 4867 /// the incoming stack arguments to be loaded from the stack. 4868 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 4869 SmallVector<SDValue, 8> ArgChains; 4870 4871 // Include the original chain at the beginning of the list.
When this is 4872 // used by target LowerCall hooks, this helps legalize find the 4873 // CALLSEQ_BEGIN node. 4874 ArgChains.push_back(Chain); 4875 4876 // Add a chain value for each stack argument. 4877 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 4878 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 4879 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 4880 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 4881 if (FI->getIndex() < 0) 4882 ArgChains.push_back(SDValue(L, 1)); 4883 4884 // Build a tokenfactor for all the chains. 4885 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 4886 } 4887 4888 /// getMemsetValue - Vectorized representation of the memset value 4889 /// operand. 4890 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 4891 const SDLoc &dl) { 4892 assert(!Value.isUndef()); 4893 4894 unsigned NumBits = VT.getScalarSizeInBits(); 4895 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 4896 assert(C->getAPIntValue().getBitWidth() == 8); 4897 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 4898 if (VT.isInteger()) 4899 return DAG.getConstant(Val, dl, VT); 4900 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 4901 VT); 4902 } 4903 4904 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 4905 EVT IntVT = VT.getScalarType(); 4906 if (!IntVT.isInteger()) 4907 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 4908 4909 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 4910 if (NumBits > 8) { 4911 // Use a multiplication with 0x010101... to extend the input to the 4912 // required length. 4913 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 4914 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 4915 DAG.getConstant(Magic, dl, IntVT)); 4916 } 4917 4918 if (VT != Value.getValueType() && !VT.isInteger()) 4919 Value = DAG.getBitcast(VT.getScalarType(), Value); 4920 if (VT != Value.getValueType()) 4921 Value = DAG.getSplatBuildVector(VT, dl, Value); 4922 4923 return Value; 4924 } 4925 4926 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 4927 /// used when a memcpy is turned into a memset when the source is a constant 4928 /// string ptr. 4929 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 4930 const TargetLowering &TLI, 4931 const ConstantDataArraySlice &Slice) { 4932 // Handle vector with all elements zero. 4933 if (Slice.Array == nullptr) { 4934 if (VT.isInteger()) 4935 return DAG.getConstant(0, dl, VT); 4936 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 4937 return DAG.getConstantFP(0.0, dl, VT); 4938 else if (VT.isVector()) { 4939 unsigned NumElts = VT.getVectorNumElements(); 4940 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? 
MVT::i32 : MVT::i64; 4941 return DAG.getNode(ISD::BITCAST, dl, VT, 4942 DAG.getConstant(0, dl, 4943 EVT::getVectorVT(*DAG.getContext(), 4944 EltVT, NumElts))); 4945 } else 4946 llvm_unreachable("Expected type!"); 4947 } 4948 4949 assert(!VT.isVector() && "Can't handle vector type here!"); 4950 unsigned NumVTBits = VT.getSizeInBits(); 4951 unsigned NumVTBytes = NumVTBits / 8; 4952 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 4953 4954 APInt Val(NumVTBits, 0); 4955 if (DAG.getDataLayout().isLittleEndian()) { 4956 for (unsigned i = 0; i != NumBytes; ++i) 4957 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 4958 } else { 4959 for (unsigned i = 0; i != NumBytes; ++i) 4960 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 4961 } 4962 4963 // If the "cost" of materializing the integer immediate is less than the cost 4964 // of a load, then it is cost effective to turn the load into the immediate. 4965 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 4966 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 4967 return DAG.getConstant(Val, dl, VT); 4968 return SDValue(nullptr, 0); 4969 } 4970 4971 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset, 4972 const SDLoc &DL) { 4973 EVT VT = Base.getValueType(); 4974 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT)); 4975 } 4976 4977 /// Returns true if memcpy source is constant data. 4978 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 4979 uint64_t SrcDelta = 0; 4980 GlobalAddressSDNode *G = nullptr; 4981 if (Src.getOpcode() == ISD::GlobalAddress) 4982 G = cast<GlobalAddressSDNode>(Src); 4983 else if (Src.getOpcode() == ISD::ADD && 4984 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 4985 Src.getOperand(1).getOpcode() == ISD::Constant) { 4986 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 4987 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 4988 } 4989 if (!G) 4990 return false; 4991 4992 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 4993 SrcDelta + G->getOffset()); 4994 } 4995 4996 /// Determines the optimal series of memory ops to replace the memset / memcpy. 4997 /// Return true if the number of memory ops is below the threshold (Limit). 4998 /// It returns the types of the sequence of memory ops to perform 4999 /// memset / memcpy by reference. 5000 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps, 5001 unsigned Limit, uint64_t Size, 5002 unsigned DstAlign, unsigned SrcAlign, 5003 bool IsMemset, 5004 bool ZeroMemset, 5005 bool MemcpyStrSrc, 5006 bool AllowOverlap, 5007 unsigned DstAS, unsigned SrcAS, 5008 SelectionDAG &DAG, 5009 const TargetLowering &TLI) { 5010 assert((SrcAlign == 0 || SrcAlign >= DstAlign) && 5011 "Expecting memcpy / memset source to meet alignment requirement!"); 5012 // If 'SrcAlign' is zero, that means the memory operation does not need to 5013 // load the value, i.e. memset or memcpy from constant string. Otherwise, 5014 // it's the inferred alignment of the source. 'DstAlign', on the other hand, 5015 // is the specified alignment of the memory operation. If it is zero, that 5016 // means it's possible to change the alignment of the destination. 5017 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does 5018 // not need to be loaded. 
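// Illustrative outcome (a sketch for a 64-bit target with legal i64 and adequate alignment): for Size = 15 this typically produces MemOps = { i64, i32, i16, i8 }, or { i64, i64 } with one overlapping store when AllowOverlap and fast misaligned accesses permit it.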
5019 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign, 5020 IsMemset, ZeroMemset, MemcpyStrSrc, 5021 DAG.getMachineFunction()); 5022 5023 if (VT == MVT::Other) { 5024 // Use the largest integer type whose alignment constraints are satisfied. 5025 // We only need to check DstAlign here as SrcAlign is always greater or 5026 // equal to DstAlign (or zero). 5027 VT = MVT::i64; 5028 while (DstAlign && DstAlign < VT.getSizeInBits() / 8 && 5029 !TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign)) 5030 VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1); 5031 assert(VT.isInteger()); 5032 5033 // Find the largest legal integer type. 5034 MVT LVT = MVT::i64; 5035 while (!TLI.isTypeLegal(LVT)) 5036 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1); 5037 assert(LVT.isInteger()); 5038 5039 // If the type we've chosen is larger than the largest legal integer type 5040 // then use that instead. 5041 if (VT.bitsGT(LVT)) 5042 VT = LVT; 5043 } 5044 5045 unsigned NumMemOps = 0; 5046 while (Size != 0) { 5047 unsigned VTSize = VT.getSizeInBits() / 8; 5048 while (VTSize > Size) { 5049 // For now, only use non-vector loads / stores for the left-over pieces. 5050 EVT NewVT = VT; 5051 unsigned NewVTSize; 5052 5053 bool Found = false; 5054 if (VT.isVector() || VT.isFloatingPoint()) { 5055 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32; 5056 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) && 5057 TLI.isSafeMemOpType(NewVT.getSimpleVT())) 5058 Found = true; 5059 else if (NewVT == MVT::i64 && 5060 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) && 5061 TLI.isSafeMemOpType(MVT::f64)) { 5062 // i64 is usually not legal on 32-bit targets, but f64 may be. 5063 NewVT = MVT::f64; 5064 Found = true; 5065 } 5066 } 5067 5068 if (!Found) { 5069 do { 5070 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1); 5071 if (NewVT == MVT::i8) 5072 break; 5073 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT())); 5074 } 5075 NewVTSize = NewVT.getSizeInBits() / 8; 5076 5077 // If the new VT cannot cover all of the remaining bits, then consider 5078 // issuing a (or a pair of) unaligned and overlapping load / store. 5079 // FIXME: Only does this for 64-bit or more since we don't have proper 5080 // cost model for unaligned load / store. 5081 bool Fast; 5082 if (NumMemOps && AllowOverlap && 5083 VTSize >= 8 && NewVTSize < Size && 5084 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast) 5085 VTSize = Size; 5086 else { 5087 VT = NewVT; 5088 VTSize = NewVTSize; 5089 } 5090 } 5091 5092 if (++NumMemOps > Limit) 5093 return false; 5094 5095 MemOps.push_back(VT); 5096 Size -= VTSize; 5097 } 5098 5099 return true; 5100 } 5101 5102 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { 5103 // On Darwin, -Os means optimize for size without hurting performance, so 5104 // only really optimize for size when -Oz (MinSize) is used. 5105 if (MF.getTarget().getTargetTriple().isOSDarwin()) 5106 return MF.getFunction().optForMinSize(); 5107 return MF.getFunction().optForSize(); 5108 } 5109 5110 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5111 SDValue Chain, SDValue Dst, SDValue Src, 5112 uint64_t Size, unsigned Align, 5113 bool isVol, bool AlwaysInline, 5114 MachinePointerInfo DstPtrInfo, 5115 MachinePointerInfo SrcPtrInfo) { 5116 // Turn a memcpy of undef to nop. 5117 if (Src.isUndef()) 5118 return Chain; 5119 5120 // Expand memcpy to a series of load and store ops if the size operand falls 5121 // below a certain threshold.
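// Illustrative: an aligned 16-byte memcpy typically becomes two i64 (or a single vector) load/store pairs here instead of a library call.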
5122 // TODO: In the AlwaysInline case, if the size is big then generate a loop 5123 // rather than maybe a humongous number of loads and stores. 5124 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5125 const DataLayout &DL = DAG.getDataLayout(); 5126 LLVMContext &C = *DAG.getContext(); 5127 std::vector<EVT> MemOps; 5128 bool DstAlignCanChange = false; 5129 MachineFunction &MF = DAG.getMachineFunction(); 5130 MachineFrameInfo &MFI = MF.getFrameInfo(); 5131 bool OptSize = shouldLowerMemFuncForSize(MF); 5132 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5133 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5134 DstAlignCanChange = true; 5135 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5136 if (Align > SrcAlign) 5137 SrcAlign = Align; 5138 ConstantDataArraySlice Slice; 5139 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); 5140 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 5141 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 5142 5143 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 5144 (DstAlignCanChange ? 0 : Align), 5145 (isZeroConstant ? 0 : SrcAlign), 5146 false, false, CopyFromConstant, true, 5147 DstPtrInfo.getAddrSpace(), 5148 SrcPtrInfo.getAddrSpace(), 5149 DAG, TLI)) 5150 return SDValue(); 5151 5152 if (DstAlignCanChange) { 5153 Type *Ty = MemOps[0].getTypeForEVT(C); 5154 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5155 5156 // Don't promote to an alignment that would require dynamic stack 5157 // realignment. 5158 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 5159 if (!TRI->needsStackRealignment(MF)) 5160 while (NewAlign > Align && 5161 DL.exceedsNaturalStackAlignment(NewAlign)) 5162 NewAlign /= 2; 5163 5164 if (NewAlign > Align) { 5165 // Give the stack frame object a larger alignment if needed. 5166 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5167 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5168 Align = NewAlign; 5169 } 5170 } 5171 5172 MachineMemOperand::Flags MMOFlags = 5173 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5174 SmallVector<SDValue, 8> OutChains; 5175 unsigned NumMemOps = MemOps.size(); 5176 uint64_t SrcOff = 0, DstOff = 0; 5177 for (unsigned i = 0; i != NumMemOps; ++i) { 5178 EVT VT = MemOps[i]; 5179 unsigned VTSize = VT.getSizeInBits() / 8; 5180 SDValue Value, Store; 5181 5182 if (VTSize > Size) { 5183 // Issuing an unaligned load / store pair that overlaps with the previous 5184 // pair. Adjust the offset accordingly. 5185 assert(i == NumMemOps-1 && i != 0); 5186 SrcOff -= VTSize - Size; 5187 DstOff -= VTSize - Size; 5188 } 5189 5190 if (CopyFromConstant && 5191 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 5192 // It's unlikely a store of a vector immediate can be done in a single 5193 // instruction. It would require a load from a constantpool first. 5194 // We only handle zero vectors here. 5195 // FIXME: Handle other cases where store of vector immediate is done in 5196 // a single instruction. 5197 ConstantDataArraySlice SubSlice; 5198 if (SrcOff < Slice.Length) { 5199 SubSlice = Slice; 5200 SubSlice.move(SrcOff); 5201 } else { 5202 // This is an out-of-bounds access and hence UB. Pretend we read zero. 
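// (That is, the remaining VTSize bytes are materialized as zeros; getMemsetStringVal treats the null Array as an all-zero slice.)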
5203 SubSlice.Array = nullptr; 5204 SubSlice.Offset = 0; 5205 SubSlice.Length = VTSize; 5206 } 5207 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); 5208 if (Value.getNode()) 5209 Store = DAG.getStore(Chain, dl, Value, 5210 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5211 DstPtrInfo.getWithOffset(DstOff), Align, 5212 MMOFlags); 5213 } 5214 5215 if (!Store.getNode()) { 5216 // The type might not be legal for the target. This should only happen 5217 // if the type is smaller than a legal type, as on PPC, so the right 5218 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 5219 // to Load/Store if NVT==VT. 5220 // FIXME does the case above also need this? 5221 EVT NVT = TLI.getTypeToTransformTo(C, VT); 5222 assert(NVT.bitsGE(VT)); 5223 5224 bool isDereferenceable = 5225 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 5226 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 5227 if (isDereferenceable) 5228 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 5229 5230 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 5231 DAG.getMemBasePlusOffset(Src, SrcOff, dl), 5232 SrcPtrInfo.getWithOffset(SrcOff), VT, 5233 MinAlign(SrcAlign, SrcOff), SrcMMOFlags); 5234 OutChains.push_back(Value.getValue(1)); 5235 Store = DAG.getTruncStore( 5236 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5237 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags); 5238 } 5239 OutChains.push_back(Store); 5240 SrcOff += VTSize; 5241 DstOff += VTSize; 5242 Size -= VTSize; 5243 } 5244 5245 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5246 } 5247 5248 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5249 SDValue Chain, SDValue Dst, SDValue Src, 5250 uint64_t Size, unsigned Align, 5251 bool isVol, bool AlwaysInline, 5252 MachinePointerInfo DstPtrInfo, 5253 MachinePointerInfo SrcPtrInfo) { 5254 // Turn a memmove of undef to nop. 5255 if (Src.isUndef()) 5256 return Chain; 5257 5258 // Expand memmove to a series of load and store ops if the size operand falls 5259 // below a certain threshold. 5260 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5261 const DataLayout &DL = DAG.getDataLayout(); 5262 LLVMContext &C = *DAG.getContext(); 5263 std::vector<EVT> MemOps; 5264 bool DstAlignCanChange = false; 5265 MachineFunction &MF = DAG.getMachineFunction(); 5266 MachineFrameInfo &MFI = MF.getFrameInfo(); 5267 bool OptSize = shouldLowerMemFuncForSize(MF); 5268 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5269 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5270 DstAlignCanChange = true; 5271 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5272 if (Align > SrcAlign) 5273 SrcAlign = Align; 5274 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 5275 5276 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 5277 (DstAlignCanChange ? 0 : Align), SrcAlign, 5278 false, false, false, false, 5279 DstPtrInfo.getAddrSpace(), 5280 SrcPtrInfo.getAddrSpace(), 5281 DAG, TLI)) 5282 return SDValue(); 5283 5284 if (DstAlignCanChange) { 5285 Type *Ty = MemOps[0].getTypeForEVT(C); 5286 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5287 if (NewAlign > Align) { 5288 // Give the stack frame object a larger alignment if needed. 5289 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5290 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5291 Align = NewAlign; 5292 } 5293 } 5294 5295 MachineMemOperand::Flags MMOFlags = 5296 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5297 uint64_t SrcOff = 0, DstOff = 0; 5298 SmallVector<SDValue, 8> LoadValues; 5299 SmallVector<SDValue, 8> LoadChains; 5300 SmallVector<SDValue, 8> OutChains; 5301 unsigned NumMemOps = MemOps.size(); 5302 for (unsigned i = 0; i < NumMemOps; i++) { 5303 EVT VT = MemOps[i]; 5304 unsigned VTSize = VT.getSizeInBits() / 8; 5305 SDValue Value; 5306 5307 bool isDereferenceable = 5308 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 5309 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 5310 if (isDereferenceable) 5311 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 5312 5313 Value = 5314 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), 5315 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags); 5316 LoadValues.push_back(Value); 5317 LoadChains.push_back(Value.getValue(1)); 5318 SrcOff += VTSize; 5319 } 5320 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 5321 OutChains.clear(); 5322 for (unsigned i = 0; i < NumMemOps; i++) { 5323 EVT VT = MemOps[i]; 5324 unsigned VTSize = VT.getSizeInBits() / 8; 5325 SDValue Store; 5326 5327 Store = DAG.getStore(Chain, dl, LoadValues[i], 5328 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5329 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags); 5330 OutChains.push_back(Store); 5331 DstOff += VTSize; 5332 } 5333 5334 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5335 } 5336 5337 /// \brief Lower the call to 'memset' intrinsic function into a series of store 5338 /// operations. 5339 /// 5340 /// \param DAG Selection DAG where lowered code is placed. 5341 /// \param dl Link to corresponding IR location. 5342 /// \param Chain Control flow dependency. 5343 /// \param Dst Pointer to destination memory location. 5344 /// \param Src Value of byte to write into the memory. 5345 /// \param Size Number of bytes to write. 5346 /// \param Align Alignment of the destination in bytes. 5347 /// \param isVol True if destination is volatile. 5348 /// \param DstPtrInfo IR information on the memory pointer. 5349 /// \returns New head in the control flow, if lowering was successful, empty 5350 /// SDValue otherwise. 5351 /// 5352 /// The function tries to replace 'llvm.memset' intrinsic with several store 5353 /// operations and value calculation code. This is usually profitable for small 5354 /// memory size. 5355 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, 5356 SDValue Chain, SDValue Dst, SDValue Src, 5357 uint64_t Size, unsigned Align, bool isVol, 5358 MachinePointerInfo DstPtrInfo) { 5359 // Turn a memset of undef to nop. 5360 if (Src.isUndef()) 5361 return Chain; 5362 5363 // Expand memset to a series of load/store ops if the size operand 5364 // falls below a certain threshold. 5365 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5366 std::vector<EVT> MemOps; 5367 bool DstAlignCanChange = false; 5368 MachineFunction &MF = DAG.getMachineFunction(); 5369 MachineFrameInfo &MFI = MF.getFrameInfo(); 5370 bool OptSize = shouldLowerMemFuncForSize(MF); 5371 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5372 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5373 DstAlignCanChange = true; 5374 bool IsZeroVal = 5375 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 5376 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize), 5377 Size, (DstAlignCanChange ? 
0 : Align), 0, 5378 true, IsZeroVal, false, true, 5379 DstPtrInfo.getAddrSpace(), ~0u, 5380 DAG, TLI)) 5381 return SDValue(); 5382 5383 if (DstAlignCanChange) { 5384 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 5385 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 5386 if (NewAlign > Align) { 5387 // Give the stack frame object a larger alignment if needed. 5388 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5389 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5390 Align = NewAlign; 5391 } 5392 } 5393 5394 SmallVector<SDValue, 8> OutChains; 5395 uint64_t DstOff = 0; 5396 unsigned NumMemOps = MemOps.size(); 5397 5398 // Find the largest store and generate the bit pattern for it. 5399 EVT LargestVT = MemOps[0]; 5400 for (unsigned i = 1; i < NumMemOps; i++) 5401 if (MemOps[i].bitsGT(LargestVT)) 5402 LargestVT = MemOps[i]; 5403 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 5404 5405 for (unsigned i = 0; i < NumMemOps; i++) { 5406 EVT VT = MemOps[i]; 5407 unsigned VTSize = VT.getSizeInBits() / 8; 5408 if (VTSize > Size) { 5409 // Issuing an unaligned load / store pair that overlaps with the previous 5410 // pair. Adjust the offset accordingly. 5411 assert(i == NumMemOps-1 && i != 0); 5412 DstOff -= VTSize - Size; 5413 } 5414 5415 // If this store is smaller than the largest store see whether we can get 5416 // the smaller value for free with a truncate. 5417 SDValue Value = MemSetValue; 5418 if (VT.bitsLT(LargestVT)) { 5419 if (!LargestVT.isVector() && !VT.isVector() && 5420 TLI.isTruncateFree(LargestVT, VT)) 5421 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 5422 else 5423 Value = getMemsetValue(Src, VT, DAG, dl); 5424 } 5425 assert(Value.getValueType() == VT && "Value with wrong type."); 5426 SDValue Store = DAG.getStore( 5427 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5428 DstPtrInfo.getWithOffset(DstOff), Align, 5429 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 5430 OutChains.push_back(Store); 5431 DstOff += VT.getSizeInBits() / 8; 5432 Size -= VTSize; 5433 } 5434 5435 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5436 } 5437 5438 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 5439 unsigned AS) { 5440 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 5441 // pointer operands can be losslessly bitcasted to pointers of address space 0 5442 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 5443 report_fatal_error("cannot lower memory intrinsic in address space " + 5444 Twine(AS)); 5445 } 5446 } 5447 5448 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 5449 SDValue Src, SDValue Size, unsigned Align, 5450 bool isVol, bool AlwaysInline, bool isTailCall, 5451 MachinePointerInfo DstPtrInfo, 5452 MachinePointerInfo SrcPtrInfo) { 5453 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5454 5455 // Check to see if we should lower the memcpy to loads and stores first. 5456 // For cases within the target-specified limits, this is the best choice. 5457 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5458 if (ConstantSize) { 5459 // Memcpy with size zero? Just return the original chain. 
5460 if (ConstantSize->isNullValue()) 5461 return Chain; 5462 5463 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 5464 ConstantSize->getZExtValue(),Align, 5465 isVol, false, DstPtrInfo, SrcPtrInfo); 5466 if (Result.getNode()) 5467 return Result; 5468 } 5469 5470 // Then check to see if we should lower the memcpy with target-specific 5471 // code. If the target chooses to do this, this is the next best. 5472 if (TSI) { 5473 SDValue Result = TSI->EmitTargetCodeForMemcpy( 5474 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 5475 DstPtrInfo, SrcPtrInfo); 5476 if (Result.getNode()) 5477 return Result; 5478 } 5479 5480 // If we really need inline code and the target declined to provide it, 5481 // use a (potentially long) sequence of loads and stores. 5482 if (AlwaysInline) { 5483 assert(ConstantSize && "AlwaysInline requires a constant size!"); 5484 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 5485 ConstantSize->getZExtValue(), Align, isVol, 5486 true, DstPtrInfo, SrcPtrInfo); 5487 } 5488 5489 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5490 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 5491 5492 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 5493 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 5494 // respect volatile, so they may do things like read or write memory 5495 // beyond the given memory regions. But fixing this isn't easy, and most 5496 // people don't care. 5497 5498 // Emit a library call. 5499 TargetLowering::ArgListTy Args; 5500 TargetLowering::ArgListEntry Entry; 5501 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 5502 Entry.Node = Dst; Args.push_back(Entry); 5503 Entry.Node = Src; Args.push_back(Entry); 5504 Entry.Node = Size; Args.push_back(Entry); 5505 // FIXME: pass in SDLoc 5506 TargetLowering::CallLoweringInfo CLI(*this); 5507 CLI.setDebugLoc(dl) 5508 .setChain(Chain) 5509 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 5510 Dst.getValueType().getTypeForEVT(*getContext()), 5511 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 5512 TLI->getPointerTy(getDataLayout())), 5513 std::move(Args)) 5514 .setDiscardResult() 5515 .setTailCall(isTailCall); 5516 5517 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5518 return CallResult.second; 5519 } 5520 5521 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 5522 SDValue Src, SDValue Size, unsigned Align, 5523 bool isVol, bool isTailCall, 5524 MachinePointerInfo DstPtrInfo, 5525 MachinePointerInfo SrcPtrInfo) { 5526 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5527 5528 // Check to see if we should lower the memmove to loads and stores first. 5529 // For cases within the target-specified limits, this is the best choice. 5530 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5531 if (ConstantSize) { 5532 // Memmove with size zero? Just return the original chain. 5533 if (ConstantSize->isNullValue()) 5534 return Chain; 5535 5536 SDValue Result = 5537 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 5538 ConstantSize->getZExtValue(), Align, isVol, 5539 false, DstPtrInfo, SrcPtrInfo); 5540 if (Result.getNode()) 5541 return Result; 5542 } 5543 5544 // Then check to see if we should lower the memmove with target-specific 5545 // code. If the target chooses to do this, this is the next best. 
5546 if (TSI) { 5547 SDValue Result = TSI->EmitTargetCodeForMemmove( 5548 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 5549 if (Result.getNode()) 5550 return Result; 5551 } 5552 5553 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5554 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 5555 5556 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 5557 // not be safe. See memcpy above for more details. 5558 5559 // Emit a library call. 5560 TargetLowering::ArgListTy Args; 5561 TargetLowering::ArgListEntry Entry; 5562 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 5563 Entry.Node = Dst; Args.push_back(Entry); 5564 Entry.Node = Src; Args.push_back(Entry); 5565 Entry.Node = Size; Args.push_back(Entry); 5566 // FIXME: pass in SDLoc 5567 TargetLowering::CallLoweringInfo CLI(*this); 5568 CLI.setDebugLoc(dl) 5569 .setChain(Chain) 5570 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 5571 Dst.getValueType().getTypeForEVT(*getContext()), 5572 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 5573 TLI->getPointerTy(getDataLayout())), 5574 std::move(Args)) 5575 .setDiscardResult() 5576 .setTailCall(isTailCall); 5577 5578 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5579 return CallResult.second; 5580 } 5581 5582 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 5583 SDValue Src, SDValue Size, unsigned Align, 5584 bool isVol, bool isTailCall, 5585 MachinePointerInfo DstPtrInfo) { 5586 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5587 5588 // Check to see if we should lower the memset to stores first. 5589 // For cases within the target-specified limits, this is the best choice. 5590 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5591 if (ConstantSize) { 5592 // Memset with size zero? Just return the original chain. 5593 if (ConstantSize->isNullValue()) 5594 return Chain; 5595 5596 SDValue Result = 5597 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 5598 Align, isVol, DstPtrInfo); 5599 5600 if (Result.getNode()) 5601 return Result; 5602 } 5603 5604 // Then check to see if we should lower the memset with target-specific 5605 // code. If the target chooses to do this, this is the next best. 5606 if (TSI) { 5607 SDValue Result = TSI->EmitTargetCodeForMemset( 5608 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); 5609 if (Result.getNode()) 5610 return Result; 5611 } 5612 5613 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5614 5615 // Emit a library call. 
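// The emitted call matches the C signature memset(dst, value, size); the result is unused, hence setDiscardResult() below.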
5616 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext()); 5617 TargetLowering::ArgListTy Args; 5618 TargetLowering::ArgListEntry Entry; 5619 Entry.Node = Dst; Entry.Ty = IntPtrTy; 5620 Args.push_back(Entry); 5621 Entry.Node = Src; 5622 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 5623 Args.push_back(Entry); 5624 Entry.Node = Size; 5625 Entry.Ty = IntPtrTy; 5626 Args.push_back(Entry); 5627 5628 // FIXME: pass in SDLoc 5629 TargetLowering::CallLoweringInfo CLI(*this); 5630 CLI.setDebugLoc(dl) 5631 .setChain(Chain) 5632 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 5633 Dst.getValueType().getTypeForEVT(*getContext()), 5634 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 5635 TLI->getPointerTy(getDataLayout())), 5636 std::move(Args)) 5637 .setDiscardResult() 5638 .setTailCall(isTailCall); 5639 5640 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5641 return CallResult.second; 5642 } 5643 5644 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5645 SDVTList VTList, ArrayRef<SDValue> Ops, 5646 MachineMemOperand *MMO) { 5647 FoldingSetNodeID ID; 5648 ID.AddInteger(MemVT.getRawBits()); 5649 AddNodeIDNode(ID, Opcode, VTList, Ops); 5650 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5651 void* IP = nullptr; 5652 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5653 cast<AtomicSDNode>(E)->refineAlignment(MMO); 5654 return SDValue(E, 0); 5655 } 5656 5657 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5658 VTList, MemVT, MMO); 5659 createOperands(N, Ops); 5660 5661 CSEMap.InsertNode(N, IP); 5662 InsertNode(N); 5663 return SDValue(N, 0); 5664 } 5665 5666 SDValue SelectionDAG::getAtomicCmpSwap( 5667 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, 5668 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, 5669 unsigned Alignment, AtomicOrdering SuccessOrdering, 5670 AtomicOrdering FailureOrdering, SyncScope::ID SSID) { 5671 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 5672 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 5673 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 5674 5675 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5676 Alignment = getEVTAlignment(MemVT); 5677 5678 MachineFunction &MF = getMachineFunction(); 5679 5680 // FIXME: Volatile isn't really correct; we should keep track of atomic 5681 // orderings in the memoperand. 
5682 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad | 5683 MachineMemOperand::MOStore; 5684 MachineMemOperand *MMO = 5685 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, 5686 AAMDNodes(), nullptr, SSID, SuccessOrdering, 5687 FailureOrdering); 5688 5689 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO); 5690 } 5691 5692 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 5693 EVT MemVT, SDVTList VTs, SDValue Chain, 5694 SDValue Ptr, SDValue Cmp, SDValue Swp, 5695 MachineMemOperand *MMO) { 5696 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 5697 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 5698 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 5699 5700 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 5701 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5702 } 5703 5704 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5705 SDValue Chain, SDValue Ptr, SDValue Val, 5706 const Value *PtrVal, unsigned Alignment, 5707 AtomicOrdering Ordering, 5708 SyncScope::ID SSID) { 5709 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5710 Alignment = getEVTAlignment(MemVT); 5711 5712 MachineFunction &MF = getMachineFunction(); 5713 // An atomic store does not load. An atomic load does not store. 5714 // (An atomicrmw obviously both loads and stores.) 5715 // For now, atomics are considered to be volatile always, and they are 5716 // chained as such. 5717 // FIXME: Volatile isn't really correct; we should keep track of atomic 5718 // orderings in the memoperand. 5719 auto Flags = MachineMemOperand::MOVolatile; 5720 if (Opcode != ISD::ATOMIC_STORE) 5721 Flags |= MachineMemOperand::MOLoad; 5722 if (Opcode != ISD::ATOMIC_LOAD) 5723 Flags |= MachineMemOperand::MOStore; 5724 5725 MachineMemOperand *MMO = 5726 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 5727 MemVT.getStoreSize(), Alignment, AAMDNodes(), 5728 nullptr, SSID, Ordering); 5729 5730 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO); 5731 } 5732 5733 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5734 SDValue Chain, SDValue Ptr, SDValue Val, 5735 MachineMemOperand *MMO) { 5736 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 5737 Opcode == ISD::ATOMIC_LOAD_SUB || 5738 Opcode == ISD::ATOMIC_LOAD_AND || 5739 Opcode == ISD::ATOMIC_LOAD_OR || 5740 Opcode == ISD::ATOMIC_LOAD_XOR || 5741 Opcode == ISD::ATOMIC_LOAD_NAND || 5742 Opcode == ISD::ATOMIC_LOAD_MIN || 5743 Opcode == ISD::ATOMIC_LOAD_MAX || 5744 Opcode == ISD::ATOMIC_LOAD_UMIN || 5745 Opcode == ISD::ATOMIC_LOAD_UMAX || 5746 Opcode == ISD::ATOMIC_SWAP || 5747 Opcode == ISD::ATOMIC_STORE) && 5748 "Invalid Atomic Op"); 5749 5750 EVT VT = Val.getValueType(); 5751 5752 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : 5753 getVTList(VT, MVT::Other); 5754 SDValue Ops[] = {Chain, Ptr, Val}; 5755 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5756 } 5757 5758 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5759 EVT VT, SDValue Chain, SDValue Ptr, 5760 MachineMemOperand *MMO) { 5761 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 5762 5763 SDVTList VTs = getVTList(VT, MVT::Other); 5764 SDValue Ops[] = {Chain, Ptr}; 5765 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5766 } 5767 5768 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 
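/// For example (illustrative): getMergeValues({Lo, Hi}, dl) with an i32 Lo and an i1 Hi yields a single node; callers retrieve the pieces as Result.getValue(0) and Result.getValue(1).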
5769 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 5770 if (Ops.size() == 1) 5771 return Ops[0]; 5772 5773 SmallVector<EVT, 4> VTs; 5774 VTs.reserve(Ops.size()); 5775 for (unsigned i = 0; i < Ops.size(); ++i) 5776 VTs.push_back(Ops[i].getValueType()); 5777 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 5778 } 5779 5780 SDValue SelectionDAG::getMemIntrinsicNode( 5781 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 5782 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, 5783 MachineMemOperand::Flags Flags, unsigned Size) { 5784 if (Align == 0) // Ensure that codegen never sees alignment 0 5785 Align = getEVTAlignment(MemVT); 5786 5787 if (!Size) 5788 Size = MemVT.getStoreSize(); 5789 5790 MachineFunction &MF = getMachineFunction(); 5791 MachineMemOperand *MMO = 5792 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align); 5793 5794 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 5795 } 5796 5797 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 5798 SDVTList VTList, 5799 ArrayRef<SDValue> Ops, EVT MemVT, 5800 MachineMemOperand *MMO) { 5801 assert((Opcode == ISD::INTRINSIC_VOID || 5802 Opcode == ISD::INTRINSIC_W_CHAIN || 5803 Opcode == ISD::PREFETCH || 5804 Opcode == ISD::LIFETIME_START || 5805 Opcode == ISD::LIFETIME_END || 5806 ((int)Opcode <= std::numeric_limits<int>::max() && 5807 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 5808 "Opcode is not a memory-accessing opcode!"); 5809 5810 // Memoize the node unless it returns a flag. 5811 MemIntrinsicSDNode *N; 5812 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 5813 FoldingSetNodeID ID; 5814 AddNodeIDNode(ID, Opcode, VTList, Ops); 5815 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 5816 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 5817 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5818 void *IP = nullptr; 5819 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5820 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 5821 return SDValue(E, 0); 5822 } 5823 5824 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5825 VTList, MemVT, MMO); 5826 createOperands(N, Ops); 5827 5828 CSEMap.InsertNode(N, IP); 5829 } else { 5830 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5831 VTList, MemVT, MMO); 5832 createOperands(N, Ops); 5833 } 5834 InsertNode(N); 5835 return SDValue(N, 0); 5836 } 5837 5838 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 5839 /// MachinePointerInfo record from it. This is particularly useful because the 5840 /// code generator has many cases where it doesn't bother passing in a 5841 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 5842 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 5843 SelectionDAG &DAG, SDValue Ptr, 5844 int64_t Offset = 0) { 5845 // If this is FI+Offset, we can model it. 5846 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 5847 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 5848 FI->getIndex(), Offset); 5849 5850 // If this is (FI+Offset1)+Offset2, we can model it. 
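// e.g. (illustrative) Ptr = (add (FrameIndex 1), (Constant 8)) with Offset = 4 maps to the fixed stack slot for FI#1 at byte offset 12.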
5851 if (Ptr.getOpcode() != ISD::ADD || 5852 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 5853 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 5854 return Info; 5855 5856 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 5857 return MachinePointerInfo::getFixedStack( 5858 DAG.getMachineFunction(), FI, 5859 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 5860 } 5861 5862 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 5863 /// MachinePointerInfo record from it. This is particularly useful because the 5864 /// code generator has many cases where it doesn't bother passing in a 5865 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 5866 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 5867 SelectionDAG &DAG, SDValue Ptr, 5868 SDValue OffsetOp) { 5869 // If the 'Offset' value isn't a constant, we can't handle this. 5870 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 5871 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); 5872 if (OffsetOp.isUndef()) 5873 return InferPointerInfo(Info, DAG, Ptr); 5874 return Info; 5875 } 5876 5877 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 5878 EVT VT, const SDLoc &dl, SDValue Chain, 5879 SDValue Ptr, SDValue Offset, 5880 MachinePointerInfo PtrInfo, EVT MemVT, 5881 unsigned Alignment, 5882 MachineMemOperand::Flags MMOFlags, 5883 const AAMDNodes &AAInfo, const MDNode *Ranges) { 5884 assert(Chain.getValueType() == MVT::Other && 5885 "Invalid chain type"); 5886 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5887 Alignment = getEVTAlignment(MemVT); 5888 5889 MMOFlags |= MachineMemOperand::MOLoad; 5890 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 5891 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 5892 // clients. 5893 if (PtrInfo.V.isNull()) 5894 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); 5895 5896 MachineFunction &MF = getMachineFunction(); 5897 MachineMemOperand *MMO = MF.getMachineMemOperand( 5898 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges); 5899 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 5900 } 5901 5902 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 5903 EVT VT, const SDLoc &dl, SDValue Chain, 5904 SDValue Ptr, SDValue Offset, EVT MemVT, 5905 MachineMemOperand *MMO) { 5906 if (VT == MemVT) { 5907 ExtType = ISD::NON_EXTLOAD; 5908 } else if (ExtType == ISD::NON_EXTLOAD) { 5909 assert(VT == MemVT && "Non-extending load from different memory type!"); 5910 } else { 5911 // Extending load. 5912 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 5913 "Should only be an extending load, not truncating!"); 5914 assert(VT.isInteger() == MemVT.isInteger() && 5915 "Cannot convert from FP to Int or Int -> FP!"); 5916 assert(VT.isVector() == MemVT.isVector() && 5917 "Cannot use an ext load to convert to or from a vector!"); 5918 assert((!VT.isVector() || 5919 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 5920 "Cannot use an ext load to change the number of vector elements!"); 5921 } 5922 5923 bool Indexed = AM != ISD::UNINDEXED; 5924 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 5925 5926 SDVTList VTs = Indexed ? 
5927 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); 5928 SDValue Ops[] = { Chain, Ptr, Offset }; 5929 FoldingSetNodeID ID; 5930 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); 5931 ID.AddInteger(MemVT.getRawBits()); 5932 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( 5933 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); 5934 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5935 void *IP = nullptr; 5936 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5937 cast<LoadSDNode>(E)->refineAlignment(MMO); 5938 return SDValue(E, 0); 5939 } 5940 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 5941 ExtType, MemVT, MMO); 5942 createOperands(N, Ops); 5943 5944 CSEMap.InsertNode(N, IP); 5945 InsertNode(N); 5946 SDValue V(N, 0); 5947 NewSDValueDbgMsg(V, "Creating new node: ", this); 5948 return V; 5949 } 5950 5951 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 5952 SDValue Ptr, MachinePointerInfo PtrInfo, 5953 unsigned Alignment, 5954 MachineMemOperand::Flags MMOFlags, 5955 const AAMDNodes &AAInfo, const MDNode *Ranges) { 5956 SDValue Undef = getUNDEF(Ptr.getValueType()); 5957 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 5958 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); 5959 } 5960 5961 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 5962 SDValue Ptr, MachineMemOperand *MMO) { 5963 SDValue Undef = getUNDEF(Ptr.getValueType()); 5964 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 5965 VT, MMO); 5966 } 5967 5968 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 5969 EVT VT, SDValue Chain, SDValue Ptr, 5970 MachinePointerInfo PtrInfo, EVT MemVT, 5971 unsigned Alignment, 5972 MachineMemOperand::Flags MMOFlags, 5973 const AAMDNodes &AAInfo) { 5974 SDValue Undef = getUNDEF(Ptr.getValueType()); 5975 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, 5976 MemVT, Alignment, MMOFlags, AAInfo); 5977 } 5978 5979 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 5980 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, 5981 MachineMemOperand *MMO) { 5982 SDValue Undef = getUNDEF(Ptr.getValueType()); 5983 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, 5984 MemVT, MMO); 5985 } 5986 5987 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, 5988 SDValue Base, SDValue Offset, 5989 ISD::MemIndexedMode AM) { 5990 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); 5991 assert(LD->getOffset().isUndef() && "Load is already an indexed load!"); 5992 // Don't propagate the invariant or dereferenceable flags.
5993 auto MMOFlags = 5994 LD->getMemOperand()->getFlags() & 5995 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 5996 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 5997 LD->getChain(), Base, Offset, LD->getPointerInfo(), 5998 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 5999 LD->getAAInfo()); 6000 } 6001 6002 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6003 SDValue Ptr, MachinePointerInfo PtrInfo, 6004 unsigned Alignment, 6005 MachineMemOperand::Flags MMOFlags, 6006 const AAMDNodes &AAInfo) { 6007 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 6008 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6009 Alignment = getEVTAlignment(Val.getValueType()); 6010 6011 MMOFlags |= MachineMemOperand::MOStore; 6012 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6013 6014 if (PtrInfo.V.isNull()) 6015 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6016 6017 MachineFunction &MF = getMachineFunction(); 6018 MachineMemOperand *MMO = MF.getMachineMemOperand( 6019 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo); 6020 return getStore(Chain, dl, Val, Ptr, MMO); 6021 } 6022 6023 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6024 SDValue Ptr, MachineMemOperand *MMO) { 6025 assert(Chain.getValueType() == MVT::Other && 6026 "Invalid chain type"); 6027 EVT VT = Val.getValueType(); 6028 SDVTList VTs = getVTList(MVT::Other); 6029 SDValue Undef = getUNDEF(Ptr.getValueType()); 6030 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6031 FoldingSetNodeID ID; 6032 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6033 ID.AddInteger(VT.getRawBits()); 6034 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6035 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 6036 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6037 void *IP = nullptr; 6038 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6039 cast<StoreSDNode>(E)->refineAlignment(MMO); 6040 return SDValue(E, 0); 6041 } 6042 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6043 ISD::UNINDEXED, false, VT, MMO); 6044 createOperands(N, Ops); 6045 6046 CSEMap.InsertNode(N, IP); 6047 InsertNode(N); 6048 SDValue V(N, 0); 6049 NewSDValueDbgMsg(V, "Creating new node: ", this); 6050 return V; 6051 } 6052 6053 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6054 SDValue Ptr, MachinePointerInfo PtrInfo, 6055 EVT SVT, unsigned Alignment, 6056 MachineMemOperand::Flags MMOFlags, 6057 const AAMDNodes &AAInfo) { 6058 assert(Chain.getValueType() == MVT::Other && 6059 "Invalid chain type"); 6060 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6061 Alignment = getEVTAlignment(SVT); 6062 6063 MMOFlags |= MachineMemOperand::MOStore; 6064 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6065 6066 if (PtrInfo.V.isNull()) 6067 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6068 6069 MachineFunction &MF = getMachineFunction(); 6070 MachineMemOperand *MMO = MF.getMachineMemOperand( 6071 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 6072 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 6073 } 6074 6075 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6076 SDValue Ptr, EVT SVT, 6077 MachineMemOperand *MMO) { 6078 EVT VT = Val.getValueType(); 6079 6080 assert(Chain.getValueType() == MVT::Other && 6081 "Invalid chain type"); 6082 if (VT == SVT) 6083 return getStore(Chain, dl, Val, Ptr, 
MMO); 6084 6085 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 6086 "Should only be a truncating store, not extending!"); 6087 assert(VT.isInteger() == SVT.isInteger() && 6088 "Can't do FP-INT conversion!"); 6089 assert(VT.isVector() == SVT.isVector() && 6090 "Cannot use trunc store to convert to or from a vector!"); 6091 assert((!VT.isVector() || 6092 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 6093 "Cannot use trunc store to change the number of vector elements!"); 6094 6095 SDVTList VTs = getVTList(MVT::Other); 6096 SDValue Undef = getUNDEF(Ptr.getValueType()); 6097 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6098 FoldingSetNodeID ID; 6099 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6100 ID.AddInteger(SVT.getRawBits()); 6101 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6102 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 6103 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6104 void *IP = nullptr; 6105 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6106 cast<StoreSDNode>(E)->refineAlignment(MMO); 6107 return SDValue(E, 0); 6108 } 6109 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6110 ISD::UNINDEXED, true, SVT, MMO); 6111 createOperands(N, Ops); 6112 6113 CSEMap.InsertNode(N, IP); 6114 InsertNode(N); 6115 SDValue V(N, 0); 6116 NewSDValueDbgMsg(V, "Creating new node: ", this); 6117 return V; 6118 } 6119 6120 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 6121 SDValue Base, SDValue Offset, 6122 ISD::MemIndexedMode AM) { 6123 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 6124 assert(ST->getOffset().isUndef() && "Store is already an indexed store!"); 6125 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 6126 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 6127 FoldingSetNodeID ID; 6128 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6129 ID.AddInteger(ST->getMemoryVT().getRawBits()); 6130 ID.AddInteger(ST->getRawSubclassData()); 6131 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 6132 void *IP = nullptr; 6133 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6134 return SDValue(E, 0); 6135 6136 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6137 ST->isTruncatingStore(), ST->getMemoryVT(), 6138 ST->getMemOperand()); 6139 createOperands(N, Ops); 6140 6141 CSEMap.InsertNode(N, IP); 6142 InsertNode(N); 6143 SDValue V(N, 0); 6144 NewSDValueDbgMsg(V, "Creating new node: ", this); 6145 return V; 6146 } 6147 6148 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6149 SDValue Ptr, SDValue Mask, SDValue Src0, 6150 EVT MemVT, MachineMemOperand *MMO, 6151 ISD::LoadExtType ExtTy, bool isExpanding) { 6152 SDVTList VTs = getVTList(VT, MVT::Other); 6153 SDValue Ops[] = { Chain, Ptr, Mask, Src0 }; 6154 FoldingSetNodeID ID; 6155 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 6156 ID.AddInteger(VT.getRawBits()); 6157 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 6158 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO)); 6159 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6160 void *IP = nullptr; 6161 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6162 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 6163 return SDValue(E, 0); 6164 } 6165 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6166 ExtTy, isExpanding, MemVT, MMO); 6167 createOperands(N, Ops); 6168 6169 CSEMap.InsertNode(N, IP); 6170 InsertNode(N); 6171 SDValue V(N, 0); 6172 NewSDValueDbgMsg(V, "Creating new node: ",
SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
                                     SDValue Val, SDValue Ptr, SDValue Mask,
                                     EVT MemVT, MachineMemOperand *MMO,
                                     bool IsTruncating, bool IsCompressing) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Mask, Val };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
      dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                         IsTruncating, IsCompressing, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
                                      ArrayRef<SDValue> Ops,
                                      MachineMemOperand *MMO) {
  assert(Ops.size() == 5 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
      dl.getIROrder(), VTs, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                          VTs, VT, MMO);
  createOperands(N, Ops);

  assert(N->getValue().getValueType() == N->getValueType(0) &&
         "Incompatible type of the PassThru value in MaskedGatherSDNode");
  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() ==
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between index and data");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
                                       ArrayRef<SDValue> Ops,
                                       MachineMemOperand *MMO) {
  assert(Ops.size() == 5 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
      dl.getIROrder(), VTs, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, VT, MMO);
  createOperands(N, Ops);
  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() ==
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between index and data");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
                               SDValue Ptr, SDValue SV, unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  unsigned NumOps = Ops.size();
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  switch (Opcode) {
  default: break;
  case ISD::CONCAT_VECTORS:
    // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
    if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::SELECT_CC:
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  case ISD::BR_CC:
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }
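  // For reference (see ISDOpcodes.h), the operand orders checked above are:
  //   select_cc lhs, rhs, tval, fval, cc  -->  cc(lhs, rhs) ? tval : fval
  //   br_cc chain, cc, lhs, rhs, dest     -->  branch to dest if cc(lhs, rhs)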
  // Memoize nodes.
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;

    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops);

#if 0
  switch (Opcode) {
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
  }
#endif

  // Memoize the node unless it returns a flag.
  SDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
  }
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList) {
  return getNode(Opcode, DL, VTList, None);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1) {
  SDValue Ops[] = { N1 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2) {
  SDValue Ops[] = { N1, N2 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDValue Ops[] = { N1, N2, N3 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDVTList SelectionDAG::getVTList(EVT VT) {
  return makeVTList(SDNode::getValueTypeList(VT), 1);
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
  FoldingSetNodeID ID;
  ID.AddInteger(2U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(2);
    Array[0] = VT1;
    Array[1] = VT2;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
  FoldingSetNodeID ID;
  ID.AddInteger(3U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(3);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
  FoldingSetNodeID ID;
  ID.AddInteger(4U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());
  ID.AddInteger(VT4.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(4);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Array[3] = VT4;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
  unsigned NumVTs = VTs.size();
  FoldingSetNodeID ID;
  ID.AddInteger(NumVTs);
  for (unsigned index = 0; index < NumVTs; index++) {
    ID.AddInteger(VTs[index].getRawBits());
  }

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(NumVTs);
    std::copy(VTs.begin(), VTs.end(), Array);
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
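///
/// For example (an illustrative sketch; \c DAG, \c N, and \c NewRHS are
/// assumed values, with \c N a two-operand node):
/// \code
///   // Try to give N a new right-hand operand. If an equivalent node
///   // already exists, it is returned and N is left untouched.
///   SDNode *Result = DAG.UpdateNodeOperands(N, N->getOperand(0), NewRHS);
///   if (Result != N) {
///     // CSE hit: keep using Result; N still has its old operands.
///   }
/// \endcode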
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op == N->getOperand(0)) return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  N->OperandList[0].set(Op);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
    return N;   // No operands changed, just return the input node.

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  if (N->OperandList[0] != Op1)
    N->OperandList[0].set(Op1);
  if (N->OperandList[1] != Op2)
    N->OperandList[1].set(Op2);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
  SDValue Ops[] = { Op1, Op2, Op3 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4, SDValue Op5) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  assert(N->getNumOperands() == NumOps &&
         "Update with wrong number of operands");

  // If no operands changed just return the input node.
  if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
    return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  for (unsigned i = 0; i != NumOps; ++i)
    if (N->OperandList[i] != Ops[i])
      N->OperandList[i].set(Ops[i]);

  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
  // Unlike the code in MorphNodeTo that does this, we don't need to
  // watch for dead nodes here.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
    SDUse &Use = *I++;
    Use.set(SDValue());
  }
}

/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
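///
/// For example (an illustrative sketch; \c CurDAG, \c TargetOpc, \c LHS, and
/// \c RHS are assumed values; the target opcode is hypothetical):
/// \code
///   // During instruction selection, morph an ISD node in place into a
///   // two-operand target instruction producing an i32 result.
///   SDNode *New = CurDAG->SelectNodeTo(N, TargetOpc, MVT::i32, LHS, RHS);
/// \endcode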
///
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
  SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
  // Reset the NodeID to -1.
  New->setNodeId(-1);
  if (New != N) {
    ReplaceAllUsesWith(N, New);
    RemoveDeadNode(N);
  }
  return New;
}

/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that operation is associated with multiple lines.
/// This will make the debugger work better at -O0, where there is a higher
/// probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node.
/// If there is already a node of the specified opcode and operands, it
/// returns that node instead of the current one. Note that the SDLoc need
/// not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner
/// or the legalizer, which maintain worklists that would need to be updated
/// when deleting things.
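///
/// The calling pattern therefore looks like this (an illustrative sketch;
/// \c DAG, \c N, \c NewOpc, \c VTs, and \c Ops are assumed values; the same
/// pattern appears in mutateStrictFPToFP below):
/// \code
///   SDNode *Res = DAG.MorphNodeTo(N, NewOpc, VTs, Ops);
///   if (Res != N) {
///     // An equivalent node already existed; redirect N's users to it.
///     DAG.ReplaceAllUsesWith(N, Res);
///     DAG.RemoveDeadNode(N);
///   }
/// \endcode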
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
      return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list.  Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  // For MachineNode, initialize the memory references information.
  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
    MN->setMemRefs(nullptr, nullptr);

  // Swap for an appropriately sized array from the recycler.
  removeOperands(N);
  createOperands(N, Ops);

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SDNode *N : DeadNodeSet)
      if (N->use_empty())
        DeadNodes.push_back(N);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP);   // Memoize the new node.
  return N;
}

SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
  unsigned OrigOpc = Node->getOpcode();
  unsigned NewOpc;
  bool IsUnary = false;
  bool IsTernary = false;
  switch (OrigOpc) {
  default:
    llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
  case ISD::STRICT_FADD: NewOpc = ISD::FADD; break;
  case ISD::STRICT_FSUB: NewOpc = ISD::FSUB; break;
  case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break;
  case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break;
  case ISD::STRICT_FREM: NewOpc = ISD::FREM; break;
  case ISD::STRICT_FMA: NewOpc = ISD::FMA; IsTernary = true; break;
  case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; IsUnary = true; break;
  case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break;
  case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break;
  case ISD::STRICT_FSIN: NewOpc = ISD::FSIN; IsUnary = true; break;
  case ISD::STRICT_FCOS: NewOpc = ISD::FCOS; IsUnary = true; break;
  case ISD::STRICT_FEXP: NewOpc = ISD::FEXP; IsUnary = true; break;
  case ISD::STRICT_FEXP2: NewOpc = ISD::FEXP2; IsUnary = true; break;
  case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; IsUnary = true; break;
  case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; IsUnary = true; break;
  case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; IsUnary = true; break;
  case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; IsUnary = true; break;
  case ISD::STRICT_FNEARBYINT:
    NewOpc = ISD::FNEARBYINT;
    IsUnary = true;
    break;
  }

  // We're taking this node out of the chain, so we need to re-link things.
  SDValue InputChain = Node->getOperand(0);
  SDValue OutputChain = SDValue(Node, 1);
  ReplaceAllUsesOfValueWith(OutputChain, InputChain);

  SDVTList VTs = getVTList(Node->getOperand(1).getValueType());
  SDNode *Res = nullptr;
  if (IsUnary)
    Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1) });
  else if (IsTernary)
    Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
                                           Node->getOperand(2),
                                           Node->getOperand(3) });
  else
    Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
                                           Node->getOperand(2) });

  // MorphNodeTo can operate in two ways: if an existing node with the
  // specified operands exists, it can just return it.  Otherwise, it
  // updates the node in place to have the requested operands.
  if (Res == Node) {
    // If we updated the node in place, reset the node ID.  To the isel,
    // this should be just like a newly allocated machine node.
    Res->setNodeId(-1);
  } else {
    ReplaceAllUsesWith(Node, Res);
    RemoveDeadNode(Node);
  }

  return Res;
}

/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node.  If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
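///
/// For example (an illustrative sketch; \c CurDAG, \c DL, \c LHS, and \c RHS
/// are assumed values; the target opcode is hypothetical):
/// \code
///   // Create (or CSE to an existing) target instruction node producing an
///   // i32 result from two operands.
///   MachineSDNode *MN =
///       CurDAG->getMachineNode(TargetOpc, DL, MVT::i32, LHS, RHS);
/// \endcode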
MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            ArrayRef<EVT> ResultTys,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(ResultTys);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
                                            SDVTList VTs,
                                            ArrayRef<SDValue> Ops) {
  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = nullptr;

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, Ops);
    IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
    }
  }

  // Allocate a new MachineSDNode.
  N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);

  if (DoCSE)
    CSEMap.InsertNode(N, IP);

  InsertNode(N);
  return N;
}

/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                             SDValue Operand) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                  VT, Operand, SRIdxVal);
  return SDValue(Subreg, 0);
}

/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                            SDValue Operand, SDValue Subreg) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                  VT, Operand, Subreg, SRIdxVal);
  return SDValue(Result, 0);
}

/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
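///
/// For example (an illustrative sketch; \c DAG, \c VTs, and \c Ops are
/// assumed values):
/// \code
///   // Probe the CSE map without creating anything new.
///   if (SDNode *Existing = DAG.getNodeIfExists(ISD::ADD, VTs, Ops)) {
///     // Reuse Existing instead of building a duplicate node.
///   }
/// \endcode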
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
                                      ArrayRef<SDValue> Ops,
                                      const SDNodeFlags Flags) {
  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
      E->intersectFlagsWith(Flags);
      return E;
    }
  }
  return nullptr;
}

/// getDbgValue - Creates an SDDbgValue node.
///
/// SDNode
SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
                                      SDNode *N, unsigned R, bool IsIndirect,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
}

/// Constant
SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
                                              DIExpression *Expr,
                                              const Value *C,
                                              const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
}

/// FrameIndex
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
                                                DIExpression *Expr, unsigned FI,
                                                const DebugLoc &DL,
                                                unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, DL, O);
}

void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
                                     unsigned OffsetInBits, unsigned SizeInBits,
                                     bool InvalidateDbg) {
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  assert(FromNode && ToNode && "Can't modify dbg values");

  // PR35338
  // TODO: assert(From != To && "Redundant dbg value transfer");
  // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
  if (From == To || FromNode == ToNode)
    return;

  if (!FromNode->getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
    if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
      continue;

    // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");

    // Just transfer the dbg value attached to From.
    if (Dbg->getResNo() != From.getResNo())
      continue;

    DIVariable *Var = Dbg->getVariable();
    auto *Expr = Dbg->getExpression();
    // If a fragment is requested, update the expression.
    if (SizeInBits) {
      // When splitting a larger (e.g., sign-extended) value whose
      // lower bits are described with an SDDbgValue, do not attempt
      // to transfer the SDDbgValue to the upper bits.
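      // For example (an illustrative sketch): when a 64-bit value is split
      // into two 32-bit halves, the high half would be transferred with
      // OffsetInBits == 32 and SizeInBits == 32, producing a clone whose
      // DIExpression carries a DW_OP_LLVM_fragment covering bits [32, 64).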
      if (auto FI = Expr->getFragmentInfo())
        if (OffsetInBits + SizeInBits > FI->SizeInBits)
          continue;
      auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
                                                             SizeInBits);
      if (!Fragment)
        continue;
      Expr = *Fragment;
    }
    // Clone the SDDbgValue and move it to To.
    SDDbgValue *Clone =
        getDbgValue(Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(),
                    Dbg->getDebugLoc(), Dbg->getOrder());
    ClonedDVs.push_back(Clone);

    if (InvalidateDbg)
      Dbg->setIsInvalidated();
  }

  for (SDDbgValue *Dbg : ClonedDVs)
    AddDbgValue(Dbg, ToNode, false);
}

void SelectionDAG::salvageDebugInfo(SDNode &N) {
  if (!N.getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (auto DV : GetDbgValues(&N)) {
    if (DV->isInvalidated())
      continue;
    switch (N.getOpcode()) {
    default:
      break;
    case ISD::ADD:
      SDValue N0 = N.getOperand(0);
      SDValue N1 = N.getOperand(1);
      if (!isConstantIntBuildVectorOrConstantInt(N0) &&
          isConstantIntBuildVectorOrConstantInt(N1)) {
        uint64_t Offset = N.getConstantOperandVal(1);
        // Rewrite an ADD constant node into a DIExpression. Since we are
        // performing arithmetic to compute the variable's *value* in the
        // DIExpression, we need to mark the expression with a
        // DW_OP_stack_value.
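        // For example (an illustrative sketch): a dbg.value describing the
        // result of (add %x, 8) is rewritten into a dbg.value of %x whose
        // expression computes the value as (DW_OP_plus_uconst 8,
        // DW_OP_stack_value).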
        auto *DIExpr = DV->getExpression();
        DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset,
                                       DIExpression::NoDeref,
                                       DIExpression::WithStackValue);
        SDDbgValue *Clone =
            getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
                        DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
        ClonedDVs.push_back(Clone);
        DV->setIsInvalidated();
        DEBUG(dbgs() << "SALVAGE: Rewriting"; N0.getNode()->dumprFull(this);
              dbgs() << " into " << *DIExpr << '\n');
      }
    }
  }

  for (SDDbgValue *Dbg : ClonedDVs)
    AddDbgValue(Dbg, Dbg->getSDNode(), false);
}

namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

} // end anonymous namespace

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
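/// For example (an illustrative sketch; \c DAG and \c N are assumed values):
/// \code
///   // Fold (add x, 0) by redirecting all users of the add to x.
///   if (isNullConstant(N->getOperand(1)))
///     DAG.ReplaceAllUsesWith(SDValue(N, 0), N->getOperand(0));
/// \endcode
///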
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses of a value with itself");

  // Preserve Debug Values
  transferDbgValues(FromN, To);

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // too. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Preserve Debug Info. Only do this if there's a use.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    if (From->hasAnyUseOfValue(i)) {
      assert((i < To->getNumValues()) && "Invalid To location");
      transferDbgValues(SDValue(From, i), SDValue(To, i));
    }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values. To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1) // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Preserve Debug Info.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    transferDbgValues(SDValue(From, i), *To);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To[getRoot().getResNo()]));
}

/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Preserve Debug Info.
  transferDbgValues(From, To);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}

namespace {

/// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
/// to record information about a use.
struct UseMemo {
  SDNode *User;
  unsigned Index;
  SDUse *Use;
};

/// operator< - Sort Memos by User.
bool operator<(const UseMemo &L, const UseMemo &R) {
  return (intptr_t)L.User < (intptr_t)R.User;
}

} // end anonymous namespace

/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
/// may appear in both the From and To list.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  transferDbgValues(*From, *To);

  // Read up all the uses and make records of them. This helps
  // processing new uses that are introduced during the
  // replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
         E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  std::sort(Uses.begin(), Uses.end());

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From.  If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. The AllNodes list is reordered to match,
/// and the number of ids assigned is returned.
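///
/// The implementation is a two-pass variant of Kahn's topological sort: the
/// first pass moves operand-less nodes to the front of the list and stashes
/// every other node's operand count in its NodeId field; the second pass
/// walks the growing sorted prefix, decrementing each user's outstanding
/// operand count and appending any user whose count reaches zero.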
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values. After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = &*I++;
    checkForCycles(N, this);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q(N);
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (SDNode &Node : allnodes()) {
    SDNode *N = &Node;
    checkForCycles(N, this);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P->getIterator() != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (Node.getIterator() == SortedPos) {
#ifndef NDEBUG
      allnodes_iterator I(N);
      SDNode *S = &*++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull(this); dbgs() << "\n";
      dbgs() << "Checking if this is due to cycles\n";
      checkForCycles(this, true);
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  if (SD) {
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, SD, isParameter);
}

SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
                                                   SDValue NewMemOp) {
  assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
  // The new memory operation must have the same position as the old load in
  // terms of memory dependency. Create a TokenFactor for the old load and new
  // memory operation and update uses of the old load's output chain to use
  // that TokenFactor.
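  //
  // For example (an illustrative sketch): if the old load's chain result fed
  // a store,
  //
  //   (OldLoad.chain) --> Store
  //
  // becomes
  //
  //   TokenFactor(OldLoad.chain, NewMemOp.chain) --> Store
  //
  // so the store is ordered after both memory operations.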
  SDValue OldChain = SDValue(OldLoad, 1);
  SDValue NewChain = SDValue(NewMemOp.getNode(), 1);
  if (!OldLoad->hasAnyUseOfValue(1))
    return NewChain;

  SDValue TokenFactor =
      getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain);
  ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
  UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
  return TokenFactor;
}

//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//

bool llvm::isNullConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

bool llvm::isNullFPConstant(SDValue V) {
  ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
  return Const != nullptr && Const->isZero() && !Const->isNegative();
}

bool llvm::isAllOnesConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isAllOnesValue();
}

bool llvm::isOneConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isOne();
}

bool llvm::isBitwiseNot(SDValue V) {
  return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1));
}

ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here.
    // FIXME: We blindly ignore splats which include undef which is overly
    // pessimistic.
    if (CN && UndefElements.none() &&
        CN->getValueType(0) == N.getValueType().getScalarType())
      return CN;
  }

  return nullptr;
}

ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);

    if (CN && UndefElements.none())
      return CN;
  }

  return nullptr;
}

HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         const DebugLoc &DL,
                                         const GlobalValue *GA, EVT VT,
                                         int64_t o, unsigned char TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
                                         EVT VT, unsigned SrcAS,
                                         unsigned DestAS)
    : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
      SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  MemSDNodeBits.IsVolatile = MMO->isVolatile();
  MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
  MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
  MemSDNodeBits.IsInvariant = MMO->isInvariant();

  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {

struct EVTArray {
  std::vector<EVT> VTs;

  EVTArray() {
    VTs.reserve(MVT::LAST_VALUETYPE);
    for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
      VTs.push_back(MVT((MVT::SimpleValueType)i));
  }
};

} // end anonymous namespace

static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true>> VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there is any use of the indicated value.
/// This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// Return true if the only users of N are contained in Nodes.
bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (llvm::any_of(Nodes,
                     [&User](const SDNode *Node) { return User == Node; }))
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (*this == Op)
      return true;
  return false;
}

bool SDNode::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (this == Op.getNode())
      return true;
  return false;
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in; it does not do an exhaustive search.
///
/// Note that we only need to examine chains when we're searching for
/// side-effects; SelectionDAG requires that all side-effects are represented
/// by chains, even if another operand would force a specific ordering. This
/// constraint is necessary to allow transformations like splitting loads.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply; we just want to be able to see through
  // TokenFactors etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel.
  if (getOpcode() == ISD::TokenFactor) {
    // First, try a shallow search.
    if (is_contained((*this)->ops(), Dest)) {
      // We found the chain we want as an operand of this TokenFactor.
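      // (A TokenFactor merely merges its input chains and imposes no ordering
      // among them, so reaching Dest as a direct operand is not by itself
      // enough.)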
      // Essentially, we reach the chain without side-effects if we could
      // serialize the TokenFactor into a simple chain of operations with
      // Dest as the last operation. This is automatically true if the
      // chain has one use: there are no other ordering constraints.
      // If the chain has more than one use, we give up: some other
      // use of Dest might force a side-effect between Dest and the current
      // node.
      if (Dest.hasOneUse())
        return true;
    }
    // Next, try a deep search: check whether every operand of the TokenFactor
    // reaches Dest.
    return llvm::all_of((*this)->ops(), [=](SDValue Op) {
      return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
    });
  }

  // Loads don't have side effects; look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth - 1);
  }
  return false;
}

bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}

void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
  this->Flags.intersectWith(Flags);
}

SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
            getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                    getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
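        // (This covers e.g. the VT operand of a SIGN_EXTEND_INREG; the same
        // scalar is reused for every unrolled element.)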
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}

bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
  auto LocDecomp = BaseIndexOffset::match(LD, *this);

  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    KnownBits Known(PtrWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = std::numeric_limits<int>::min();
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != std::numeric_limits<int>::min()) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
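/// For example, splitting v8i32 yields (v4i32, v4i32), while expanding an
/// illegal scalar type such as i64 on a 32-bit target yields (i32, i32) via
/// getTypeToTransformTo.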
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
             N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
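    // In big-endian element order, operand 0 corresponds to the most
    // significant bits of the splat value, so walk the operands in reverse.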
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    assert(getOperand(0).isUndef() &&
           "Can only have a splat without a constant when all elements are "
           "undef.");
    return getOperand(0);
  }

  return Splatted;
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
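  // A negative mask entry means the corresponding result element is undef.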
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

/// Returns the SDNode if it is a constant integer BuildVector
/// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}