//===- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

#define DEBUG_TYPE "selectiondag"

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  DEBUG(
    dbgs() << Msg;
    V.getNode()->dump(G);
  );
}

//===----------------------------------------------------------------------===//
//                          ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}
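// Worked example (illustrative, not from the original source): for a v4i32
// (build_vector (i32 7), (i32 7), (i32 7), (i32 7)), isConstantSplat reports
// SplatBitSize == 32 with SplatVal == 7, so the query above succeeds. A value
// that is only uniform at a wider granularity (SplatBitSize != 32) is
// rejected by the EltSize == SplatBitSize check.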
// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}
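// Worked example (illustrative): after type legalization, a v8i8 all-ones
// vector on ARM may appear as a BUILD_VECTOR of i32 constants with value 255.
// With EltSize == 8, countTrailingOnes(255) == 8 >= EltSize, so the vector is
// still recognized as all-ones even though each constant, viewed as an i32,
// is not ~0; the all-zeros check uses countTrailingZeros the same way.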
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (!Op.isUndef())
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

/// For an integer comparison, return 1 if the comparison is a signed
/// operation, 2 if it is an unsigned comparison, and zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE:  return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE:  return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
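// Worked example (illustrative): CondCodes are encoded as bit sets with E in
// bit 0, G in bit 1, L in bit 2, and U in bit 3. SETULT is U|L; swapping the
// operands exchanges the L and G bits, yielding U|G == SETUGT, and inverting
// an FP code flips all four bits, so SETULT becomes G|E == SETOGE. ORing
// SETOLT (L) with SETOGT (G) gives L|G == SETONE, matching
// getSetCCOrOperation above.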
//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
//                            SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}
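// Example (illustrative): a node whose value list contains MVT::Glue, such as
// part of a call sequence, must never be shared. Glue encodes a physical
// scheduling dependency between specific nodes, so two structurally identical
// glue-producing nodes are still distinct; doNotCSE is what keeps them out of
// the CSEMap.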
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}
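// Deletion protocol (summary of the routines above, for orientation): a node
// is first removed from the CSE maps so no new users can be folded onto it,
// its operands are then dropped so operand use counts stay balanced, and
// finally DeallocateNode below returns its memory and invalidates any
// SDDbgValues that still refer to it.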
void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
        std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                             ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}
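// Typical in-place mutation pattern (illustrative sketch of a caller, not
// code from this file):
//
//   RemoveNodeFromCSEMaps(N);      // N is about to stop matching its ID.
//   // ... mutate N's operands or flags in place ...
//   AddModifiedNodeToCSEMaps(N);   // Re-add; may merge N into a duplicate.
//
// If the mutated N now collides with an existing node, all of N's users are
// transferred to that node and N itself is deleted, as implemented above.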
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}
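// Example (illustrative; the exact value is target-dependent): on a typical
// 64-bit target, getEVTAlignment(MVT::i64) returns the DataLayout's ABI
// alignment for i64, usually 8 bytes, and MVT::iPTR is answered with the
// alignment of an i8* pointer.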
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  Context = &MF->getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}
SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType().getScalarType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                              EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}
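// Worked example (illustrative): getZeroExtendInReg on an i32 operand with
// VT == MVT::i8 materializes as (and Op, (i32 255)), i.e. it keeps only the
// low 8 bits. The *_EXTEND_VECTOR_INREG helpers above are the vector
// analogue: e.g. extending the low two i32 lanes of a v4i32 to form a v2i64
// of equal total width.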
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
      getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue TrueValue;
  switch (TLI->getBooleanContents(VT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    TrueValue = getConstant(1, DL, VT);
    break;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
                            VT);
    break;
  }
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}
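// Example (illustrative): for a target with ZeroOrOne booleans, logical NOT
// of a boolean value v is (xor v, 1), which flips only the low bit; for
// ZeroOrNegativeOne booleans "true" is all-ones, so logical and bitwise NOT
// coincide and both are (xor v, -1).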
SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    NewSDValueDbgMsg(V, "Creating constant: ", this);
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  NewSDValueDbgMsg(Result, "Creating constant: ", this);
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = MF->getFunction()->optForSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
                           ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                                 TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}
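// Worked example (illustrative): with 4-lane vectors, commuting
// shuffle(A, B, <0,5,2,7>) swaps the inputs and remaps the mask to
// <4,1,6,3>, so each index still selects the same lane of the same
// (now-swapped) input; undef indices (-1) are left untouched by commuteMask.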
SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  // If shuffling a splat, try to blend the splat instead. We do this here so
  // that even when this arises during lowering we don't have to re-handle it.
  auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
    BitVector UndefElements;
    SDValue Splat = BV->getSplatValue(&UndefElements);
    if (!Splat)
      return;

    for (int i = 0; i < NElts; ++i) {
      if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
        continue;

      // If this input comes from undef, mark it as such.
      if (UndefElements[MaskVec[i] - Offset]) {
        MaskVec[i] = -1;
        continue;
      }

      // If we can blend a non-undef lane, use that instead.
      if (!UndefElements[i])
        MaskVec[i] = i + Offset;
    }
  };
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    BlendSplat(N1BV, 0);
  if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
    BlendSplat(N2BV, NElts);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;
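  // Worked example (illustrative) of the canonicalizations above: with
  // NElts == 4, shuffle(v, v, <0,5,2,7>) first becomes
  // shuffle(v, undef, <0,1,2,3>), which the identity check then folds to v
  // itself; a mask that only selects RHS lanes is commuted so the live input
  // is always N1.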
We check that these don't change the number
1577     // (and size) of elements and just change their types.
1578     while (V.getOpcode() == ISD::BITCAST)
1579       V = V->getOperand(0);
1580
1581     // A splat should always show up as a build vector node.
1582     if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1583       BitVector UndefElements;
1584       SDValue Splat = BV->getSplatValue(&UndefElements);
1585       // If this is a splat of an undef, shuffling it is also undef.
1586       if (Splat && Splat.isUndef())
1587         return getUNDEF(VT);
1588
1589       bool SameNumElts =
1590           V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1591
1592       // We only have a splat which can skip shuffles if there is a splatted
1593       // value and no undef lanes rearranged by the shuffle.
1594       if (Splat && UndefElements.none()) {
1595         // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1596         // number of elements matches or the value splatted is a zero constant.
1597         if (SameNumElts)
1598           return N1;
1599         if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1600           if (C->isNullValue())
1601             return N1;
1602       }
1603
1604       // If the shuffle itself creates a splat, build the vector directly.
1605       if (AllSame && SameNumElts) {
1606         EVT BuildVT = BV->getValueType(0);
1607         const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1608         SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1609
1610         // We may have jumped through bitcasts, so the type of the
1611         // BUILD_VECTOR may not match the type of the shuffle.
1612         if (BuildVT != VT)
1613           NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1614         return NewBV;
1615       }
1616     }
1617   }
1618
1619   FoldingSetNodeID ID;
1620   SDValue Ops[2] = { N1, N2 };
1621   AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1622   for (int i = 0; i != NElts; ++i)
1623     ID.AddInteger(MaskVec[i]);
1624
1625   void *IP = nullptr;
1626   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1627     return SDValue(E, 0);
1628
1629   // Allocate the mask array for the node out of the BumpPtrAllocator, since
1630   // SDNode doesn't have access to it. This memory will be "leaked" when
1631   // the node is deallocated, but recovered when the NodeAllocator is released.
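  // Sketch of the lifetime contract (illustrative, not literal code):
  //   int *P = OperandAllocator.Allocate<int>(NElts);  // no matching free
  // Individual deallocation is unnecessary; the bump allocator reclaims
  // everything at once when the DAG's allocators are reset (e.g. on clear()).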
1632 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1633 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc); 1634 1635 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1636 dl.getDebugLoc(), MaskAlloc); 1637 createOperands(N, Ops); 1638 1639 CSEMap.InsertNode(N, IP); 1640 InsertNode(N); 1641 return SDValue(N, 0); 1642 } 1643 1644 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1645 MVT VT = SV.getSimpleValueType(0); 1646 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1647 ShuffleVectorSDNode::commuteMask(MaskVec); 1648 1649 SDValue Op0 = SV.getOperand(0); 1650 SDValue Op1 = SV.getOperand(1); 1651 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1652 } 1653 1654 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1655 FoldingSetNodeID ID; 1656 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1657 ID.AddInteger(RegNo); 1658 void *IP = nullptr; 1659 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1660 return SDValue(E, 0); 1661 1662 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1663 CSEMap.InsertNode(N, IP); 1664 InsertNode(N); 1665 return SDValue(N, 0); 1666 } 1667 1668 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1669 FoldingSetNodeID ID; 1670 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1671 ID.AddPointer(RegMask); 1672 void *IP = nullptr; 1673 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1674 return SDValue(E, 0); 1675 1676 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1677 CSEMap.InsertNode(N, IP); 1678 InsertNode(N); 1679 return SDValue(N, 0); 1680 } 1681 1682 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1683 MCSymbol *Label) { 1684 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1685 } 1686 1687 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1688 SDValue Root, MCSymbol *Label) { 1689 FoldingSetNodeID ID; 1690 SDValue Ops[] = { Root }; 1691 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1692 ID.AddPointer(Label); 1693 void *IP = nullptr; 1694 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1695 return SDValue(E, 0); 1696 1697 auto *N = newSDNode<LabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label); 1698 createOperands(N, Ops); 1699 1700 CSEMap.InsertNode(N, IP); 1701 InsertNode(N); 1702 return SDValue(N, 0); 1703 } 1704 1705 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1706 int64_t Offset, 1707 bool isTarget, 1708 unsigned char TargetFlags) { 1709 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1710 1711 FoldingSetNodeID ID; 1712 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1713 ID.AddPointer(BA); 1714 ID.AddInteger(Offset); 1715 ID.AddInteger(TargetFlags); 1716 void *IP = nullptr; 1717 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1718 return SDValue(E, 0); 1719 1720 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1721 CSEMap.InsertNode(N, IP); 1722 InsertNode(N); 1723 return SDValue(N, 0); 1724 } 1725 1726 SDValue SelectionDAG::getSrcValue(const Value *V) { 1727 assert((!V || V->getType()->isPointerTy()) && 1728 "SrcValue is not a pointer?"); 1729 1730 FoldingSetNodeID ID; 1731 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1732 ID.AddPointer(V); 1733 1734 void *IP = nullptr; 1735 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1736 return SDValue(E, 0); 1737 1738 auto *N = newSDNode<SrcValueSDNode>(V); 1739 CSEMap.InsertNode(N, IP); 1740 InsertNode(N); 1741 return SDValue(N, 0); 1742 } 1743 1744 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1745 FoldingSetNodeID ID; 1746 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1747 ID.AddPointer(MD); 1748 1749 void *IP = nullptr; 1750 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1751 return SDValue(E, 0); 1752 1753 auto *N = newSDNode<MDNodeSDNode>(MD); 1754 CSEMap.InsertNode(N, IP); 1755 InsertNode(N); 1756 return SDValue(N, 0); 1757 } 1758 1759 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1760 if (VT == V.getValueType()) 1761 return V; 1762 1763 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1764 } 1765 1766 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1767 unsigned SrcAS, unsigned DestAS) { 1768 SDValue Ops[] = {Ptr}; 1769 FoldingSetNodeID ID; 1770 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1771 ID.AddInteger(SrcAS); 1772 ID.AddInteger(DestAS); 1773 1774 void *IP = nullptr; 1775 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1776 return SDValue(E, 0); 1777 1778 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1779 VT, SrcAS, DestAS); 1780 createOperands(N, Ops); 1781 1782 CSEMap.InsertNode(N, IP); 1783 InsertNode(N); 1784 return SDValue(N, 0); 1785 } 1786 1787 /// getShiftAmountOperand - Return the specified value casted to 1788 /// the target's desired shift amount type. 
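/// For example (illustrative): if the target reports i8 as its shift amount
/// type for an i32 shift, an i32 amount operand is narrowed here with
/// getZExtOrTrunc to i8; vector amounts and already-matching types are
/// returned unchanged.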
1789 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1790 EVT OpTy = Op.getValueType(); 1791 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1792 if (OpTy == ShTy || OpTy.isVector()) return Op; 1793 1794 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1795 } 1796 1797 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1798 SDLoc dl(Node); 1799 const TargetLowering &TLI = getTargetLoweringInfo(); 1800 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1801 EVT VT = Node->getValueType(0); 1802 SDValue Tmp1 = Node->getOperand(0); 1803 SDValue Tmp2 = Node->getOperand(1); 1804 unsigned Align = Node->getConstantOperandVal(3); 1805 1806 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1807 Tmp2, MachinePointerInfo(V)); 1808 SDValue VAList = VAListLoad; 1809 1810 if (Align > TLI.getMinStackArgumentAlignment()) { 1811 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 1812 1813 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1814 getConstant(Align - 1, dl, VAList.getValueType())); 1815 1816 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1817 getConstant(-(int64_t)Align, dl, VAList.getValueType())); 1818 } 1819 1820 // Increment the pointer, VAList, to the next vaarg 1821 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1822 getConstant(getDataLayout().getTypeAllocSize( 1823 VT.getTypeForEVT(*getContext())), 1824 dl, VAList.getValueType())); 1825 // Store the incremented VAList to the legalized pointer 1826 Tmp1 = 1827 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1828 // Load the actual argument out of the pointer VAList 1829 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1830 } 1831 1832 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1833 SDLoc dl(Node); 1834 const TargetLowering &TLI = getTargetLoweringInfo(); 1835 // This defaults to loading a pointer from the input and storing it to the 1836 // output, returning the chain. 
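  // Rough shape of the emitted nodes (a sketch, assuming a pointer-sized
  // va_list):
  //   t1: load  ptr from the source va_list slot   (uses the incoming chain)
  //   t2: store t1  to  the destination slot       (chained on t1's chain)
  // t2 is then returned as the new chain.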
1837 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1838 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1839 SDValue Tmp1 = 1840 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1841 Node->getOperand(2), MachinePointerInfo(VS)); 1842 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1843 MachinePointerInfo(VD)); 1844 } 1845 1846 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1847 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1848 unsigned ByteSize = VT.getStoreSize(); 1849 Type *Ty = VT.getTypeForEVT(*getContext()); 1850 unsigned StackAlign = 1851 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1852 1853 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1854 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1855 } 1856 1857 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1858 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 1859 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1860 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1861 const DataLayout &DL = getDataLayout(); 1862 unsigned Align = 1863 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1864 1865 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1866 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 1867 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1868 } 1869 1870 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 1871 ISD::CondCode Cond, const SDLoc &dl) { 1872 // These setcc operations always fold. 1873 switch (Cond) { 1874 default: break; 1875 case ISD::SETFALSE: 1876 case ISD::SETFALSE2: return getConstant(0, dl, VT); 1877 case ISD::SETTRUE: 1878 case ISD::SETTRUE2: { 1879 TargetLowering::BooleanContent Cnt = 1880 TLI->getBooleanContents(N1->getValueType(0)); 1881 return getConstant( 1882 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? 
-1ULL : 1, dl, 1883 VT); 1884 } 1885 1886 case ISD::SETOEQ: 1887 case ISD::SETOGT: 1888 case ISD::SETOGE: 1889 case ISD::SETOLT: 1890 case ISD::SETOLE: 1891 case ISD::SETONE: 1892 case ISD::SETO: 1893 case ISD::SETUO: 1894 case ISD::SETUEQ: 1895 case ISD::SETUNE: 1896 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!"); 1897 break; 1898 } 1899 1900 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 1901 const APInt &C2 = N2C->getAPIntValue(); 1902 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 1903 const APInt &C1 = N1C->getAPIntValue(); 1904 1905 switch (Cond) { 1906 default: llvm_unreachable("Unknown integer setcc!"); 1907 case ISD::SETEQ: return getConstant(C1 == C2, dl, VT); 1908 case ISD::SETNE: return getConstant(C1 != C2, dl, VT); 1909 case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT); 1910 case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT); 1911 case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT); 1912 case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT); 1913 case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT); 1914 case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT); 1915 case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT); 1916 case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT); 1917 } 1918 } 1919 } 1920 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) { 1921 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) { 1922 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); 1923 switch (Cond) { 1924 default: break; 1925 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 1926 return getUNDEF(VT); 1927 LLVM_FALLTHROUGH; 1928 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT); 1929 case ISD::SETNE: if (R==APFloat::cmpUnordered) 1930 return getUNDEF(VT); 1931 LLVM_FALLTHROUGH; 1932 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan || 1933 R==APFloat::cmpLessThan, dl, VT); 1934 case ISD::SETLT: if (R==APFloat::cmpUnordered) 1935 return getUNDEF(VT); 1936 LLVM_FALLTHROUGH; 1937 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT); 1938 case ISD::SETGT: if (R==APFloat::cmpUnordered) 1939 return getUNDEF(VT); 1940 LLVM_FALLTHROUGH; 1941 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT); 1942 case ISD::SETLE: if (R==APFloat::cmpUnordered) 1943 return getUNDEF(VT); 1944 LLVM_FALLTHROUGH; 1945 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan || 1946 R==APFloat::cmpEqual, dl, VT); 1947 case ISD::SETGE: if (R==APFloat::cmpUnordered) 1948 return getUNDEF(VT); 1949 LLVM_FALLTHROUGH; 1950 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan || 1951 R==APFloat::cmpEqual, dl, VT); 1952 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT); 1953 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT); 1954 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered || 1955 R==APFloat::cmpEqual, dl, VT); 1956 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT); 1957 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered || 1958 R==APFloat::cmpLessThan, dl, VT); 1959 case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan || 1960 R==APFloat::cmpUnordered, dl, VT); 1961 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT); 1962 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT); 1963 } 1964 } else { 1965 // Ensure that the constant occurs on the RHS. 
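      // For example (illustrative): setcc 4, X, setlt is rewritten as
      // setcc X, 4, setgt; both operands and the condition code are
      // swapped, but only if the swapped condition is legal for the
      // comparison type.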
1966 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 1967 MVT CompVT = N1.getValueType().getSimpleVT(); 1968 if (!TLI->isCondCodeLegal(SwappedCond, CompVT)) 1969 return SDValue(); 1970 1971 return getSetCC(dl, VT, N2, N1, SwappedCond); 1972 } 1973 } 1974 1975 // Could not fold it. 1976 return SDValue(); 1977 } 1978 1979 /// See if the specified operand can be simplified with the knowledge that only 1980 /// the bits specified by Mask are used. 1981 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &Mask) { 1982 switch (V.getOpcode()) { 1983 default: 1984 break; 1985 case ISD::Constant: { 1986 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode()); 1987 assert(CV && "Const value should be ConstSDNode."); 1988 const APInt &CVal = CV->getAPIntValue(); 1989 APInt NewVal = CVal & Mask; 1990 if (NewVal != CVal) 1991 return getConstant(NewVal, SDLoc(V), V.getValueType()); 1992 break; 1993 } 1994 case ISD::OR: 1995 case ISD::XOR: 1996 // If the LHS or RHS don't contribute bits to the or, drop them. 1997 if (MaskedValueIsZero(V.getOperand(0), Mask)) 1998 return V.getOperand(1); 1999 if (MaskedValueIsZero(V.getOperand(1), Mask)) 2000 return V.getOperand(0); 2001 break; 2002 case ISD::SRL: 2003 // Only look at single-use SRLs. 2004 if (!V.getNode()->hasOneUse()) 2005 break; 2006 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 2007 // See if we can recursively simplify the LHS. 2008 unsigned Amt = RHSC->getZExtValue(); 2009 2010 // Watch out for shift count overflow though. 2011 if (Amt >= Mask.getBitWidth()) 2012 break; 2013 APInt NewMask = Mask << Amt; 2014 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask)) 2015 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS, 2016 V.getOperand(1)); 2017 } 2018 break; 2019 case ISD::AND: { 2020 // X & -1 -> X (ignoring bits which aren't demanded). 2021 ConstantSDNode *AndVal = isConstOrConstSplat(V.getOperand(1)); 2022 if (AndVal && Mask.isSubsetOf(AndVal->getAPIntValue())) 2023 return V.getOperand(0); 2024 break; 2025 } 2026 case ISD::ANY_EXTEND: { 2027 SDValue Src = V.getOperand(0); 2028 unsigned SrcBitWidth = Src.getScalarValueSizeInBits(); 2029 // Being conservative here - only peek through if we only demand bits in the 2030 // non-extended source (even though the extended bits are technically undef). 2031 if (Mask.getActiveBits() > SrcBitWidth) 2032 break; 2033 APInt SrcMask = Mask.trunc(SrcBitWidth); 2034 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcMask)) 2035 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc); 2036 break; 2037 } 2038 } 2039 return SDValue(); 2040 } 2041 2042 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2043 /// use this predicate to simplify operations downstream. 2044 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2045 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2046 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2047 } 2048 2049 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2050 /// this predicate to simplify operations downstream. Mask is known to be zero 2051 /// for bits that V cannot have. 
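/// For example (illustrative): for V = (X & 0xFF00) on i16,
/// MaskedValueIsZero(V, 0x00FF) returns true, because the AND forces the
/// low byte to zero, so 0x00FF is a subset of the known-zero bits.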
2052 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 2053 unsigned Depth) const { 2054 KnownBits Known; 2055 computeKnownBits(Op, Known, Depth); 2056 return Mask.isSubsetOf(Known.Zero); 2057 } 2058 2059 /// Helper function that checks to see if a node is a constant or a 2060 /// build vector of splat constants at least within the demanded elts. 2061 static ConstantSDNode *isConstOrDemandedConstSplat(SDValue N, 2062 const APInt &DemandedElts) { 2063 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 2064 return CN; 2065 if (N.getOpcode() != ISD::BUILD_VECTOR) 2066 return nullptr; 2067 EVT VT = N.getValueType(); 2068 ConstantSDNode *Cst = nullptr; 2069 unsigned NumElts = VT.getVectorNumElements(); 2070 assert(DemandedElts.getBitWidth() == NumElts && "Unexpected vector size"); 2071 for (unsigned i = 0; i != NumElts; ++i) { 2072 if (!DemandedElts[i]) 2073 continue; 2074 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(i)); 2075 if (!C || (Cst && Cst->getAPIntValue() != C->getAPIntValue()) || 2076 C->getValueType(0) != VT.getScalarType()) 2077 return nullptr; 2078 Cst = C; 2079 } 2080 return Cst; 2081 } 2082 2083 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 2084 /// is less than the element bit-width of the shift node, return it. 2085 static const APInt *getValidShiftAmountConstant(SDValue V) { 2086 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 2087 // Shifting more than the bitwidth is not valid. 2088 const APInt &ShAmt = SA->getAPIntValue(); 2089 if (ShAmt.ult(V.getScalarValueSizeInBits())) 2090 return &ShAmt; 2091 } 2092 return nullptr; 2093 } 2094 2095 /// Determine which bits of Op are known to be either zero or one and return 2096 /// them in Known. For vectors, the known bits are those that are shared by 2097 /// every vector element. 2098 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 2099 unsigned Depth) const { 2100 EVT VT = Op.getValueType(); 2101 APInt DemandedElts = VT.isVector() 2102 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2103 : APInt(1, 1); 2104 computeKnownBits(Op, Known, DemandedElts, Depth); 2105 } 2106 2107 /// Determine which bits of Op are known to be either zero or one and return 2108 /// them in Known. The DemandedElts argument allows us to only collect the known 2109 /// bits that are shared by the requested vector elements. 2110 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 2111 const APInt &DemandedElts, 2112 unsigned Depth) const { 2113 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2114 2115 Known = KnownBits(BitWidth); // Don't know anything. 2116 2117 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2118 // We know all of the bits for a constant! 2119 Known.One = C->getAPIntValue(); 2120 Known.Zero = ~Known.One; 2121 return; 2122 } 2123 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { 2124 // We know all of the bits for a constant fp! 2125 Known.One = C->getValueAPF().bitcastToAPInt(); 2126 Known.Zero = ~Known.One; 2127 return; 2128 } 2129 2130 if (Depth == 6) 2131 return; // Limit search depth. 2132 2133 KnownBits Known2; 2134 unsigned NumElts = DemandedElts.getBitWidth(); 2135 2136 if (!DemandedElts) 2137 return; // No demanded elts, better to assume we don't know anything. 2138 2139 unsigned Opcode = Op.getOpcode(); 2140 switch (Opcode) { 2141 case ISD::BUILD_VECTOR: 2142 // Collect the known bits that are shared by every demanded vector element. 
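    // Worked example (illustrative): for BUILD_VECTOR <i8 0x01, i8 0x03>
    // with both elements demanded, the shared bits are Known.One = 0x01 and
    // Known.Zero = 0xFC; bit 1 differs between the two elements, so it
    // remains unknown.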
2143 assert(NumElts == Op.getValueType().getVectorNumElements() && 2144 "Unexpected vector size"); 2145 Known.Zero.setAllBits(); Known.One.setAllBits(); 2146 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2147 if (!DemandedElts[i]) 2148 continue; 2149 2150 SDValue SrcOp = Op.getOperand(i); 2151 computeKnownBits(SrcOp, Known2, Depth + 1); 2152 2153 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2154 if (SrcOp.getValueSizeInBits() != BitWidth) { 2155 assert(SrcOp.getValueSizeInBits() > BitWidth && 2156 "Expected BUILD_VECTOR implicit truncation"); 2157 Known2 = Known2.trunc(BitWidth); 2158 } 2159 2160 // Known bits are the values that are shared by every demanded element. 2161 Known.One &= Known2.One; 2162 Known.Zero &= Known2.Zero; 2163 2164 // If we don't know any bits, early out. 2165 if (Known.isUnknown()) 2166 break; 2167 } 2168 break; 2169 case ISD::VECTOR_SHUFFLE: { 2170 // Collect the known bits that are shared by every vector element referenced 2171 // by the shuffle. 2172 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2173 Known.Zero.setAllBits(); Known.One.setAllBits(); 2174 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2175 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2176 for (unsigned i = 0; i != NumElts; ++i) { 2177 if (!DemandedElts[i]) 2178 continue; 2179 2180 int M = SVN->getMaskElt(i); 2181 if (M < 0) { 2182 // For UNDEF elements, we don't know anything about the common state of 2183 // the shuffle result. 2184 Known.resetAll(); 2185 DemandedLHS.clearAllBits(); 2186 DemandedRHS.clearAllBits(); 2187 break; 2188 } 2189 2190 if ((unsigned)M < NumElts) 2191 DemandedLHS.setBit((unsigned)M % NumElts); 2192 else 2193 DemandedRHS.setBit((unsigned)M % NumElts); 2194 } 2195 // Known bits are the values that are shared by every demanded element. 2196 if (!!DemandedLHS) { 2197 SDValue LHS = Op.getOperand(0); 2198 computeKnownBits(LHS, Known2, DemandedLHS, Depth + 1); 2199 Known.One &= Known2.One; 2200 Known.Zero &= Known2.Zero; 2201 } 2202 // If we don't know any bits, early out. 2203 if (Known.isUnknown()) 2204 break; 2205 if (!!DemandedRHS) { 2206 SDValue RHS = Op.getOperand(1); 2207 computeKnownBits(RHS, Known2, DemandedRHS, Depth + 1); 2208 Known.One &= Known2.One; 2209 Known.Zero &= Known2.Zero; 2210 } 2211 break; 2212 } 2213 case ISD::CONCAT_VECTORS: { 2214 // Split DemandedElts and test each of the demanded subvectors. 2215 Known.Zero.setAllBits(); Known.One.setAllBits(); 2216 EVT SubVectorVT = Op.getOperand(0).getValueType(); 2217 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 2218 unsigned NumSubVectors = Op.getNumOperands(); 2219 for (unsigned i = 0; i != NumSubVectors; ++i) { 2220 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 2221 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 2222 if (!!DemandedSub) { 2223 SDValue Sub = Op.getOperand(i); 2224 computeKnownBits(Sub, Known2, DemandedSub, Depth + 1); 2225 Known.One &= Known2.One; 2226 Known.Zero &= Known2.Zero; 2227 } 2228 // If we don't know any bits, early out. 2229 if (Known.isUnknown()) 2230 break; 2231 } 2232 break; 2233 } 2234 case ISD::INSERT_SUBVECTOR: { 2235 // If we know the element index, demand any elements from the subvector and 2236 // the remainder from the src its inserted into, otherwise demand them all. 
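    // E.g. (illustrative): inserting a 2-element subvector at constant
    // index 2 into an 8-element vector routes DemandedElts[2..3] to the
    // subvector and the remaining demanded lanes to the source vector.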
2237 SDValue Src = Op.getOperand(0); 2238 SDValue Sub = Op.getOperand(1); 2239 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2240 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 2241 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) { 2242 Known.One.setAllBits(); 2243 Known.Zero.setAllBits(); 2244 uint64_t Idx = SubIdx->getZExtValue(); 2245 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 2246 if (!!DemandedSubElts) { 2247 computeKnownBits(Sub, Known, DemandedSubElts, Depth + 1); 2248 if (Known.isUnknown()) 2249 break; // early-out. 2250 } 2251 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts); 2252 APInt DemandedSrcElts = DemandedElts & ~SubMask; 2253 if (!!DemandedSrcElts) { 2254 computeKnownBits(Src, Known2, DemandedSrcElts, Depth + 1); 2255 Known.One &= Known2.One; 2256 Known.Zero &= Known2.Zero; 2257 } 2258 } else { 2259 computeKnownBits(Sub, Known, Depth + 1); 2260 if (Known.isUnknown()) 2261 break; // early-out. 2262 computeKnownBits(Src, Known2, Depth + 1); 2263 Known.One &= Known2.One; 2264 Known.Zero &= Known2.Zero; 2265 } 2266 break; 2267 } 2268 case ISD::EXTRACT_SUBVECTOR: { 2269 // If we know the element index, just demand that subvector elements, 2270 // otherwise demand them all. 2271 SDValue Src = Op.getOperand(0); 2272 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2273 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2274 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2275 // Offset the demanded elts by the subvector index. 2276 uint64_t Idx = SubIdx->getZExtValue(); 2277 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx); 2278 computeKnownBits(Src, Known, DemandedSrc, Depth + 1); 2279 } else { 2280 computeKnownBits(Src, Known, Depth + 1); 2281 } 2282 break; 2283 } 2284 case ISD::BITCAST: { 2285 SDValue N0 = Op.getOperand(0); 2286 EVT SubVT = N0.getValueType(); 2287 unsigned SubBitWidth = SubVT.getScalarSizeInBits(); 2288 2289 // Ignore bitcasts from unsupported types. 2290 if (!(SubVT.isInteger() || SubVT.isFloatingPoint())) 2291 break; 2292 2293 // Fast handling of 'identity' bitcasts. 2294 if (BitWidth == SubBitWidth) { 2295 computeKnownBits(N0, Known, DemandedElts, Depth + 1); 2296 break; 2297 } 2298 2299 // Support big-endian targets when it becomes useful. 2300 bool IsLE = getDataLayout().isLittleEndian(); 2301 if (!IsLE) 2302 break; 2303 2304 // Bitcast 'small element' vector to 'large element' scalar/vector. 2305 if ((BitWidth % SubBitWidth) == 0) { 2306 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2307 2308 // Collect known bits for the (larger) output by collecting the known 2309 // bits from each set of sub elements and shift these into place. 2310 // We need to separately call computeKnownBits for each set of 2311 // sub elements as the knownbits for each is likely to be different. 2312 unsigned SubScale = BitWidth / SubBitWidth; 2313 APInt SubDemandedElts(NumElts * SubScale, 0); 2314 for (unsigned i = 0; i != NumElts; ++i) 2315 if (DemandedElts[i]) 2316 SubDemandedElts.setBit(i * SubScale); 2317 2318 for (unsigned i = 0; i != SubScale; ++i) { 2319 computeKnownBits(N0, Known2, SubDemandedElts.shl(i), 2320 Depth + 1); 2321 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * i); 2322 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * i); 2323 } 2324 } 2325 2326 // Bitcast 'large element' scalar/vector to 'small element' vector. 
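    // E.g. (little-endian illustration): for a v2i32 -> v4i16 bitcast,
    // output element i takes bits [(i % 2) * 16, (i % 2) * 16 + 16) of
    // input element i / 2, which is exactly the Offset computation below.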
2327 if ((SubBitWidth % BitWidth) == 0) { 2328 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 2329 2330 // Collect known bits for the (smaller) output by collecting the known 2331 // bits from the overlapping larger input elements and extracting the 2332 // sub sections we actually care about. 2333 unsigned SubScale = SubBitWidth / BitWidth; 2334 APInt SubDemandedElts(NumElts / SubScale, 0); 2335 for (unsigned i = 0; i != NumElts; ++i) 2336 if (DemandedElts[i]) 2337 SubDemandedElts.setBit(i / SubScale); 2338 2339 computeKnownBits(N0, Known2, SubDemandedElts, Depth + 1); 2340 2341 Known.Zero.setAllBits(); Known.One.setAllBits(); 2342 for (unsigned i = 0; i != NumElts; ++i) 2343 if (DemandedElts[i]) { 2344 unsigned Offset = (i % SubScale) * BitWidth; 2345 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth); 2346 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth); 2347 // If we don't know any bits, early out. 2348 if (Known.isUnknown()) 2349 break; 2350 } 2351 } 2352 break; 2353 } 2354 case ISD::AND: 2355 // If either the LHS or the RHS are Zero, the result is zero. 2356 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2357 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2358 2359 // Output known-1 bits are only known if set in both the LHS & RHS. 2360 Known.One &= Known2.One; 2361 // Output known-0 are known to be clear if zero in either the LHS | RHS. 2362 Known.Zero |= Known2.Zero; 2363 break; 2364 case ISD::OR: 2365 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2366 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2367 2368 // Output known-0 bits are only known if clear in both the LHS & RHS. 2369 Known.Zero &= Known2.Zero; 2370 // Output known-1 are known to be set if set in either the LHS | RHS. 2371 Known.One |= Known2.One; 2372 break; 2373 case ISD::XOR: { 2374 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2375 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2376 2377 // Output known-0 bits are known if clear or set in both the LHS & RHS. 2378 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One); 2379 // Output known-1 are known to be set if set in only one of the LHS, RHS. 2380 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero); 2381 Known.Zero = KnownZeroOut; 2382 break; 2383 } 2384 case ISD::MUL: { 2385 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2386 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2387 2388 // If low bits are zero in either operand, output low known-0 bits. 2389 // Also compute a conservative estimate for high known-0 bits. 2390 // More trickiness is possible, but this is sufficient for the 2391 // interesting case of alignment computation. 2392 unsigned TrailZ = Known.countMinTrailingZeros() + 2393 Known2.countMinTrailingZeros(); 2394 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 2395 Known2.countMinLeadingZeros(), 2396 BitWidth) - BitWidth; 2397 2398 Known.resetAll(); 2399 Known.Zero.setLowBits(std::min(TrailZ, BitWidth)); 2400 Known.Zero.setHighBits(std::min(LeadZ, BitWidth)); 2401 break; 2402 } 2403 case ISD::UDIV: { 2404 // For the purposes of computing leading zeros we can conservatively 2405 // treat a udiv as a logical right shift by the power of 2 known to 2406 // be less than the denominator. 
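    // Worked example (illustrative): on i32, if the numerator has at least
    // 8 known leading zeros and the divisor is known to be >= 16 (so at
    // most 27 leading zeros), the quotient is shifted right by at least 4
    // more bits, leaving >= 12 known leading zeros.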
2407 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2408 unsigned LeadZ = Known2.countMinLeadingZeros(); 2409 2410 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2411 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2412 if (RHSMaxLeadingZeros != BitWidth) 2413 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2414 2415 Known.Zero.setHighBits(LeadZ); 2416 break; 2417 } 2418 case ISD::SELECT: 2419 case ISD::VSELECT: 2420 computeKnownBits(Op.getOperand(2), Known, DemandedElts, Depth+1); 2421 // If we don't know any bits, early out. 2422 if (Known.isUnknown()) 2423 break; 2424 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth+1); 2425 2426 // Only known if known in both the LHS and RHS. 2427 Known.One &= Known2.One; 2428 Known.Zero &= Known2.Zero; 2429 break; 2430 case ISD::SELECT_CC: 2431 computeKnownBits(Op.getOperand(3), Known, DemandedElts, Depth+1); 2432 // If we don't know any bits, early out. 2433 if (Known.isUnknown()) 2434 break; 2435 computeKnownBits(Op.getOperand(2), Known2, DemandedElts, Depth+1); 2436 2437 // Only known if known in both the LHS and RHS. 2438 Known.One &= Known2.One; 2439 Known.Zero &= Known2.Zero; 2440 break; 2441 case ISD::SMULO: 2442 case ISD::UMULO: 2443 if (Op.getResNo() != 1) 2444 break; 2445 // The boolean result conforms to getBooleanContents. 2446 // If we know the result of a setcc has the top bits zero, use this info. 2447 // We know that we have an integer-based boolean since these operations 2448 // are only available for integer. 2449 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2450 TargetLowering::ZeroOrOneBooleanContent && 2451 BitWidth > 1) 2452 Known.Zero.setBitsFrom(1); 2453 break; 2454 case ISD::SETCC: 2455 // If we know the result of a setcc has the top bits zero, use this info. 2456 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2457 TargetLowering::ZeroOrOneBooleanContent && 2458 BitWidth > 1) 2459 Known.Zero.setBitsFrom(1); 2460 break; 2461 case ISD::SHL: 2462 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2463 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2464 Known.Zero <<= *ShAmt; 2465 Known.One <<= *ShAmt; 2466 // Low bits are known zero. 2467 Known.Zero.setLowBits(ShAmt->getZExtValue()); 2468 } 2469 break; 2470 case ISD::SRL: 2471 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2472 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2473 Known.Zero.lshrInPlace(*ShAmt); 2474 Known.One.lshrInPlace(*ShAmt); 2475 // High bits are known zero. 2476 Known.Zero.setHighBits(ShAmt->getZExtValue()); 2477 } 2478 break; 2479 case ISD::SRA: 2480 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2481 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2482 // Sign extend known zero/one bit (else is unknown). 2483 Known.Zero.ashrInPlace(*ShAmt); 2484 Known.One.ashrInPlace(*ShAmt); 2485 } 2486 break; 2487 case ISD::SIGN_EXTEND_INREG: { 2488 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2489 unsigned EBits = EVT.getScalarSizeInBits(); 2490 2491 // Sign extension. Compute the demanded bits in the result that are not 2492 // present in the input. 
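    // E.g. (illustrative): sign_extend_inreg from i8 within an i32 gives
    // EBits = 8 and NewBits = 0xFFFFFF00; if bit 7 of the input is then
    // proven zero, all of the NewBits positions become known zero too.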
2493 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2494 2495 APInt InSignMask = APInt::getSignMask(EBits); 2496 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2497 2498 // If the sign extended bits are demanded, we know that the sign 2499 // bit is demanded. 2500 InSignMask = InSignMask.zext(BitWidth); 2501 if (NewBits.getBoolValue()) 2502 InputDemandedBits |= InSignMask; 2503 2504 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2505 Known.One &= InputDemandedBits; 2506 Known.Zero &= InputDemandedBits; 2507 2508 // If the sign bit of the input is known set or clear, then we know the 2509 // top bits of the result. 2510 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear 2511 Known.Zero |= NewBits; 2512 Known.One &= ~NewBits; 2513 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set 2514 Known.One |= NewBits; 2515 Known.Zero &= ~NewBits; 2516 } else { // Input sign bit unknown 2517 Known.Zero &= ~NewBits; 2518 Known.One &= ~NewBits; 2519 } 2520 break; 2521 } 2522 case ISD::CTTZ: 2523 case ISD::CTTZ_ZERO_UNDEF: { 2524 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2525 // If we have a known 1, its position is our upper bound. 2526 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 2527 unsigned LowBits = Log2_32(PossibleTZ) + 1; 2528 Known.Zero.setBitsFrom(LowBits); 2529 break; 2530 } 2531 case ISD::CTLZ: 2532 case ISD::CTLZ_ZERO_UNDEF: { 2533 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2534 // If we have a known 1, its position is our upper bound. 2535 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 2536 unsigned LowBits = Log2_32(PossibleLZ) + 1; 2537 Known.Zero.setBitsFrom(LowBits); 2538 break; 2539 } 2540 case ISD::CTPOP: { 2541 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2542 // If we know some of the bits are zero, they can't be one. 2543 unsigned PossibleOnes = Known2.countMaxPopulation(); 2544 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 2545 break; 2546 } 2547 case ISD::LOAD: { 2548 LoadSDNode *LD = cast<LoadSDNode>(Op); 2549 // If this is a ZEXTLoad and we are looking at the loaded value. 2550 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 2551 EVT VT = LD->getMemoryVT(); 2552 unsigned MemBits = VT.getScalarSizeInBits(); 2553 Known.Zero.setBitsFrom(MemBits); 2554 } else if (const MDNode *Ranges = LD->getRanges()) { 2555 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 2556 computeKnownBitsFromRangeMetadata(*Ranges, Known); 2557 } 2558 break; 2559 } 2560 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2561 EVT InVT = Op.getOperand(0).getValueType(); 2562 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements()); 2563 computeKnownBits(Op.getOperand(0), Known, InDemandedElts, Depth + 1); 2564 Known = Known.zext(BitWidth); 2565 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits()); 2566 break; 2567 } 2568 case ISD::ZERO_EXTEND: { 2569 EVT InVT = Op.getOperand(0).getValueType(); 2570 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2571 Known = Known.zext(BitWidth); 2572 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits()); 2573 break; 2574 } 2575 // TODO ISD::SIGN_EXTEND_VECTOR_INREG 2576 case ISD::SIGN_EXTEND: { 2577 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2578 // If the sign bit is known to be zero or one, then sext will extend 2579 // it to the top bits, else it will just zext. 
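    // E.g. (illustrative): extending i8 -> i32 when bit 7 of the source is
    // known zero turns all 24 newly created bits into known zeros; the
    // KnownBits::sext call below performs that propagation.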
2580     Known = Known.sext(BitWidth);
2581     break;
2582   }
2583   case ISD::ANY_EXTEND: {
2584     computeKnownBits(Op.getOperand(0), Known, Depth+1);
2585     Known = Known.zext(BitWidth);
2586     break;
2587   }
2588   case ISD::TRUNCATE: {
2589     computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2590     Known = Known.trunc(BitWidth);
2591     break;
2592   }
2593   case ISD::AssertZext: {
2594     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2595     APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2596     computeKnownBits(Op.getOperand(0), Known, Depth+1);
2597     Known.Zero |= (~InMask);
2598     Known.One &= (~Known.Zero);
2599     break;
2600   }
2601   case ISD::FGETSIGN:
2602     // All bits are zero except the low bit.
2603     Known.Zero.setBitsFrom(1);
2604     break;
2605   case ISD::USUBO:
2606   case ISD::SSUBO:
2607     if (Op.getResNo() == 1) {
2608       // If we know the result of a setcc has the top bits zero, use this info.
2609       if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2610               TargetLowering::ZeroOrOneBooleanContent &&
2611           BitWidth > 1)
2612         Known.Zero.setBitsFrom(1);
2613       break;
2614     }
2615     LLVM_FALLTHROUGH;
2616   case ISD::SUB:
2617   case ISD::SUBC: {
2618     if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) {
2619       // We know that the top bits of C-X are clear if X contains fewer bits
2620       // than C (i.e. no wrap-around can happen). For example, 20-X is
2621       // positive if we can prove that X is >= 0 and < 16.
2622       if (CLHS->getAPIntValue().isNonNegative()) {
2623         unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2624         // NLZ can't be BitWidth with no sign bit
2625         APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2626         computeKnownBits(Op.getOperand(1), Known2, DemandedElts,
2627                          Depth + 1);
2628
2629         // If all of the MaskV bits are known to be zero, then we know the
2630         // output top bits are zero, because we now know that the output is
2631         // from [0-C].
2632         if ((Known2.Zero & MaskV) == MaskV) {
2633           unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2634           // Top bits known zero.
2635           Known.Zero.setHighBits(NLZ2);
2636         }
2637       }
2638     }
2639
2640     // If low bits are known to be zero in both operands, then we know they are
2641     // going to be 0 in the result. Both addition and complement operations
2642     // preserve the low zero bits.
2643     computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2644     unsigned KnownZeroLow = Known2.countMinTrailingZeros();
2645     if (KnownZeroLow == 0)
2646       break;
2647
2648     computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2649     KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
2650     Known.Zero.setLowBits(KnownZeroLow);
2651     break;
2652   }
2653   case ISD::UADDO:
2654   case ISD::SADDO:
2655   case ISD::ADDCARRY:
2656     if (Op.getResNo() == 1) {
2657       // If we know the result of a setcc has the top bits zero, use this info.
2658       if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2659               TargetLowering::ZeroOrOneBooleanContent &&
2660           BitWidth > 1)
2661         Known.Zero.setBitsFrom(1);
2662       break;
2663     }
2664     LLVM_FALLTHROUGH;
2665   case ISD::ADD:
2666   case ISD::ADDC:
2667   case ISD::ADDE: {
2668     // Output known-0 bits are known if clear or set in both the low clear bits
2669     // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
2670     // low 3 bits clear.
2671     // Output known-0 bits are also known if the top bits of each input are
2672     // known to be clear. For example, if one input has the top 10 bits clear
2673     // and the other has the top 8 bits clear, we know the top 7 bits of the
2674     // output must be clear.
2675     computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2676     unsigned KnownZeroHigh = Known2.countMinLeadingZeros();
2677     unsigned KnownZeroLow = Known2.countMinTrailingZeros();
2678
2679     computeKnownBits(Op.getOperand(1), Known2, DemandedElts,
2680                      Depth + 1);
2681     KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros());
2682     KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
2683
2684     if (Opcode == ISD::ADDE || Opcode == ISD::ADDCARRY) {
2685       // With ADDE and ADDCARRY, a carry bit may be added in, so we can only
2686       // use this information if we know (at least) that the low two bits are
2687       // clear. We then return to the caller that the low bit is unknown but
2688       // that other bits are known zero.
2689       if (KnownZeroLow >= 2)
2690         Known.Zero.setBits(1, KnownZeroLow);
2691       break;
2692     }
2693
2694     Known.Zero.setLowBits(KnownZeroLow);
2695     if (KnownZeroHigh > 1)
2696       Known.Zero.setHighBits(KnownZeroHigh - 1);
2697     break;
2698   }
2699   case ISD::SREM:
2700     if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2701       const APInt &RA = Rem->getAPIntValue().abs();
2702       if (RA.isPowerOf2()) {
2703         APInt LowBits = RA - 1;
2704         computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2705
2706         // The low bits of the first operand are unchanged by the srem.
2707         Known.Zero = Known2.Zero & LowBits;
2708         Known.One = Known2.One & LowBits;
2709
2710         // If the first operand is non-negative or has all low bits zero, then
2711         // the upper bits are all zero.
2712         if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits))
2713           Known.Zero |= ~LowBits;
2714
2715         // If the first operand is negative and not all low bits are zero, then
2716         // the upper bits are all one.
2717         if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0))
2718           Known.One |= ~LowBits;
2719         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2720       }
2721     }
2722     break;
2723   case ISD::UREM: {
2724     if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2725       const APInt &RA = Rem->getAPIntValue();
2726       if (RA.isPowerOf2()) {
2727         APInt LowBits = (RA - 1);
2728         computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2729
2730         // The upper bits are all zero, the lower ones are unchanged.
2731         Known.Zero = Known2.Zero | ~LowBits;
2732         Known.One = Known2.One & LowBits;
2733         break;
2734       }
2735     }
2736
2737     // Since the result is less than or equal to either operand, any leading
2738     // zero bits in either operand must also exist in the result.
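    // E.g. (illustrative): for an i32 urem whose divisor has 24 known
    // leading zeros, the remainder is no larger than the divisor, so it
    // inherits at least those 24 leading zero bits.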
2739 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2740 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2741 2742 uint32_t Leaders = 2743 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 2744 Known.resetAll(); 2745 Known.Zero.setHighBits(Leaders); 2746 break; 2747 } 2748 case ISD::EXTRACT_ELEMENT: { 2749 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2750 const unsigned Index = Op.getConstantOperandVal(1); 2751 const unsigned BitWidth = Op.getValueSizeInBits(); 2752 2753 // Remove low part of known bits mask 2754 Known.Zero = Known.Zero.getHiBits(Known.Zero.getBitWidth() - Index * BitWidth); 2755 Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth); 2756 2757 // Remove high part of known bit mask 2758 Known = Known.trunc(BitWidth); 2759 break; 2760 } 2761 case ISD::EXTRACT_VECTOR_ELT: { 2762 SDValue InVec = Op.getOperand(0); 2763 SDValue EltNo = Op.getOperand(1); 2764 EVT VecVT = InVec.getValueType(); 2765 const unsigned BitWidth = Op.getValueSizeInBits(); 2766 const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); 2767 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 2768 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 2769 // anything about the extended bits. 2770 if (BitWidth > EltBitWidth) 2771 Known = Known.trunc(EltBitWidth); 2772 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 2773 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) { 2774 // If we know the element index, just demand that vector element. 2775 unsigned Idx = ConstEltNo->getZExtValue(); 2776 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); 2777 computeKnownBits(InVec, Known, DemandedElt, Depth + 1); 2778 } else { 2779 // Unknown element index, so ignore DemandedElts and demand them all. 2780 computeKnownBits(InVec, Known, Depth + 1); 2781 } 2782 if (BitWidth > EltBitWidth) 2783 Known = Known.zext(BitWidth); 2784 break; 2785 } 2786 case ISD::INSERT_VECTOR_ELT: { 2787 SDValue InVec = Op.getOperand(0); 2788 SDValue InVal = Op.getOperand(1); 2789 SDValue EltNo = Op.getOperand(2); 2790 2791 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 2792 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 2793 // If we know the element index, split the demand between the 2794 // source vector and the inserted element. 2795 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth); 2796 unsigned EltIdx = CEltNo->getZExtValue(); 2797 2798 // If we demand the inserted element then add its common known bits. 2799 if (DemandedElts[EltIdx]) { 2800 computeKnownBits(InVal, Known2, Depth + 1); 2801 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 2802 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth()); 2803 } 2804 2805 // If we demand the source vector then add its common known bits, ensuring 2806 // that we don't demand the inserted element. 2807 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx)); 2808 if (!!VectorElts) { 2809 computeKnownBits(InVec, Known2, VectorElts, Depth + 1); 2810 Known.One &= Known2.One; 2811 Known.Zero &= Known2.Zero; 2812 } 2813 } else { 2814 // Unknown element index, so ignore DemandedElts and demand them all. 
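      // E.g. (illustrative): with a variable insertion index, any result
      // lane may hold the inserted scalar, so only bits on which both the
      // source vector and the (width-adjusted) scalar agree survive.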
2815       computeKnownBits(InVec, Known, Depth + 1);
2816       computeKnownBits(InVal, Known2, Depth + 1);
2817       Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
2818       Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
2819     }
2820     break;
2821   }
2822   case ISD::BITREVERSE: {
2823     computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2824     Known.Zero = Known2.Zero.reverseBits();
2825     Known.One = Known2.One.reverseBits();
2826     break;
2827   }
2828   case ISD::BSWAP: {
2829     computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2830     Known.Zero = Known2.Zero.byteSwap();
2831     Known.One = Known2.One.byteSwap();
2832     break;
2833   }
2834   case ISD::ABS: {
2835     computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2836
2837     // If the source's MSB is zero then we know the rest of the bits already.
2838     if (Known2.isNonNegative()) {
2839       Known.Zero = Known2.Zero;
2840       Known.One = Known2.One;
2841       break;
2842     }
2843
2844     // We only know that the absolute value's MSB will be zero iff there is
2845     // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
2846     Known2.One.clearSignBit();
2847     if (Known2.One.getBoolValue()) {
2848       Known.Zero = APInt::getSignMask(BitWidth);
2849       break;
2850     }
2851     break;
2852   }
2853   case ISD::UMIN: {
2854     computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2855     computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2856
2857     // UMIN - we know that the result will have the maximum of the
2858     // known leading zero bits of the inputs.
2859     unsigned LeadZero = Known.countMinLeadingZeros();
2860     LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
2861
2862     Known.Zero &= Known2.Zero;
2863     Known.One &= Known2.One;
2864     Known.Zero.setHighBits(LeadZero);
2865     break;
2866   }
2867   case ISD::UMAX: {
2868     computeKnownBits(Op.getOperand(0), Known, DemandedElts,
2869                      Depth + 1);
2870     computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2871
2872     // UMAX - we know that the result will have the maximum of the
2873     // known leading one bits of the inputs.
2874     unsigned LeadOne = Known.countMinLeadingOnes();
2875     LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
2876
2877     Known.Zero &= Known2.Zero;
2878     Known.One &= Known2.One;
2879     Known.One.setHighBits(LeadOne);
2880     break;
2881   }
2882   case ISD::SMIN:
2883   case ISD::SMAX: {
2884     computeKnownBits(Op.getOperand(0), Known, DemandedElts,
2885                      Depth + 1);
2886     // If we don't know any bits, early out.
2887     if (Known.isUnknown())
2888       break;
2889     computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2890     Known.Zero &= Known2.Zero;
2891     Known.One &= Known2.One;
2892     break;
2893   }
2894   case ISD::FrameIndex:
2895   case ISD::TargetFrameIndex:
2896     if (unsigned Align = InferPtrAlignment(Op)) {
2897       // The low bits are known zero if the pointer is aligned.
2898       Known.Zero.setLowBits(Log2_32(Align));
2899       break;
2900     }
2901     break;
2902
2903   default:
2904     if (Opcode < ISD::BUILTIN_OP_END)
2905       break;
2906     LLVM_FALLTHROUGH;
2907   case ISD::INTRINSIC_WO_CHAIN:
2908   case ISD::INTRINSIC_W_CHAIN:
2909   case ISD::INTRINSIC_VOID:
2910     // Allow the target to implement this method for its nodes.
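    // Backends hook in via TargetLowering::computeKnownBitsForTargetNode.
    // As a purely hypothetical sketch, a target node producing a 0/1 flag
    // could report its upper bits there with Known.Zero.setBitsFrom(1).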
2911     TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
2912     break;
2913   }
2914
2915   assert(!Known.hasConflict() && "Bits known to be one AND zero?");
2916 }
2917
2918 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
2919                                                              SDValue N1) const {
2920   // X + 0 never overflows.
2921   if (isNullConstant(N1))
2922     return OFK_Never;
2923
2924   KnownBits N1Known;
2925   computeKnownBits(N1, N1Known);
2926   if (N1Known.Zero.getBoolValue()) {
2927     KnownBits N0Known;
2928     computeKnownBits(N0, N0Known);
2929
2930     bool overflow;
2931     (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow);
2932     if (!overflow)
2933       return OFK_Never;
2934   }
2935
2936   // mulhi + 1 never overflows.
2937   if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
2938       (~N1Known.Zero & 0x01) == ~N1Known.Zero)
2939     return OFK_Never;
2940
2941   if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
2942     KnownBits N0Known;
2943     computeKnownBits(N0, N0Known);
2944
2945     if ((~N0Known.Zero & 0x01) == ~N0Known.Zero)
2946       return OFK_Never;
2947   }
2948
2949   return OFK_Sometime;
2950 }
2951
2952 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
2953   EVT OpVT = Val.getValueType();
2954   unsigned BitWidth = OpVT.getScalarSizeInBits();
2955
2956   // Is the constant a known power of 2?
2957   if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
2958     return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
2959
2960   // A left-shift of a constant one will have exactly one bit set because
2961   // shifting the bit off the end is undefined.
2962   if (Val.getOpcode() == ISD::SHL) {
2963     auto *C = isConstOrConstSplat(Val.getOperand(0));
2964     if (C && C->getAPIntValue() == 1)
2965       return true;
2966   }
2967
2968   // Similarly, a logical right-shift of a constant sign-bit will have exactly
2969   // one bit set.
2970   if (Val.getOpcode() == ISD::SRL) {
2971     auto *C = isConstOrConstSplat(Val.getOperand(0));
2972     if (C && C->getAPIntValue().isSignMask())
2973       return true;
2974   }
2975
2976   // Are all operands of a build vector constant powers of two?
2977   if (Val.getOpcode() == ISD::BUILD_VECTOR)
2978     if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
2979           if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
2980             return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
2981           return false;
2982         }))
2983       return true;
2984
2985   // More could be done here, though the above checks are enough
2986   // to handle some common cases.
2987
2988   // Fall back to computeKnownBits to catch other known cases.
2989   KnownBits Known;
2990   computeKnownBits(Val, Known);
2991   return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
2992 }
2993
2994 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
2995   EVT VT = Op.getValueType();
2996   APInt DemandedElts = VT.isVector()
2997                            ?
APInt::getAllOnesValue(VT.getVectorNumElements()) 2998 : APInt(1, 1); 2999 return ComputeNumSignBits(Op, DemandedElts, Depth); 3000 } 3001 3002 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3003 unsigned Depth) const { 3004 EVT VT = Op.getValueType(); 3005 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3006 unsigned VTBits = VT.getScalarSizeInBits(); 3007 unsigned NumElts = DemandedElts.getBitWidth(); 3008 unsigned Tmp, Tmp2; 3009 unsigned FirstAnswer = 1; 3010 3011 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3012 const APInt &Val = C->getAPIntValue(); 3013 return Val.getNumSignBits(); 3014 } 3015 3016 if (Depth == 6) 3017 return 1; // Limit search depth. 3018 3019 if (!DemandedElts) 3020 return 1; // No demanded elts, better to assume we don't know anything. 3021 3022 switch (Op.getOpcode()) { 3023 default: break; 3024 case ISD::AssertSext: 3025 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3026 return VTBits-Tmp+1; 3027 case ISD::AssertZext: 3028 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3029 return VTBits-Tmp; 3030 3031 case ISD::BUILD_VECTOR: 3032 Tmp = VTBits; 3033 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3034 if (!DemandedElts[i]) 3035 continue; 3036 3037 SDValue SrcOp = Op.getOperand(i); 3038 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 3039 3040 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3041 if (SrcOp.getValueSizeInBits() != VTBits) { 3042 assert(SrcOp.getValueSizeInBits() > VTBits && 3043 "Expected BUILD_VECTOR implicit truncation"); 3044 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3045 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3046 } 3047 Tmp = std::min(Tmp, Tmp2); 3048 } 3049 return Tmp; 3050 3051 case ISD::VECTOR_SHUFFLE: { 3052 // Collect the minimum number of sign bits that are shared by every vector 3053 // element referenced by the shuffle. 3054 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3055 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3056 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3057 for (unsigned i = 0; i != NumElts; ++i) { 3058 int M = SVN->getMaskElt(i); 3059 if (!DemandedElts[i]) 3060 continue; 3061 // For UNDEF elements, we don't know anything about the common state of 3062 // the shuffle result. 3063 if (M < 0) 3064 return 1; 3065 if ((unsigned)M < NumElts) 3066 DemandedLHS.setBit((unsigned)M % NumElts); 3067 else 3068 DemandedRHS.setBit((unsigned)M % NumElts); 3069 } 3070 Tmp = std::numeric_limits<unsigned>::max(); 3071 if (!!DemandedLHS) 3072 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3073 if (!!DemandedRHS) { 3074 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3075 Tmp = std::min(Tmp, Tmp2); 3076 } 3077 // If we don't know anything, early out and try computeKnownBits fall-back. 3078 if (Tmp == 1) 3079 break; 3080 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3081 return Tmp; 3082 } 3083 3084 case ISD::BITCAST: { 3085 SDValue N0 = Op.getOperand(0); 3086 EVT SrcVT = N0.getValueType(); 3087 unsigned SrcBits = SrcVT.getScalarSizeInBits(); 3088 3089 // Ignore bitcasts from unsupported types.. 3090 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) 3091 break; 3092 3093 // Fast handling of 'identity' bitcasts. 
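    // E.g. (illustrative): an i32 -> f32 bitcast reinterprets the same
    // bits, so the sign-bit count of the source value is simply forwarded.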
3094 if (VTBits == SrcBits) 3095 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3096 3097 // Bitcast 'large element' scalar/vector to 'small element' vector. 3098 // TODO: Handle cases other than 'sign splat' when we have a use case. 3099 // Requires handling of DemandedElts and Endianness. 3100 if ((SrcBits % VTBits) == 0) { 3101 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 3102 Tmp = ComputeNumSignBits(N0, Depth + 1); 3103 if (Tmp == SrcBits) 3104 return VTBits; 3105 } 3106 break; 3107 } 3108 3109 case ISD::SIGN_EXTEND: 3110 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3111 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3112 case ISD::SIGN_EXTEND_INREG: 3113 // Max of the input and what this extends. 3114 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3115 Tmp = VTBits-Tmp+1; 3116 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3117 return std::max(Tmp, Tmp2); 3118 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3119 SDValue Src = Op.getOperand(0); 3120 EVT SrcVT = Src.getValueType(); 3121 APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements()); 3122 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3123 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3124 } 3125 3126 case ISD::SRA: 3127 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3128 // SRA X, C -> adds C sign bits. 3129 if (ConstantSDNode *C = 3130 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) { 3131 APInt ShiftVal = C->getAPIntValue(); 3132 ShiftVal += Tmp; 3133 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); 3134 } 3135 return Tmp; 3136 case ISD::SHL: 3137 if (ConstantSDNode *C = 3138 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) { 3139 // shl destroys sign bits. 3140 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3141 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 3142 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 3143 return Tmp - C->getZExtValue(); 3144 } 3145 break; 3146 case ISD::AND: 3147 case ISD::OR: 3148 case ISD::XOR: // NOT is handled here. 3149 // Logical binary ops preserve the number of sign bits at the worst. 3150 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3151 if (Tmp != 1) { 3152 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3153 FirstAnswer = std::min(Tmp, Tmp2); 3154 // We computed what we know about the sign bits as our first 3155 // answer. Now proceed to the generic code that uses 3156 // computeKnownBits, and pick whichever answer is better. 3157 } 3158 break; 3159 3160 case ISD::SELECT: 3161 case ISD::VSELECT: 3162 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3163 if (Tmp == 1) return 1; // Early out. 3164 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3165 return std::min(Tmp, Tmp2); 3166 case ISD::SELECT_CC: 3167 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3168 if (Tmp == 1) return 1; // Early out. 3169 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3170 return std::min(Tmp, Tmp2); 3171 3172 case ISD::SMIN: 3173 case ISD::SMAX: 3174 case ISD::UMIN: 3175 case ISD::UMAX: 3176 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3177 if (Tmp == 1) 3178 return 1; // Early out. 
3179     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3180     return std::min(Tmp, Tmp2);
3181   case ISD::SADDO:
3182   case ISD::UADDO:
3183   case ISD::SSUBO:
3184   case ISD::USUBO:
3185   case ISD::SMULO:
3186   case ISD::UMULO:
3187     if (Op.getResNo() != 1)
3188       break;
3189     // The boolean result conforms to getBooleanContents. Fall through.
3190     // If setcc returns 0/-1, all bits are sign bits.
3191     // We know that we have an integer-based boolean since these operations
3192     // are only available for integers.
3193     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3194         TargetLowering::ZeroOrNegativeOneBooleanContent)
3195       return VTBits;
3196     break;
3197   case ISD::SETCC:
3198     // If setcc returns 0/-1, all bits are sign bits.
3199     if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3200         TargetLowering::ZeroOrNegativeOneBooleanContent)
3201       return VTBits;
3202     break;
3203   case ISD::ROTL:
3204   case ISD::ROTR:
3205     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3206       unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3207
3208       // Handle rotate right by N like a rotate left by VTBits-N.
3209       if (Op.getOpcode() == ISD::ROTR)
3210         RotAmt = (VTBits - RotAmt) % VTBits;
3211
3212       // If we aren't rotating out all of the known-in sign bits, return the
3213       // number that are left. This handles rotl(sext(x), 1) for example.
3214       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3215       if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3216     }
3217     break;
3218   case ISD::ADD:
3219   case ISD::ADDC:
3220     // Add can have at most one carry bit. Thus we know that the output
3221     // is, at worst, one more bit than the inputs.
3222     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3223     if (Tmp == 1) return 1; // Early out.
3224
3225     // Special case decrementing a value (ADD X, -1):
3226     if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3227       if (CRHS->isAllOnesValue()) {
3228         KnownBits Known;
3229         computeKnownBits(Op.getOperand(0), Known, Depth+1);
3230
3231         // If the input is known to be 0 or 1, the output is 0/-1, which is all
3232         // sign bits set.
3233         if ((Known.Zero | 1).isAllOnesValue())
3234           return VTBits;
3235
3236         // If we are subtracting one from a positive number, there is no carry
3237         // out of the result.
3238         if (Known.isNonNegative())
3239           return Tmp;
3240       }
3241
3242     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3243     if (Tmp2 == 1) return 1;
3244     return std::min(Tmp, Tmp2)-1;
3245
3246   case ISD::SUB:
3247     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3248     if (Tmp2 == 1) return 1;
3249
3250     // Handle NEG.
3251     if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3252       if (CLHS->isNullValue()) {
3253         KnownBits Known;
3254         computeKnownBits(Op.getOperand(1), Known, Depth+1);
3255         // If the input is known to be 0 or 1, the output is 0/-1, which is all
3256         // sign bits set.
3257         if ((Known.Zero | 1).isAllOnesValue())
3258           return VTBits;
3259
3260         // If the input is known to be positive (the sign bit is known clear),
3261         // the output of the NEG has the same number of sign bits as the input.
3262         if (Known.isNonNegative())
3263           return Tmp2;
3264
3265         // Otherwise, we treat this like a SUB.
3266       }
3267
3268     // Sub can have at most one carry bit. Thus we know that the output
3269     // is, at worst, one more bit than the inputs.
3270     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3271     if (Tmp == 1) return 1; // Early out.
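// For example, if both i32 operands are known to have at least 8 sign bits,
// the difference still has at least min(8, 8) - 1 = 7: a borrow can consume
// one bit of the shared sign run, but no more.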
3272 return std::min(Tmp, Tmp2)-1; 3273 case ISD::TRUNCATE: { 3274 // Check if the sign bits of source go down as far as the truncated value. 3275 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); 3276 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3277 if (NumSrcSignBits > (NumSrcBits - VTBits)) 3278 return NumSrcSignBits - (NumSrcBits - VTBits); 3279 break; 3280 } 3281 case ISD::EXTRACT_ELEMENT: { 3282 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3283 const int BitWidth = Op.getValueSizeInBits(); 3284 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; 3285 3286 // Get reverse index (starting from 1), Op1 value indexes elements from 3287 // little end. Sign starts at big end. 3288 const int rIndex = Items - 1 - Op.getConstantOperandVal(1); 3289 3290 // If the sign portion ends in our element the subtraction gives correct 3291 // result. Otherwise it gives either negative or > bitwidth result 3292 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); 3293 } 3294 case ISD::INSERT_VECTOR_ELT: { 3295 SDValue InVec = Op.getOperand(0); 3296 SDValue InVal = Op.getOperand(1); 3297 SDValue EltNo = Op.getOperand(2); 3298 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 3299 3300 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3301 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3302 // If we know the element index, split the demand between the 3303 // source vector and the inserted element. 3304 unsigned EltIdx = CEltNo->getZExtValue(); 3305 3306 // If we demand the inserted element then get its sign bits. 3307 Tmp = std::numeric_limits<unsigned>::max(); 3308 if (DemandedElts[EltIdx]) { 3309 // TODO - handle implicit truncation of inserted elements. 3310 if (InVal.getScalarValueSizeInBits() != VTBits) 3311 break; 3312 Tmp = ComputeNumSignBits(InVal, Depth + 1); 3313 } 3314 3315 // If we demand the source vector then get its sign bits, and determine 3316 // the minimum. 3317 APInt VectorElts = DemandedElts; 3318 VectorElts.clearBit(EltIdx); 3319 if (!!VectorElts) { 3320 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1); 3321 Tmp = std::min(Tmp, Tmp2); 3322 } 3323 } else { 3324 // Unknown element index, so ignore DemandedElts and demand them all. 3325 Tmp = ComputeNumSignBits(InVec, Depth + 1); 3326 Tmp2 = ComputeNumSignBits(InVal, Depth + 1); 3327 Tmp = std::min(Tmp, Tmp2); 3328 } 3329 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3330 return Tmp; 3331 } 3332 case ISD::EXTRACT_VECTOR_ELT: { 3333 SDValue InVec = Op.getOperand(0); 3334 SDValue EltNo = Op.getOperand(1); 3335 EVT VecVT = InVec.getValueType(); 3336 const unsigned BitWidth = Op.getValueSizeInBits(); 3337 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); 3338 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3339 3340 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know 3341 // anything about sign bits. But if the sizes match we can derive knowledge 3342 // about sign bits from the vector operand. 3343 if (BitWidth != EltBitWidth) 3344 break; 3345 3346 // If we know the element index, just demand that vector element, else for 3347 // an unknown element index, ignore DemandedElts and demand them all. 
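// For example, extracting lane 2 of a v4i32 source narrows the demand mask
// to 0b0100, so lanes 0, 1 and 3 cannot pessimize the result.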
3348 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 3349 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3350 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) 3351 DemandedSrcElts = 3352 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); 3353 3354 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1); 3355 } 3356 case ISD::EXTRACT_SUBVECTOR: { 3357 // If we know the element index, just demand that subvector elements, 3358 // otherwise demand them all. 3359 SDValue Src = Op.getOperand(0); 3360 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 3361 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3362 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 3363 // Offset the demanded elts by the subvector index. 3364 uint64_t Idx = SubIdx->getZExtValue(); 3365 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx); 3366 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1); 3367 } 3368 return ComputeNumSignBits(Src, Depth + 1); 3369 } 3370 case ISD::CONCAT_VECTORS: 3371 // Determine the minimum number of sign bits across all demanded 3372 // elts of the input vectors. Early out if the result is already 1. 3373 Tmp = std::numeric_limits<unsigned>::max(); 3374 EVT SubVectorVT = Op.getOperand(0).getValueType(); 3375 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 3376 unsigned NumSubVectors = Op.getNumOperands(); 3377 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) { 3378 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 3379 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 3380 if (!DemandedSub) 3381 continue; 3382 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1); 3383 Tmp = std::min(Tmp, Tmp2); 3384 } 3385 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3386 return Tmp; 3387 } 3388 3389 // If we are looking at the loaded value of the SDNode. 3390 if (Op.getResNo() == 0) { 3391 // Handle LOADX separately here. EXTLOAD case will fallthrough. 3392 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 3393 unsigned ExtType = LD->getExtensionType(); 3394 switch (ExtType) { 3395 default: break; 3396 case ISD::SEXTLOAD: // '17' bits known 3397 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3398 return VTBits-Tmp+1; 3399 case ISD::ZEXTLOAD: // '16' bits known 3400 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3401 return VTBits-Tmp; 3402 } 3403 } 3404 } 3405 3406 // Allow the target to implement this method for its nodes. 3407 if (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3408 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3409 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3410 Op.getOpcode() == ISD::INTRINSIC_VOID) { 3411 unsigned NumBits = 3412 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 3413 if (NumBits > 1) 3414 FirstAnswer = std::max(FirstAnswer, NumBits); 3415 } 3416 3417 // Finally, if we can prove that the top bits of the result are 0's or 1's, 3418 // use this information. 3419 KnownBits Known; 3420 computeKnownBits(Op, Known, DemandedElts, Depth); 3421 3422 APInt Mask; 3423 if (Known.isNonNegative()) { // sign bit is 0 3424 Mask = Known.Zero; 3425 } else if (Known.isNegative()) { // sign bit is 1; 3426 Mask = Known.One; 3427 } else { 3428 // Nothing known. 3429 return FirstAnswer; 3430 } 3431 3432 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 3433 // the number of identical bits in the top of the input value. 
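// For example, with VTBits = 16 and Known.One = 0xFF80, the inverted mask is
// 0x007F and countLeadingZeros() reports 9, i.e. nine identical top bits.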
3434   Mask = ~Mask;
3435   Mask <<= Mask.getBitWidth()-VTBits;
3436   // Return # leading zeros. We use 'min' here in case Val was zero before
3437   // shifting. We don't want to return '64' for an i32 '0'.
3438   return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3439 }
3440
3441 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3442   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3443       !isa<ConstantSDNode>(Op.getOperand(1)))
3444     return false;
3445
3446   if (Op.getOpcode() == ISD::OR &&
3447       !MaskedValueIsZero(Op.getOperand(0),
3448                          cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3449     return false;
3450
3451   return true;
3452 }
3453
3454 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
3455   // If we're told that NaNs won't happen, assume they won't.
3456   if (getTarget().Options.NoNaNsFPMath)
3457     return true;
3458
3459   if (Op->getFlags().hasNoNaNs())
3460     return true;
3461
3462   // If the value is a constant, we can obviously see if it is a NaN or not.
3463   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3464     return !C->getValueAPF().isNaN();
3465
3466   // TODO: Recognize more cases here.
3467
3468   return false;
3469 }
3470
3471 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3472   // If the value is a constant, we can obviously see if it is a zero or not.
3473   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3474     return !C->isZero();
3475
3476   // TODO: Recognize more cases here.
3477   switch (Op.getOpcode()) {
3478   default: break;
3479   case ISD::OR:
3480     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3481       return !C->isNullValue();
3482     break;
3483   }
3484
3485   return false;
3486 }
3487
3488 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3489   // Check the obvious case.
3490   if (A == B) return true;
3491
3492   // Check for negative and positive zero.
3493   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3494     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3495       if (CA->isZero() && CB->isZero()) return true;
3496
3497   // Otherwise they may not be equal.
3498   return false;
3499 }
3500
3501 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
3502   assert(A.getValueType() == B.getValueType() &&
3503          "Values must have the same type");
3504   KnownBits AKnown, BKnown;
3505   computeKnownBits(A, AKnown);
3506   computeKnownBits(B, BKnown);
3507   return (AKnown.Zero | BKnown.Zero).isAllOnesValue();
3508 }
3509
3510 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
3511                                   ArrayRef<SDValue> Ops,
3512                                   SelectionDAG &DAG) {
3513   assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
3514   assert(llvm::all_of(Ops,
3515                       [Ops](SDValue Op) {
3516                         return Ops[0].getValueType() == Op.getValueType();
3517                       }) &&
3518          "Concatenation of vectors with inconsistent value types!");
3519   assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
3520              VT.getVectorNumElements() &&
3521          "Incorrect element count in vector concatenation!");
3522
3523   if (Ops.size() == 1)
3524     return Ops[0];
3525
3526   // Concat of UNDEFs is UNDEF.
3527   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3528     return DAG.getUNDEF(VT);
3529
3530   // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
3531   // simplified to one big BUILD_VECTOR.
3532   // FIXME: Add support for SCALAR_TO_VECTOR as well.
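// For example, concat (BUILD_VECTOR a, b), (BUILD_VECTOR c, d) becomes
// BUILD_VECTOR a, b, c, d, and an UNDEF operand expands to a run of UNDEF
// scalar elements.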
3533   EVT SVT = VT.getScalarType();
3534   SmallVector<SDValue, 16> Elts;
3535   for (SDValue Op : Ops) {
3536     EVT OpVT = Op.getValueType();
3537     if (Op.isUndef())
3538       Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
3539     else if (Op.getOpcode() == ISD::BUILD_VECTOR)
3540       Elts.append(Op->op_begin(), Op->op_end());
3541     else
3542       return SDValue();
3543   }
3544
3545   // BUILD_VECTOR requires all inputs to be of the same type; find the
3546   // maximum type and extend them all.
3547   for (SDValue Op : Elts)
3548     SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
3549
3550   if (SVT.bitsGT(VT.getScalarType()))
3551     for (SDValue &Op : Elts)
3552       Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
3553                ? DAG.getZExtOrTrunc(Op, DL, SVT)
3554                : DAG.getSExtOrTrunc(Op, DL, SVT);
3555
3556   SDValue V = DAG.getBuildVector(VT, DL, Elts);
3557   NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
3558   return V;
3559 }
3560
3561 /// Gets or creates the specified node.
3562 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
3563   FoldingSetNodeID ID;
3564   AddNodeIDNode(ID, Opcode, getVTList(VT), None);
3565   void *IP = nullptr;
3566   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3567     return SDValue(E, 0);
3568
3569   auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
3570                               getVTList(VT));
3571   CSEMap.InsertNode(N, IP);
3572
3573   InsertNode(N);
3574   SDValue V = SDValue(N, 0);
3575   NewSDValueDbgMsg(V, "Creating new node: ", this);
3576   return V;
3577 }
3578
3579 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3580                               SDValue Operand, const SDNodeFlags Flags) {
3581   // Constant fold unary operations with an integer constant operand. Even
3582   // opaque constants will be folded, because the folding of unary operations
3583   // doesn't create new constants with different values. Nevertheless, the
3584   // opaque flag is preserved during folding to prevent future folding with
3585   // other constants.
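// For example, (i32 sign_extend (i16 c)) with c = 0x8000 folds directly to
// the i32 constant 0xFFFF8000 below; no SIGN_EXTEND node is created.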
3586 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 3587 const APInt &Val = C->getAPIntValue(); 3588 switch (Opcode) { 3589 default: break; 3590 case ISD::SIGN_EXTEND: 3591 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 3592 C->isTargetOpcode(), C->isOpaque()); 3593 case ISD::ANY_EXTEND: 3594 case ISD::ZERO_EXTEND: 3595 case ISD::TRUNCATE: 3596 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 3597 C->isTargetOpcode(), C->isOpaque()); 3598 case ISD::UINT_TO_FP: 3599 case ISD::SINT_TO_FP: { 3600 APFloat apf(EVTToAPFloatSemantics(VT), 3601 APInt::getNullValue(VT.getSizeInBits())); 3602 (void)apf.convertFromAPInt(Val, 3603 Opcode==ISD::SINT_TO_FP, 3604 APFloat::rmNearestTiesToEven); 3605 return getConstantFP(apf, DL, VT); 3606 } 3607 case ISD::BITCAST: 3608 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 3609 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 3610 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 3611 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 3612 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 3613 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 3614 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 3615 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 3616 break; 3617 case ISD::ABS: 3618 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 3619 C->isOpaque()); 3620 case ISD::BITREVERSE: 3621 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 3622 C->isOpaque()); 3623 case ISD::BSWAP: 3624 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 3625 C->isOpaque()); 3626 case ISD::CTPOP: 3627 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 3628 C->isOpaque()); 3629 case ISD::CTLZ: 3630 case ISD::CTLZ_ZERO_UNDEF: 3631 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 3632 C->isOpaque()); 3633 case ISD::CTTZ: 3634 case ISD::CTTZ_ZERO_UNDEF: 3635 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 3636 C->isOpaque()); 3637 case ISD::FP16_TO_FP: { 3638 bool Ignored; 3639 APFloat FPV(APFloat::IEEEhalf(), 3640 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 3641 3642 // This can return overflow, underflow, or inexact; we don't care. 3643 // FIXME need to be more flexible about rounding mode. 3644 (void)FPV.convert(EVTToAPFloatSemantics(VT), 3645 APFloat::rmNearestTiesToEven, &Ignored); 3646 return getConstantFP(FPV, DL, VT); 3647 } 3648 } 3649 } 3650 3651 // Constant fold unary operations with a floating point constant operand. 
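// For example, (f32 fneg 2.0) folds to the constant -2.0 by flipping the
// sign on a copy of the APFloat, and (f32 fceil 1.5) folds to 2.0 via
// roundToIntegral.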
3652 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 3653 APFloat V = C->getValueAPF(); // make copy 3654 switch (Opcode) { 3655 case ISD::FNEG: 3656 V.changeSign(); 3657 return getConstantFP(V, DL, VT); 3658 case ISD::FABS: 3659 V.clearSign(); 3660 return getConstantFP(V, DL, VT); 3661 case ISD::FCEIL: { 3662 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 3663 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3664 return getConstantFP(V, DL, VT); 3665 break; 3666 } 3667 case ISD::FTRUNC: { 3668 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 3669 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3670 return getConstantFP(V, DL, VT); 3671 break; 3672 } 3673 case ISD::FFLOOR: { 3674 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 3675 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3676 return getConstantFP(V, DL, VT); 3677 break; 3678 } 3679 case ISD::FP_EXTEND: { 3680 bool ignored; 3681 // This can return overflow, underflow, or inexact; we don't care. 3682 // FIXME need to be more flexible about rounding mode. 3683 (void)V.convert(EVTToAPFloatSemantics(VT), 3684 APFloat::rmNearestTiesToEven, &ignored); 3685 return getConstantFP(V, DL, VT); 3686 } 3687 case ISD::FP_TO_SINT: 3688 case ISD::FP_TO_UINT: { 3689 bool ignored; 3690 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 3691 // FIXME need to be more flexible about rounding mode. 3692 APFloat::opStatus s = 3693 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 3694 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 3695 break; 3696 return getConstant(IntVal, DL, VT); 3697 } 3698 case ISD::BITCAST: 3699 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 3700 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3701 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 3702 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3703 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 3704 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 3705 break; 3706 case ISD::FP_TO_FP16: { 3707 bool Ignored; 3708 // This can return overflow, underflow, or inexact; we don't care. 3709 // FIXME need to be more flexible about rounding mode. 3710 (void)V.convert(APFloat::IEEEhalf(), 3711 APFloat::rmNearestTiesToEven, &Ignored); 3712 return getConstant(V.bitcastToAPInt(), DL, VT); 3713 } 3714 } 3715 } 3716 3717 // Constant fold unary operations with a vector integer or float operand. 3718 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 3719 if (BV->isConstant()) { 3720 switch (Opcode) { 3721 default: 3722 // FIXME: Entirely reasonable to perform folding of other unary 3723 // operations here as the need arises. 
3724 break; 3725 case ISD::FNEG: 3726 case ISD::FABS: 3727 case ISD::FCEIL: 3728 case ISD::FTRUNC: 3729 case ISD::FFLOOR: 3730 case ISD::FP_EXTEND: 3731 case ISD::FP_TO_SINT: 3732 case ISD::FP_TO_UINT: 3733 case ISD::TRUNCATE: 3734 case ISD::UINT_TO_FP: 3735 case ISD::SINT_TO_FP: 3736 case ISD::ABS: 3737 case ISD::BITREVERSE: 3738 case ISD::BSWAP: 3739 case ISD::CTLZ: 3740 case ISD::CTLZ_ZERO_UNDEF: 3741 case ISD::CTTZ: 3742 case ISD::CTTZ_ZERO_UNDEF: 3743 case ISD::CTPOP: { 3744 SDValue Ops = { Operand }; 3745 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 3746 return Fold; 3747 } 3748 } 3749 } 3750 } 3751 3752 unsigned OpOpcode = Operand.getNode()->getOpcode(); 3753 switch (Opcode) { 3754 case ISD::TokenFactor: 3755 case ISD::MERGE_VALUES: 3756 case ISD::CONCAT_VECTORS: 3757 return Operand; // Factor, merge or concat of one node? No need. 3758 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 3759 case ISD::FP_EXTEND: 3760 assert(VT.isFloatingPoint() && 3761 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 3762 if (Operand.getValueType() == VT) return Operand; // noop conversion. 3763 assert((!VT.isVector() || 3764 VT.getVectorNumElements() == 3765 Operand.getValueType().getVectorNumElements()) && 3766 "Vector element count mismatch!"); 3767 assert(Operand.getValueType().bitsLT(VT) && 3768 "Invalid fpext node, dst < src!"); 3769 if (Operand.isUndef()) 3770 return getUNDEF(VT); 3771 break; 3772 case ISD::SIGN_EXTEND: 3773 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3774 "Invalid SIGN_EXTEND!"); 3775 if (Operand.getValueType() == VT) return Operand; // noop extension 3776 assert((!VT.isVector() || 3777 VT.getVectorNumElements() == 3778 Operand.getValueType().getVectorNumElements()) && 3779 "Vector element count mismatch!"); 3780 assert(Operand.getValueType().bitsLT(VT) && 3781 "Invalid sext node, dst < src!"); 3782 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 3783 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 3784 else if (OpOpcode == ISD::UNDEF) 3785 // sext(undef) = 0, because the top bits will all be the same. 3786 return getConstant(0, DL, VT); 3787 break; 3788 case ISD::ZERO_EXTEND: 3789 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3790 "Invalid ZERO_EXTEND!"); 3791 if (Operand.getValueType() == VT) return Operand; // noop extension 3792 assert((!VT.isVector() || 3793 VT.getVectorNumElements() == 3794 Operand.getValueType().getVectorNumElements()) && 3795 "Vector element count mismatch!"); 3796 assert(Operand.getValueType().bitsLT(VT) && 3797 "Invalid zext node, dst < src!"); 3798 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 3799 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0)); 3800 else if (OpOpcode == ISD::UNDEF) 3801 // zext(undef) = 0, because the top bits will be zero. 
3802       return getConstant(0, DL, VT);
3803     break;
3804   case ISD::ANY_EXTEND:
3805     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3806            "Invalid ANY_EXTEND!");
3807     if (Operand.getValueType() == VT) return Operand;   // noop extension
3808     assert((!VT.isVector() ||
3809             VT.getVectorNumElements() ==
3810             Operand.getValueType().getVectorNumElements()) &&
3811            "Vector element count mismatch!");
3812     assert(Operand.getValueType().bitsLT(VT) &&
3813            "Invalid anyext node, dst < src!");
3814
3815     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3816         OpOpcode == ISD::ANY_EXTEND)
3817       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
3818       return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3819     else if (OpOpcode == ISD::UNDEF)
3820       return getUNDEF(VT);
3821
3822     // (ext (trunc x)) -> x
3823     if (OpOpcode == ISD::TRUNCATE) {
3824       SDValue OpOp = Operand.getOperand(0);
3825       if (OpOp.getValueType() == VT)
3826         return OpOp;
3827     }
3828     break;
3829   case ISD::TRUNCATE:
3830     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3831            "Invalid TRUNCATE!");
3832     if (Operand.getValueType() == VT) return Operand;   // noop truncate
3833     assert((!VT.isVector() ||
3834             VT.getVectorNumElements() ==
3835             Operand.getValueType().getVectorNumElements()) &&
3836            "Vector element count mismatch!");
3837     assert(Operand.getValueType().bitsGT(VT) &&
3838            "Invalid truncate node, src < dst!");
3839     if (OpOpcode == ISD::TRUNCATE)
3840       return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3841     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3842         OpOpcode == ISD::ANY_EXTEND) {
3843       // If the source is smaller than the dest, we still need an extend.
3844       if (Operand.getOperand(0).getValueType().getScalarType()
3845               .bitsLT(VT.getScalarType()))
3846         return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3847       if (Operand.getOperand(0).getValueType().bitsGT(VT))
3848         return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3849       return Operand.getOperand(0);
3850     }
3851     if (OpOpcode == ISD::UNDEF)
3852       return getUNDEF(VT);
3853     break;
3854   case ISD::ABS:
3855     assert(VT.isInteger() && VT == Operand.getValueType() &&
3856            "Invalid ABS!");
3857     if (OpOpcode == ISD::UNDEF)
3858       return getUNDEF(VT);
3859     break;
3860   case ISD::BSWAP:
3861     assert(VT.isInteger() && VT == Operand.getValueType() &&
3862            "Invalid BSWAP!");
3863     assert((VT.getScalarSizeInBits() % 16 == 0) &&
3864            "BSWAP types must be a multiple of 16 bits!");
3865     if (OpOpcode == ISD::UNDEF)
3866       return getUNDEF(VT);
3867     break;
3868   case ISD::BITREVERSE:
3869     assert(VT.isInteger() && VT == Operand.getValueType() &&
3870            "Invalid BITREVERSE!");
3871     if (OpOpcode == ISD::UNDEF)
3872       return getUNDEF(VT);
3873     break;
3874   case ISD::BITCAST:
3875     // Basic sanity checking.
3876     assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
3877            "Cannot BITCAST between types of different sizes!");
3878     if (VT == Operand.getValueType()) return Operand; // noop conversion.
3879 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 3880 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 3881 if (OpOpcode == ISD::UNDEF) 3882 return getUNDEF(VT); 3883 break; 3884 case ISD::SCALAR_TO_VECTOR: 3885 assert(VT.isVector() && !Operand.getValueType().isVector() && 3886 (VT.getVectorElementType() == Operand.getValueType() || 3887 (VT.getVectorElementType().isInteger() && 3888 Operand.getValueType().isInteger() && 3889 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 3890 "Illegal SCALAR_TO_VECTOR node!"); 3891 if (OpOpcode == ISD::UNDEF) 3892 return getUNDEF(VT); 3893 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 3894 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 3895 isa<ConstantSDNode>(Operand.getOperand(1)) && 3896 Operand.getConstantOperandVal(1) == 0 && 3897 Operand.getOperand(0).getValueType() == VT) 3898 return Operand.getOperand(0); 3899 break; 3900 case ISD::FNEG: 3901 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 3902 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) 3903 // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags? 3904 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1), 3905 Operand.getOperand(0), Operand.getNode()->getFlags()); 3906 if (OpOpcode == ISD::FNEG) // --X -> X 3907 return Operand.getOperand(0); 3908 break; 3909 case ISD::FABS: 3910 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 3911 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 3912 break; 3913 } 3914 3915 SDNode *N; 3916 SDVTList VTs = getVTList(VT); 3917 SDValue Ops[] = {Operand}; 3918 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 3919 FoldingSetNodeID ID; 3920 AddNodeIDNode(ID, Opcode, VTs, Ops); 3921 void *IP = nullptr; 3922 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 3923 E->intersectFlagsWith(Flags); 3924 return SDValue(E, 0); 3925 } 3926 3927 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3928 N->setFlags(Flags); 3929 createOperands(N, Ops); 3930 CSEMap.InsertNode(N, IP); 3931 } else { 3932 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3933 createOperands(N, Ops); 3934 } 3935 3936 InsertNode(N); 3937 SDValue V = SDValue(N, 0); 3938 NewSDValueDbgMsg(V, "Creating new node: ", this); 3939 return V; 3940 } 3941 3942 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 3943 const APInt &C2) { 3944 switch (Opcode) { 3945 case ISD::ADD: return std::make_pair(C1 + C2, true); 3946 case ISD::SUB: return std::make_pair(C1 - C2, true); 3947 case ISD::MUL: return std::make_pair(C1 * C2, true); 3948 case ISD::AND: return std::make_pair(C1 & C2, true); 3949 case ISD::OR: return std::make_pair(C1 | C2, true); 3950 case ISD::XOR: return std::make_pair(C1 ^ C2, true); 3951 case ISD::SHL: return std::make_pair(C1 << C2, true); 3952 case ISD::SRL: return std::make_pair(C1.lshr(C2), true); 3953 case ISD::SRA: return std::make_pair(C1.ashr(C2), true); 3954 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true); 3955 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true); 3956 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true); 3957 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true); 3958 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true); 3959 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? 
C1 : C2, true); 3960 case ISD::UDIV: 3961 if (!C2.getBoolValue()) 3962 break; 3963 return std::make_pair(C1.udiv(C2), true); 3964 case ISD::UREM: 3965 if (!C2.getBoolValue()) 3966 break; 3967 return std::make_pair(C1.urem(C2), true); 3968 case ISD::SDIV: 3969 if (!C2.getBoolValue()) 3970 break; 3971 return std::make_pair(C1.sdiv(C2), true); 3972 case ISD::SREM: 3973 if (!C2.getBoolValue()) 3974 break; 3975 return std::make_pair(C1.srem(C2), true); 3976 } 3977 return std::make_pair(APInt(1, 0), false); 3978 } 3979 3980 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 3981 EVT VT, const ConstantSDNode *Cst1, 3982 const ConstantSDNode *Cst2) { 3983 if (Cst1->isOpaque() || Cst2->isOpaque()) 3984 return SDValue(); 3985 3986 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(), 3987 Cst2->getAPIntValue()); 3988 if (!Folded.second) 3989 return SDValue(); 3990 return getConstant(Folded.first, DL, VT); 3991 } 3992 3993 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 3994 const GlobalAddressSDNode *GA, 3995 const SDNode *N2) { 3996 if (GA->getOpcode() != ISD::GlobalAddress) 3997 return SDValue(); 3998 if (!TLI->isOffsetFoldingLegal(GA)) 3999 return SDValue(); 4000 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2); 4001 if (!Cst2) 4002 return SDValue(); 4003 int64_t Offset = Cst2->getSExtValue(); 4004 switch (Opcode) { 4005 case ISD::ADD: break; 4006 case ISD::SUB: Offset = -uint64_t(Offset); break; 4007 default: return SDValue(); 4008 } 4009 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT, 4010 GA->getOffset() + uint64_t(Offset)); 4011 } 4012 4013 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4014 switch (Opcode) { 4015 case ISD::SDIV: 4016 case ISD::UDIV: 4017 case ISD::SREM: 4018 case ISD::UREM: { 4019 // If a divisor is zero/undef or any element of a divisor vector is 4020 // zero/undef, the whole op is undef. 4021 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4022 SDValue Divisor = Ops[1]; 4023 if (Divisor.isUndef() || isNullConstant(Divisor)) 4024 return true; 4025 4026 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4027 llvm::any_of(Divisor->op_values(), 4028 [](SDValue V) { return V.isUndef() || 4029 isNullConstant(V); }); 4030 // TODO: Handle signed overflow. 4031 } 4032 // TODO: Handle oversized shifts. 4033 default: 4034 return false; 4035 } 4036 } 4037 4038 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4039 EVT VT, SDNode *Cst1, 4040 SDNode *Cst2) { 4041 // If the opcode is a target-specific ISD node, there's nothing we can 4042 // do here and the operand rules may not line up with the below, so 4043 // bail early. 4044 if (Opcode >= ISD::BUILTIN_OP_END) 4045 return SDValue(); 4046 4047 if (isUndef(Opcode, {SDValue(Cst1, 0), SDValue(Cst2, 0)})) 4048 return getUNDEF(VT); 4049 4050 // Handle the case of two scalars. 
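// For example, (add i32 3, i32 4) reaches FoldValue above and comes back as
// the single constant 7, while a divide or remainder by constant zero
// refuses to fold and leaves the node alone.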
4051 if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) { 4052 if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) { 4053 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2); 4054 assert((!Folded || !VT.isVector()) && 4055 "Can't fold vectors ops with scalar operands"); 4056 return Folded; 4057 } 4058 } 4059 4060 // fold (add Sym, c) -> Sym+c 4061 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1)) 4062 return FoldSymbolOffset(Opcode, VT, GA, Cst2); 4063 if (TLI->isCommutativeBinOp(Opcode)) 4064 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2)) 4065 return FoldSymbolOffset(Opcode, VT, GA, Cst1); 4066 4067 // For vectors extract each constant element into Inputs so we can constant 4068 // fold them individually. 4069 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1); 4070 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2); 4071 if (!BV1 || !BV2) 4072 return SDValue(); 4073 4074 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!"); 4075 4076 EVT SVT = VT.getScalarType(); 4077 EVT LegalSVT = SVT; 4078 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4079 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4080 if (LegalSVT.bitsLT(SVT)) 4081 return SDValue(); 4082 } 4083 SmallVector<SDValue, 4> Outputs; 4084 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) { 4085 SDValue V1 = BV1->getOperand(I); 4086 SDValue V2 = BV2->getOperand(I); 4087 4088 if (SVT.isInteger()) { 4089 if (V1->getValueType(0).bitsGT(SVT)) 4090 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1); 4091 if (V2->getValueType(0).bitsGT(SVT)) 4092 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2); 4093 } 4094 4095 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT) 4096 return SDValue(); 4097 4098 // Fold one vector element. 4099 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2); 4100 if (LegalSVT != SVT) 4101 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4102 4103 // Scalar folding only succeeded if the result is a constant or UNDEF. 4104 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4105 ScalarResult.getOpcode() != ISD::ConstantFP) 4106 return SDValue(); 4107 Outputs.push_back(ScalarResult); 4108 } 4109 4110 assert(VT.getVectorNumElements() == Outputs.size() && 4111 "Vector size mismatch!"); 4112 4113 // We may have a vector type but a scalar result. Create a splat. 4114 Outputs.resize(VT.getVectorNumElements(), Outputs.back()); 4115 4116 // Build a big vector out of the scalar elements we generated. 4117 return getBuildVector(VT, SDLoc(), Outputs); 4118 } 4119 4120 // TODO: Merge with FoldConstantArithmetic 4121 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, 4122 const SDLoc &DL, EVT VT, 4123 ArrayRef<SDValue> Ops, 4124 const SDNodeFlags Flags) { 4125 // If the opcode is a target-specific ISD node, there's nothing we can 4126 // do here and the operand rules may not line up with the below, so 4127 // bail early. 4128 if (Opcode >= ISD::BUILTIN_OP_END) 4129 return SDValue(); 4130 4131 if (isUndef(Opcode, Ops)) 4132 return getUNDEF(VT); 4133 4134 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday? 
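// For example, (add v4i32 <1,1,1,1>, <2,2,2,2>) is folded lane by lane below
// into the constant build vector <3,3,3,3>.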
4135 if (!VT.isVector()) 4136 return SDValue(); 4137 4138 unsigned NumElts = VT.getVectorNumElements(); 4139 4140 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) { 4141 return !Op.getValueType().isVector() || 4142 Op.getValueType().getVectorNumElements() == NumElts; 4143 }; 4144 4145 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { 4146 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); 4147 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || 4148 (BV && BV->isConstant()); 4149 }; 4150 4151 // All operands must be vector types with the same number of elements as 4152 // the result type and must be either UNDEF or a build vector of constant 4153 // or UNDEF scalars. 4154 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) || 4155 !llvm::all_of(Ops, IsScalarOrSameVectorSize)) 4156 return SDValue(); 4157 4158 // If we are comparing vectors, then the result needs to be a i1 boolean 4159 // that is then sign-extended back to the legal result type. 4160 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); 4161 4162 // Find legal integer scalar type for constant promotion and 4163 // ensure that its scalar size is at least as large as source. 4164 EVT LegalSVT = VT.getScalarType(); 4165 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4166 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4167 if (LegalSVT.bitsLT(VT.getScalarType())) 4168 return SDValue(); 4169 } 4170 4171 // Constant fold each scalar lane separately. 4172 SmallVector<SDValue, 4> ScalarResults; 4173 for (unsigned i = 0; i != NumElts; i++) { 4174 SmallVector<SDValue, 4> ScalarOps; 4175 for (SDValue Op : Ops) { 4176 EVT InSVT = Op.getValueType().getScalarType(); 4177 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op); 4178 if (!InBV) { 4179 // We've checked that this is UNDEF or a constant of some kind. 4180 if (Op.isUndef()) 4181 ScalarOps.push_back(getUNDEF(InSVT)); 4182 else 4183 ScalarOps.push_back(Op); 4184 continue; 4185 } 4186 4187 SDValue ScalarOp = InBV->getOperand(i); 4188 EVT ScalarVT = ScalarOp.getValueType(); 4189 4190 // Build vector (integer) scalar operands may need implicit 4191 // truncation - do this before constant folding. 4192 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 4193 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 4194 4195 ScalarOps.push_back(ScalarOp); 4196 } 4197 4198 // Constant fold the scalar operands. 4199 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 4200 4201 // Legalize the (integer) scalar constant if necessary. 4202 if (LegalSVT != SVT) 4203 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4204 4205 // Scalar folding only succeeded if the result is a constant or UNDEF. 4206 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4207 ScalarResult.getOpcode() != ISD::ConstantFP) 4208 return SDValue(); 4209 ScalarResults.push_back(ScalarResult); 4210 } 4211 4212 SDValue V = getBuildVector(VT, DL, ScalarResults); 4213 NewSDValueDbgMsg(V, "New node fold constant vector: ", this); 4214 return V; 4215 } 4216 4217 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4218 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 4219 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 4220 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 4221 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 4222 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 4223 4224 // Canonicalize constant to RHS if commutative. 
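// For example, (add 7, X) is rewritten here as (add X, 7) so that the folds
// below only have to look for a constant on the RHS (N2C/N2CFP).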
4225 if (TLI->isCommutativeBinOp(Opcode)) { 4226 if (N1C && !N2C) { 4227 std::swap(N1C, N2C); 4228 std::swap(N1, N2); 4229 } else if (N1CFP && !N2CFP) { 4230 std::swap(N1CFP, N2CFP); 4231 std::swap(N1, N2); 4232 } 4233 } 4234 4235 switch (Opcode) { 4236 default: break; 4237 case ISD::TokenFactor: 4238 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 4239 N2.getValueType() == MVT::Other && "Invalid token factor!"); 4240 // Fold trivial token factors. 4241 if (N1.getOpcode() == ISD::EntryToken) return N2; 4242 if (N2.getOpcode() == ISD::EntryToken) return N1; 4243 if (N1 == N2) return N1; 4244 break; 4245 case ISD::CONCAT_VECTORS: { 4246 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 4247 SDValue Ops[] = {N1, N2}; 4248 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 4249 return V; 4250 break; 4251 } 4252 case ISD::AND: 4253 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4254 assert(N1.getValueType() == N2.getValueType() && 4255 N1.getValueType() == VT && "Binary operator types must match!"); 4256 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 4257 // worth handling here. 4258 if (N2C && N2C->isNullValue()) 4259 return N2; 4260 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 4261 return N1; 4262 break; 4263 case ISD::OR: 4264 case ISD::XOR: 4265 case ISD::ADD: 4266 case ISD::SUB: 4267 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4268 assert(N1.getValueType() == N2.getValueType() && 4269 N1.getValueType() == VT && "Binary operator types must match!"); 4270 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 4271 // it's worth handling here. 4272 if (N2C && N2C->isNullValue()) 4273 return N1; 4274 break; 4275 case ISD::UDIV: 4276 case ISD::UREM: 4277 case ISD::MULHU: 4278 case ISD::MULHS: 4279 case ISD::MUL: 4280 case ISD::SDIV: 4281 case ISD::SREM: 4282 case ISD::SMIN: 4283 case ISD::SMAX: 4284 case ISD::UMIN: 4285 case ISD::UMAX: 4286 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4287 assert(N1.getValueType() == N2.getValueType() && 4288 N1.getValueType() == VT && "Binary operator types must match!"); 4289 break; 4290 case ISD::FADD: 4291 case ISD::FSUB: 4292 case ISD::FMUL: 4293 case ISD::FDIV: 4294 case ISD::FREM: 4295 if (getTarget().Options.UnsafeFPMath) { 4296 if (Opcode == ISD::FADD) { 4297 // x+0 --> x 4298 if (N2CFP && N2CFP->getValueAPF().isZero()) 4299 return N1; 4300 } else if (Opcode == ISD::FSUB) { 4301 // x-0 --> x 4302 if (N2CFP && N2CFP->getValueAPF().isZero()) 4303 return N1; 4304 } else if (Opcode == ISD::FMUL) { 4305 // x*0 --> 0 4306 if (N2CFP && N2CFP->isZero()) 4307 return N2; 4308 // x*1 --> x 4309 if (N2CFP && N2CFP->isExactlyValue(1.0)) 4310 return N1; 4311 } 4312 } 4313 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 4314 assert(N1.getValueType() == N2.getValueType() && 4315 N1.getValueType() == VT && "Binary operator types must match!"); 4316 break; 4317 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 
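// For example, (f32 fcopysign X, (f64 Y)) is valid: only the sign bit of Y
// is consumed, so the width of N2 is irrelevant.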
4318     assert(N1.getValueType() == VT &&
4319            N1.getValueType().isFloatingPoint() &&
4320            N2.getValueType().isFloatingPoint() &&
4321            "Invalid FCOPYSIGN!");
4322     break;
4323   case ISD::SHL:
4324   case ISD::SRA:
4325   case ISD::SRL:
4326   case ISD::ROTL:
4327   case ISD::ROTR:
4328     assert(VT == N1.getValueType() &&
4329            "Shift operators' return type must be the same as their first arg");
4330     assert(VT.isInteger() && N2.getValueType().isInteger() &&
4331            "Shifts only work on integers");
4332     assert((!VT.isVector() || VT == N2.getValueType()) &&
4333            "Vector shift amounts must have the same type as their first arg");
4334     // Verify that the shift amount VT is big enough to hold valid shift
4335     // amounts. This catches things like trying to shift an i1024 value by an
4336     // i8, which is easy to fall into in generic code that uses
4337     // TLI.getShiftAmountTy().
4338     assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
4339            "Invalid use of small shift amount with oversized value!");
4340
4341     // Always fold shifts of i1 values so the code generator doesn't need to
4342     // handle them. Since we know the size of the shift has to be less than the
4343     // size of the value, the shift/rotate count is guaranteed to be zero.
4344     if (VT == MVT::i1)
4345       return N1;
4346     if (N2C && N2C->isNullValue())
4347       return N1;
4348     break;
4349   case ISD::FP_ROUND_INREG: {
4350     EVT EVT = cast<VTSDNode>(N2)->getVT();
4351     assert(VT == N1.getValueType() && "Not an inreg round!");
4352     assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
4353            "Cannot FP_ROUND_INREG integer types");
4354     assert(EVT.isVector() == VT.isVector() &&
4355            "FP_ROUND_INREG type should be vector iff the operand "
4356            "type is vector!");
4357     assert((!EVT.isVector() ||
4358             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4359            "Vector element counts must match in FP_ROUND_INREG");
4360     assert(EVT.bitsLE(VT) && "Not rounding down!");
4361     (void)EVT;
4362     if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
4363     break;
4364   }
4365   case ISD::FP_ROUND:
4366     assert(VT.isFloatingPoint() &&
4367            N1.getValueType().isFloatingPoint() &&
4368            VT.bitsLE(N1.getValueType()) &&
4369            N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
4370            "Invalid FP_ROUND!");
4371     if (N1.getValueType() == VT) return N1;  // noop conversion.
4372     break;
4373   case ISD::AssertSext:
4374   case ISD::AssertZext: {
4375     EVT EVT = cast<VTSDNode>(N2)->getVT();
4376     assert(VT == N1.getValueType() && "Not an inreg extend!");
4377     assert(VT.isInteger() && EVT.isInteger() &&
4378            "Cannot *_EXTEND_INREG FP types");
4379     assert(!EVT.isVector() &&
4380            "AssertSExt/AssertZExt type should be the vector element type "
4381            "rather than the vector type!");
4382     assert(EVT.bitsLE(VT) && "Not extending!");
4383     if (VT == EVT) return N1; // noop assertion.
4384 break; 4385 } 4386 case ISD::SIGN_EXTEND_INREG: { 4387 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4388 assert(VT == N1.getValueType() && "Not an inreg extend!"); 4389 assert(VT.isInteger() && EVT.isInteger() && 4390 "Cannot *_EXTEND_INREG FP types"); 4391 assert(EVT.isVector() == VT.isVector() && 4392 "SIGN_EXTEND_INREG type should be vector iff the operand " 4393 "type is vector!"); 4394 assert((!EVT.isVector() || 4395 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 4396 "Vector element counts must match in SIGN_EXTEND_INREG"); 4397 assert(EVT.bitsLE(VT) && "Not extending!"); 4398 if (EVT == VT) return N1; // Not actually extending 4399 4400 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 4401 unsigned FromBits = EVT.getScalarSizeInBits(); 4402 Val <<= Val.getBitWidth() - FromBits; 4403 Val.ashrInPlace(Val.getBitWidth() - FromBits); 4404 return getConstant(Val, DL, ConstantVT); 4405 }; 4406 4407 if (N1C) { 4408 const APInt &Val = N1C->getAPIntValue(); 4409 return SignExtendInReg(Val, VT); 4410 } 4411 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 4412 SmallVector<SDValue, 8> Ops; 4413 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 4414 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 4415 SDValue Op = N1.getOperand(i); 4416 if (Op.isUndef()) { 4417 Ops.push_back(getUNDEF(OpVT)); 4418 continue; 4419 } 4420 ConstantSDNode *C = cast<ConstantSDNode>(Op); 4421 APInt Val = C->getAPIntValue(); 4422 Ops.push_back(SignExtendInReg(Val, OpVT)); 4423 } 4424 return getBuildVector(VT, DL, Ops); 4425 } 4426 break; 4427 } 4428 case ISD::EXTRACT_VECTOR_ELT: 4429 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 4430 if (N1.isUndef()) 4431 return getUNDEF(VT); 4432 4433 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 4434 if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 4435 return getUNDEF(VT); 4436 4437 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 4438 // expanding copies of large vectors from registers. 4439 if (N2C && 4440 N1.getOpcode() == ISD::CONCAT_VECTORS && 4441 N1.getNumOperands() > 0) { 4442 unsigned Factor = 4443 N1.getOperand(0).getValueType().getVectorNumElements(); 4444 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 4445 N1.getOperand(N2C->getZExtValue() / Factor), 4446 getConstant(N2C->getZExtValue() % Factor, DL, 4447 N2.getValueType())); 4448 } 4449 4450 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 4451 // expanding large vector constants. 4452 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { 4453 SDValue Elt = N1.getOperand(N2C->getZExtValue()); 4454 4455 if (VT != Elt.getValueType()) 4456 // If the vector element type is not legal, the BUILD_VECTOR operands 4457 // are promoted and implicitly truncated, and the result implicitly 4458 // extended. Make that explicit here. 4459 Elt = getAnyExtOrTrunc(Elt, DL, VT); 4460 4461 return Elt; 4462 } 4463 4464 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 4465 // operations are lowered to scalars. 4466 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 4467 // If the indices are the same, return the inserted element else 4468 // if the indices are known different, extract the element from 4469 // the original vector. 
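// For example, (extractelt (insertelt V, X, 2), 2) yields X, while
// (extractelt (insertelt V, X, 2), 0) becomes (extractelt V, 0).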
4470       SDValue N1Op2 = N1.getOperand(2);
4471       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
4472
4473       if (N1Op2C && N2C) {
4474         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
4475           if (VT == N1.getOperand(1).getValueType())
4476             return N1.getOperand(1);
4477           else
4478             return getSExtOrTrunc(N1.getOperand(1), DL, VT);
4479         }
4480
4481         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
4482       }
4483     }
4484
4485     // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
4486     // when vector types are scalarized and v1iX is legal.
4487     // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
4488     if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
4489         N1.getValueType().getVectorNumElements() == 1) {
4490       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
4491                      N1.getOperand(1));
4492     }
4493     break;
4494   case ISD::EXTRACT_ELEMENT:
4495     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
4496     assert(!N1.getValueType().isVector() && !VT.isVector() &&
4497            (N1.getValueType().isInteger() == VT.isInteger()) &&
4498            N1.getValueType() != VT &&
4499            "Wrong types for EXTRACT_ELEMENT!");
4500
4501     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
4502     // 64-bit integers into 32-bit parts.  Instead of building the extract of
4503     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
4504     if (N1.getOpcode() == ISD::BUILD_PAIR)
4505       return N1.getOperand(N2C->getZExtValue());
4506
4507     // EXTRACT_ELEMENT of a constant int is also very common.
4508     if (N1C) {
4509       unsigned ElementSize = VT.getSizeInBits();
4510       unsigned Shift = ElementSize * N2C->getZExtValue();
4511       APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
4512       return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
4513     }
4514     break;
4515   case ISD::EXTRACT_SUBVECTOR:
4516     if (VT.isSimple() && N1.getValueType().isSimple()) {
4517       assert(VT.isVector() && N1.getValueType().isVector() &&
4518              "Extract subvector VTs must be vectors!");
4519       assert(VT.getVectorElementType() ==
4520              N1.getValueType().getVectorElementType() &&
4521              "Extract subvector VTs must have the same element type!");
4522       assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
4523              "Extract subvector must be from larger vector to smaller vector!");
4524
4525       if (N2C) {
4526         assert((VT.getVectorNumElements() + N2C->getZExtValue()
4527                 <= N1.getValueType().getVectorNumElements())
4528                && "Extract subvector overflow!");
4529       }
4530
4531       // Trivial extraction.
4532       if (VT.getSimpleVT() == N1.getSimpleValueType())
4533         return N1;
4534
4535       // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
4536       if (N1.isUndef())
4537         return getUNDEF(VT);
4538
4539       // EXTRACT_SUBVECTOR of CONCAT_VECTORS can be simplified if the pieces of
4540       // the concat have the same type as the extract.
4541       if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
4542           N1.getNumOperands() > 0 &&
4543           VT == N1.getOperand(0).getValueType()) {
4544         unsigned Factor = VT.getVectorNumElements();
4545         return N1.getOperand(N2C->getZExtValue() / Factor);
4546       }
4547
4548       // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
4549       // during shuffle legalization.
4550       if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
4551           VT == N1.getOperand(1).getValueType())
4552         return N1.getOperand(1);
4553     }
4554     break;
4555   }
4556
4557   // Perform trivial constant folding.
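// For example, a request for (add i32 2, 3) returns the constant 5 here
// rather than materializing an ADD node with two constant operands.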
4558 if (SDValue SV = 4559 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode())) 4560 return SV; 4561 4562 // Constant fold FP operations. 4563 bool HasFPExceptions = TLI->hasFloatingPointExceptions(); 4564 if (N1CFP) { 4565 if (N2CFP) { 4566 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF(); 4567 APFloat::opStatus s; 4568 switch (Opcode) { 4569 case ISD::FADD: 4570 s = V1.add(V2, APFloat::rmNearestTiesToEven); 4571 if (!HasFPExceptions || s != APFloat::opInvalidOp) 4572 return getConstantFP(V1, DL, VT); 4573 break; 4574 case ISD::FSUB: 4575 s = V1.subtract(V2, APFloat::rmNearestTiesToEven); 4576 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 4577 return getConstantFP(V1, DL, VT); 4578 break; 4579 case ISD::FMUL: 4580 s = V1.multiply(V2, APFloat::rmNearestTiesToEven); 4581 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 4582 return getConstantFP(V1, DL, VT); 4583 break; 4584 case ISD::FDIV: 4585 s = V1.divide(V2, APFloat::rmNearestTiesToEven); 4586 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 4587 s!=APFloat::opDivByZero)) { 4588 return getConstantFP(V1, DL, VT); 4589 } 4590 break; 4591 case ISD::FREM : 4592 s = V1.mod(V2); 4593 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 4594 s!=APFloat::opDivByZero)) { 4595 return getConstantFP(V1, DL, VT); 4596 } 4597 break; 4598 case ISD::FCOPYSIGN: 4599 V1.copySign(V2); 4600 return getConstantFP(V1, DL, VT); 4601 default: break; 4602 } 4603 } 4604 4605 if (Opcode == ISD::FP_ROUND) { 4606 APFloat V = N1CFP->getValueAPF(); // make copy 4607 bool ignored; 4608 // This can return overflow, underflow, or inexact; we don't care. 4609 // FIXME need to be more flexible about rounding mode. 4610 (void)V.convert(EVTToAPFloatSemantics(VT), 4611 APFloat::rmNearestTiesToEven, &ignored); 4612 return getConstantFP(V, DL, VT); 4613 } 4614 } 4615 4616 // Canonicalize an UNDEF to the RHS, even over a constant. 4617 if (N1.isUndef()) { 4618 if (TLI->isCommutativeBinOp(Opcode)) { 4619 std::swap(N1, N2); 4620 } else { 4621 switch (Opcode) { 4622 case ISD::FP_ROUND_INREG: 4623 case ISD::SIGN_EXTEND_INREG: 4624 case ISD::SUB: 4625 case ISD::FSUB: 4626 case ISD::FDIV: 4627 case ISD::FREM: 4628 case ISD::SRA: 4629 return N1; // fold op(undef, arg2) -> undef 4630 case ISD::UDIV: 4631 case ISD::SDIV: 4632 case ISD::UREM: 4633 case ISD::SREM: 4634 case ISD::SRL: 4635 case ISD::SHL: 4636 if (!VT.isVector()) 4637 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 4638 // For vectors, we can't easily build an all zero vector, just return 4639 // the LHS. 4640 return N2; 4641 } 4642 } 4643 } 4644 4645 // Fold a bunch of operators when the RHS is undef. 4646 if (N2.isUndef()) { 4647 switch (Opcode) { 4648 case ISD::XOR: 4649 if (N1.isUndef()) 4650 // Handle undef ^ undef -> 0 special case. This is a common 4651 // idiom (misuse). 4652 return getConstant(0, DL, VT); 4653 LLVM_FALLTHROUGH; 4654 case ISD::ADD: 4655 case ISD::ADDC: 4656 case ISD::ADDE: 4657 case ISD::SUB: 4658 case ISD::UDIV: 4659 case ISD::SDIV: 4660 case ISD::UREM: 4661 case ISD::SREM: 4662 return N2; // fold op(arg1, undef) -> undef 4663 case ISD::FADD: 4664 case ISD::FSUB: 4665 case ISD::FMUL: 4666 case ISD::FDIV: 4667 case ISD::FREM: 4668 if (getTarget().Options.UnsafeFPMath) 4669 return N2; 4670 break; 4671 case ISD::MUL: 4672 case ISD::AND: 4673 case ISD::SRL: 4674 case ISD::SHL: 4675 if (!VT.isVector()) 4676 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 4677 // For vectors, we can't easily build an all zero vector, just return 4678 // the LHS. 
4679 return N1; 4680 case ISD::OR: 4681 if (!VT.isVector()) 4682 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT); 4683 // For vectors, we can't easily build an all one vector, just return 4684 // the LHS. 4685 return N1; 4686 case ISD::SRA: 4687 return N1; 4688 } 4689 } 4690 4691 // Memoize this node if possible. 4692 SDNode *N; 4693 SDVTList VTs = getVTList(VT); 4694 SDValue Ops[] = {N1, N2}; 4695 if (VT != MVT::Glue) { 4696 FoldingSetNodeID ID; 4697 AddNodeIDNode(ID, Opcode, VTs, Ops); 4698 void *IP = nullptr; 4699 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4700 E->intersectFlagsWith(Flags); 4701 return SDValue(E, 0); 4702 } 4703 4704 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4705 N->setFlags(Flags); 4706 createOperands(N, Ops); 4707 CSEMap.InsertNode(N, IP); 4708 } else { 4709 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4710 createOperands(N, Ops); 4711 } 4712 4713 InsertNode(N); 4714 SDValue V = SDValue(N, 0); 4715 NewSDValueDbgMsg(V, "Creating new node: ", this); 4716 return V; 4717 } 4718 4719 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4720 SDValue N1, SDValue N2, SDValue N3) { 4721 // Perform various simplifications. 4722 switch (Opcode) { 4723 case ISD::FMA: { 4724 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 4725 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 4726 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 4727 if (N1CFP && N2CFP && N3CFP) { 4728 APFloat V1 = N1CFP->getValueAPF(); 4729 const APFloat &V2 = N2CFP->getValueAPF(); 4730 const APFloat &V3 = N3CFP->getValueAPF(); 4731 APFloat::opStatus s = 4732 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 4733 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp) 4734 return getConstantFP(V1, DL, VT); 4735 } 4736 break; 4737 } 4738 case ISD::CONCAT_VECTORS: { 4739 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 4740 SDValue Ops[] = {N1, N2, N3}; 4741 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 4742 return V; 4743 break; 4744 } 4745 case ISD::SETCC: { 4746 // Use FoldSetCC to simplify SETCC's. 4747 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 4748 return V; 4749 // Vector constant folding. 
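// For example, a SETCC whose operands are constant BUILD_VECTORs can often
// be folded element by element; an empty SDValue means nothing was folded.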
4750 SDValue Ops[] = {N1, N2, N3};
4751 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
4752 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
4753 return V;
4754 }
4755 break;
4756 }
4757 case ISD::SELECT:
4758 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
4759 if (N1C->getZExtValue())
4760 return N2; // select true, X, Y -> X
4761 return N3; // select false, X, Y -> Y
4762 }
4763
4764 if (N2 == N3) return N2; // select C, X, X -> X
4765 break;
4766 case ISD::VECTOR_SHUFFLE:
4767 llvm_unreachable("should use getVectorShuffle constructor!");
4768 case ISD::INSERT_VECTOR_ELT: {
4769 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
4770 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
4771 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
4772 return getUNDEF(VT);
4773 break;
4774 }
4775 case ISD::INSERT_SUBVECTOR: {
4776 SDValue Index = N3;
4777 if (VT.isSimple() && N1.getValueType().isSimple()
4778 && N2.getValueType().isSimple()) {
4779 assert(VT.isVector() && N1.getValueType().isVector() &&
4780 N2.getValueType().isVector() &&
4781 "Insert subvector VTs must be vectors!");
4782 assert(VT == N1.getValueType() &&
4783 "Dest and insert subvector source types must match!");
4784 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
4785 "Insert subvector must be from smaller vector to larger vector!");
4786 if (isa<ConstantSDNode>(Index)) {
4787 assert((N2.getValueType().getVectorNumElements() +
4788 cast<ConstantSDNode>(Index)->getZExtValue()
4789 <= VT.getVectorNumElements())
4790 && "Insert subvector overflow!");
4791 }
4792
4793 // Trivial insertion.
4794 if (VT.getSimpleVT() == N2.getSimpleValueType())
4795 return N2;
4796 }
4797 break;
4798 }
4799 case ISD::BITCAST:
4800 // Fold bit_convert nodes from a type to themselves.
4801 if (N1.getValueType() == VT)
4802 return N1;
4803 break;
4804 }
4805
4806 // Memoize node if it doesn't produce a flag.
4807 SDNode *N;
4808 SDVTList VTs = getVTList(VT);
4809 SDValue Ops[] = {N1, N2, N3};
4810 if (VT != MVT::Glue) {
4811 FoldingSetNodeID ID;
4812 AddNodeIDNode(ID, Opcode, VTs, Ops);
4813 void *IP = nullptr;
4814 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4815 return SDValue(E, 0);
4816
4817 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4818 createOperands(N, Ops);
4819 CSEMap.InsertNode(N, IP);
4820 } else {
4821 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4822 createOperands(N, Ops);
4823 }
4824
4825 InsertNode(N);
4826 SDValue V = SDValue(N, 0);
4827 NewSDValueDbgMsg(V, "Creating new node: ", this);
4828 return V;
4829 }
4830
4831 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4832 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
4833 SDValue Ops[] = { N1, N2, N3, N4 };
4834 return getNode(Opcode, DL, VT, Ops);
4835 }
4836
4837 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4838 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
4839 SDValue N5) {
4840 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4841 return getNode(Opcode, DL, VT, Ops);
4842 }
4843
4844 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
4845 /// the incoming stack arguments to be loaded from the stack.
4846 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
4847 SmallVector<SDValue, 8> ArgChains;
4848
4849 // Include the original chain at the beginning of the list.
When this is 4850 // used by target LowerCall hooks, this helps legalize find the 4851 // CALLSEQ_BEGIN node. 4852 ArgChains.push_back(Chain); 4853 4854 // Add a chain value for each stack argument. 4855 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 4856 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 4857 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 4858 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 4859 if (FI->getIndex() < 0) 4860 ArgChains.push_back(SDValue(L, 1)); 4861 4862 // Build a tokenfactor for all the chains. 4863 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 4864 } 4865 4866 /// getMemsetValue - Vectorized representation of the memset value 4867 /// operand. 4868 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 4869 const SDLoc &dl) { 4870 assert(!Value.isUndef()); 4871 4872 unsigned NumBits = VT.getScalarSizeInBits(); 4873 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 4874 assert(C->getAPIntValue().getBitWidth() == 8); 4875 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 4876 if (VT.isInteger()) 4877 return DAG.getConstant(Val, dl, VT); 4878 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 4879 VT); 4880 } 4881 4882 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 4883 EVT IntVT = VT.getScalarType(); 4884 if (!IntVT.isInteger()) 4885 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 4886 4887 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 4888 if (NumBits > 8) { 4889 // Use a multiplication with 0x010101... to extend the input to the 4890 // required length. 4891 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 4892 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 4893 DAG.getConstant(Magic, dl, IntVT)); 4894 } 4895 4896 if (VT != Value.getValueType() && !VT.isInteger()) 4897 Value = DAG.getBitcast(VT.getScalarType(), Value); 4898 if (VT != Value.getValueType()) 4899 Value = DAG.getSplatBuildVector(VT, dl, Value); 4900 4901 return Value; 4902 } 4903 4904 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 4905 /// used when a memcpy is turned into a memset when the source is a constant 4906 /// string ptr. 4907 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 4908 const TargetLowering &TLI, 4909 const ConstantDataArraySlice &Slice) { 4910 // Handle vector with all elements zero. 4911 if (Slice.Array == nullptr) { 4912 if (VT.isInteger()) 4913 return DAG.getConstant(0, dl, VT); 4914 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 4915 return DAG.getConstantFP(0.0, dl, VT); 4916 else if (VT.isVector()) { 4917 unsigned NumElts = VT.getVectorNumElements(); 4918 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? 
MVT::i32 : MVT::i64; 4919 return DAG.getNode(ISD::BITCAST, dl, VT, 4920 DAG.getConstant(0, dl, 4921 EVT::getVectorVT(*DAG.getContext(), 4922 EltVT, NumElts))); 4923 } else 4924 llvm_unreachable("Expected type!"); 4925 } 4926 4927 assert(!VT.isVector() && "Can't handle vector type here!"); 4928 unsigned NumVTBits = VT.getSizeInBits(); 4929 unsigned NumVTBytes = NumVTBits / 8; 4930 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 4931 4932 APInt Val(NumVTBits, 0); 4933 if (DAG.getDataLayout().isLittleEndian()) { 4934 for (unsigned i = 0; i != NumBytes; ++i) 4935 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 4936 } else { 4937 for (unsigned i = 0; i != NumBytes; ++i) 4938 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 4939 } 4940 4941 // If the "cost" of materializing the integer immediate is less than the cost 4942 // of a load, then it is cost effective to turn the load into the immediate. 4943 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 4944 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 4945 return DAG.getConstant(Val, dl, VT); 4946 return SDValue(nullptr, 0); 4947 } 4948 4949 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset, 4950 const SDLoc &DL) { 4951 EVT VT = Base.getValueType(); 4952 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT)); 4953 } 4954 4955 /// Returns true if memcpy source is constant data. 4956 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 4957 uint64_t SrcDelta = 0; 4958 GlobalAddressSDNode *G = nullptr; 4959 if (Src.getOpcode() == ISD::GlobalAddress) 4960 G = cast<GlobalAddressSDNode>(Src); 4961 else if (Src.getOpcode() == ISD::ADD && 4962 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 4963 Src.getOperand(1).getOpcode() == ISD::Constant) { 4964 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 4965 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 4966 } 4967 if (!G) 4968 return false; 4969 4970 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 4971 SrcDelta + G->getOffset()); 4972 } 4973 4974 /// Determines the optimal series of memory ops to replace the memset / memcpy. 4975 /// Return true if the number of memory ops is below the threshold (Limit). 4976 /// It returns the types of the sequence of memory ops to perform 4977 /// memset / memcpy by reference. 4978 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps, 4979 unsigned Limit, uint64_t Size, 4980 unsigned DstAlign, unsigned SrcAlign, 4981 bool IsMemset, 4982 bool ZeroMemset, 4983 bool MemcpyStrSrc, 4984 bool AllowOverlap, 4985 unsigned DstAS, unsigned SrcAS, 4986 SelectionDAG &DAG, 4987 const TargetLowering &TLI) { 4988 assert((SrcAlign == 0 || SrcAlign >= DstAlign) && 4989 "Expecting memcpy / memset source to meet alignment requirement!"); 4990 // If 'SrcAlign' is zero, that means the memory operation does not need to 4991 // load the value, i.e. memset or memcpy from constant string. Otherwise, 4992 // it's the inferred alignment of the source. 'DstAlign', on the other hand, 4993 // is the specified alignment of the memory operation. If it is zero, that 4994 // means it's possible to change the alignment of the destination. 4995 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does 4996 // not need to be loaded. 
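// For example, a 15-byte memcpy on a target where i64 is legal might be
// lowered as [i64, i32, i16, i8], or as two overlapping i64 ops when
// AllowOverlap holds and fast misaligned accesses are available.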
4997 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
4998 IsMemset, ZeroMemset, MemcpyStrSrc,
4999 DAG.getMachineFunction());
5000
5001 if (VT == MVT::Other) {
5002 // Use the largest integer type whose alignment constraints are satisfied.
5003 // We only need to check DstAlign here, as SrcAlign is always greater than
5004 // or equal to DstAlign (or zero).
5005 VT = MVT::i64;
5006 while (DstAlign && DstAlign < VT.getSizeInBits() / 8 &&
5007 !TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign))
5008 VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
5009 assert(VT.isInteger());
5010
5011 // Find the largest legal integer type.
5012 MVT LVT = MVT::i64;
5013 while (!TLI.isTypeLegal(LVT))
5014 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
5015 assert(LVT.isInteger());
5016
5017 // If the type we've chosen is larger than the largest legal integer type,
5018 // then use that instead.
5019 if (VT.bitsGT(LVT))
5020 VT = LVT;
5021 }
5022
5023 unsigned NumMemOps = 0;
5024 while (Size != 0) {
5025 unsigned VTSize = VT.getSizeInBits() / 8;
5026 while (VTSize > Size) {
5027 // For now, only use non-vector loads / stores for the left-over pieces.
5028 EVT NewVT = VT;
5029 unsigned NewVTSize;
5030
5031 bool Found = false;
5032 if (VT.isVector() || VT.isFloatingPoint()) {
5033 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
5034 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
5035 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
5036 Found = true;
5037 else if (NewVT == MVT::i64 &&
5038 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
5039 TLI.isSafeMemOpType(MVT::f64)) {
5040 // i64 is usually not legal on 32-bit targets, but f64 may be.
5041 NewVT = MVT::f64;
5042 Found = true;
5043 }
5044 }
5045
5046 if (!Found) {
5047 do {
5048 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
5049 if (NewVT == MVT::i8)
5050 break;
5051 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
5052 }
5053 NewVTSize = NewVT.getSizeInBits() / 8;
5054
5055 // If the new VT cannot cover all of the remaining bits, then consider
5056 // issuing an unaligned and overlapping load / store (or a pair of them).
5057 // FIXME: Only do this for 64-bit or wider types, since we don't have a
5058 // proper cost model for unaligned loads / stores.
5059 bool Fast;
5060 if (NumMemOps && AllowOverlap &&
5061 VTSize >= 8 && NewVTSize < Size &&
5062 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast)
5063 VTSize = Size;
5064 else {
5065 VT = NewVT;
5066 VTSize = NewVTSize;
5067 }
5068 }
5069
5070 if (++NumMemOps > Limit)
5071 return false;
5072
5073 MemOps.push_back(VT);
5074 Size -= VTSize;
5075 }
5076
5077 return true;
5078 }
5079
5080 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
5081 // On Darwin, -Os means optimize for size without hurting performance, so
5082 // only really optimize for size when -Oz (MinSize) is used.
5083 if (MF.getTarget().getTargetTriple().isOSDarwin())
5084 return MF.getFunction()->optForMinSize();
5085 return MF.getFunction()->optForSize();
5086 }
5087
5088 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5089 SDValue Chain, SDValue Dst, SDValue Src,
5090 uint64_t Size, unsigned Align,
5091 bool isVol, bool AlwaysInline,
5092 MachinePointerInfo DstPtrInfo,
5093 MachinePointerInfo SrcPtrInfo) {
5094 // Turn a memcpy of undef to nop.
5095 if (Src.isUndef())
5096 return Chain;
5097
5098 // Expand memcpy to a series of load and store ops if the size operand falls
5099 // below a certain threshold.
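// Each type chosen by FindOptimalMemOpLowering becomes one load/store pair
// below, or a single store when the source bytes are a known constant.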
5100 // TODO: In the AlwaysInline case, if the size is big then generate a loop 5101 // rather than maybe a humongous number of loads and stores. 5102 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5103 const DataLayout &DL = DAG.getDataLayout(); 5104 LLVMContext &C = *DAG.getContext(); 5105 std::vector<EVT> MemOps; 5106 bool DstAlignCanChange = false; 5107 MachineFunction &MF = DAG.getMachineFunction(); 5108 MachineFrameInfo &MFI = MF.getFrameInfo(); 5109 bool OptSize = shouldLowerMemFuncForSize(MF); 5110 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5111 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5112 DstAlignCanChange = true; 5113 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5114 if (Align > SrcAlign) 5115 SrcAlign = Align; 5116 ConstantDataArraySlice Slice; 5117 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); 5118 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 5119 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 5120 5121 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 5122 (DstAlignCanChange ? 0 : Align), 5123 (isZeroConstant ? 0 : SrcAlign), 5124 false, false, CopyFromConstant, true, 5125 DstPtrInfo.getAddrSpace(), 5126 SrcPtrInfo.getAddrSpace(), 5127 DAG, TLI)) 5128 return SDValue(); 5129 5130 if (DstAlignCanChange) { 5131 Type *Ty = MemOps[0].getTypeForEVT(C); 5132 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5133 5134 // Don't promote to an alignment that would require dynamic stack 5135 // realignment. 5136 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 5137 if (!TRI->needsStackRealignment(MF)) 5138 while (NewAlign > Align && 5139 DL.exceedsNaturalStackAlignment(NewAlign)) 5140 NewAlign /= 2; 5141 5142 if (NewAlign > Align) { 5143 // Give the stack frame object a larger alignment if needed. 5144 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5145 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5146 Align = NewAlign; 5147 } 5148 } 5149 5150 MachineMemOperand::Flags MMOFlags = 5151 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5152 SmallVector<SDValue, 8> OutChains; 5153 unsigned NumMemOps = MemOps.size(); 5154 uint64_t SrcOff = 0, DstOff = 0; 5155 for (unsigned i = 0; i != NumMemOps; ++i) { 5156 EVT VT = MemOps[i]; 5157 unsigned VTSize = VT.getSizeInBits() / 8; 5158 SDValue Value, Store; 5159 5160 if (VTSize > Size) { 5161 // Issuing an unaligned load / store pair that overlaps with the previous 5162 // pair. Adjust the offset accordingly. 5163 assert(i == NumMemOps-1 && i != 0); 5164 SrcOff -= VTSize - Size; 5165 DstOff -= VTSize - Size; 5166 } 5167 5168 if (CopyFromConstant && 5169 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 5170 // It's unlikely a store of a vector immediate can be done in a single 5171 // instruction. It would require a load from a constantpool first. 5172 // We only handle zero vectors here. 5173 // FIXME: Handle other cases where store of vector immediate is done in 5174 // a single instruction. 5175 ConstantDataArraySlice SubSlice; 5176 if (SrcOff < Slice.Length) { 5177 SubSlice = Slice; 5178 SubSlice.move(SrcOff); 5179 } else { 5180 // This is an out-of-bounds access and hence UB. Pretend we read zero. 
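// (A null Array makes getMemsetStringVal return an all-zero value of the
// requested type.)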
5181 SubSlice.Array = nullptr; 5182 SubSlice.Offset = 0; 5183 SubSlice.Length = VTSize; 5184 } 5185 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); 5186 if (Value.getNode()) 5187 Store = DAG.getStore(Chain, dl, Value, 5188 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5189 DstPtrInfo.getWithOffset(DstOff), Align, 5190 MMOFlags); 5191 } 5192 5193 if (!Store.getNode()) { 5194 // The type might not be legal for the target. This should only happen 5195 // if the type is smaller than a legal type, as on PPC, so the right 5196 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 5197 // to Load/Store if NVT==VT. 5198 // FIXME does the case above also need this? 5199 EVT NVT = TLI.getTypeToTransformTo(C, VT); 5200 assert(NVT.bitsGE(VT)); 5201 5202 bool isDereferenceable = 5203 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 5204 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 5205 if (isDereferenceable) 5206 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 5207 5208 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 5209 DAG.getMemBasePlusOffset(Src, SrcOff, dl), 5210 SrcPtrInfo.getWithOffset(SrcOff), VT, 5211 MinAlign(SrcAlign, SrcOff), SrcMMOFlags); 5212 OutChains.push_back(Value.getValue(1)); 5213 Store = DAG.getTruncStore( 5214 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5215 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags); 5216 } 5217 OutChains.push_back(Store); 5218 SrcOff += VTSize; 5219 DstOff += VTSize; 5220 Size -= VTSize; 5221 } 5222 5223 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5224 } 5225 5226 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5227 SDValue Chain, SDValue Dst, SDValue Src, 5228 uint64_t Size, unsigned Align, 5229 bool isVol, bool AlwaysInline, 5230 MachinePointerInfo DstPtrInfo, 5231 MachinePointerInfo SrcPtrInfo) { 5232 // Turn a memmove of undef to nop. 5233 if (Src.isUndef()) 5234 return Chain; 5235 5236 // Expand memmove to a series of load and store ops if the size operand falls 5237 // below a certain threshold. 5238 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5239 const DataLayout &DL = DAG.getDataLayout(); 5240 LLVMContext &C = *DAG.getContext(); 5241 std::vector<EVT> MemOps; 5242 bool DstAlignCanChange = false; 5243 MachineFunction &MF = DAG.getMachineFunction(); 5244 MachineFrameInfo &MFI = MF.getFrameInfo(); 5245 bool OptSize = shouldLowerMemFuncForSize(MF); 5246 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5247 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5248 DstAlignCanChange = true; 5249 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5250 if (Align > SrcAlign) 5251 SrcAlign = Align; 5252 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 5253 5254 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 5255 (DstAlignCanChange ? 0 : Align), SrcAlign, 5256 false, false, false, false, 5257 DstPtrInfo.getAddrSpace(), 5258 SrcPtrInfo.getAddrSpace(), 5259 DAG, TLI)) 5260 return SDValue(); 5261 5262 if (DstAlignCanChange) { 5263 Type *Ty = MemOps[0].getTypeForEVT(C); 5264 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5265 if (NewAlign > Align) { 5266 // Give the stack frame object a larger alignment if needed. 5267 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5268 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5269 Align = NewAlign; 5270 } 5271 } 5272 5273 MachineMemOperand::Flags MMOFlags = 5274 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5275 uint64_t SrcOff = 0, DstOff = 0; 5276 SmallVector<SDValue, 8> LoadValues; 5277 SmallVector<SDValue, 8> LoadChains; 5278 SmallVector<SDValue, 8> OutChains; 5279 unsigned NumMemOps = MemOps.size(); 5280 for (unsigned i = 0; i < NumMemOps; i++) { 5281 EVT VT = MemOps[i]; 5282 unsigned VTSize = VT.getSizeInBits() / 8; 5283 SDValue Value; 5284 5285 bool isDereferenceable = 5286 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 5287 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 5288 if (isDereferenceable) 5289 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 5290 5291 Value = 5292 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), 5293 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags); 5294 LoadValues.push_back(Value); 5295 LoadChains.push_back(Value.getValue(1)); 5296 SrcOff += VTSize; 5297 } 5298 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 5299 OutChains.clear(); 5300 for (unsigned i = 0; i < NumMemOps; i++) { 5301 EVT VT = MemOps[i]; 5302 unsigned VTSize = VT.getSizeInBits() / 8; 5303 SDValue Store; 5304 5305 Store = DAG.getStore(Chain, dl, LoadValues[i], 5306 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5307 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags); 5308 OutChains.push_back(Store); 5309 DstOff += VTSize; 5310 } 5311 5312 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5313 } 5314 5315 /// \brief Lower the call to 'memset' intrinsic function into a series of store 5316 /// operations. 5317 /// 5318 /// \param DAG Selection DAG where lowered code is placed. 5319 /// \param dl Link to corresponding IR location. 5320 /// \param Chain Control flow dependency. 5321 /// \param Dst Pointer to destination memory location. 5322 /// \param Src Value of byte to write into the memory. 5323 /// \param Size Number of bytes to write. 5324 /// \param Align Alignment of the destination in bytes. 5325 /// \param isVol True if destination is volatile. 5326 /// \param DstPtrInfo IR information on the memory pointer. 5327 /// \returns New head in the control flow, if lowering was successful, empty 5328 /// SDValue otherwise. 5329 /// 5330 /// The function tries to replace 'llvm.memset' intrinsic with several store 5331 /// operations and value calculation code. This is usually profitable for small 5332 /// memory size. 5333 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, 5334 SDValue Chain, SDValue Dst, SDValue Src, 5335 uint64_t Size, unsigned Align, bool isVol, 5336 MachinePointerInfo DstPtrInfo) { 5337 // Turn a memset of undef to nop. 5338 if (Src.isUndef()) 5339 return Chain; 5340 5341 // Expand memset to a series of load/store ops if the size operand 5342 // falls below a certain threshold. 5343 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5344 std::vector<EVT> MemOps; 5345 bool DstAlignCanChange = false; 5346 MachineFunction &MF = DAG.getMachineFunction(); 5347 MachineFrameInfo &MFI = MF.getFrameInfo(); 5348 bool OptSize = shouldLowerMemFuncForSize(MF); 5349 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5350 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5351 DstAlignCanChange = true; 5352 bool IsZeroVal = 5353 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 5354 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize), 5355 Size, (DstAlignCanChange ? 
0 : Align), 0, 5356 true, IsZeroVal, false, true, 5357 DstPtrInfo.getAddrSpace(), ~0u, 5358 DAG, TLI)) 5359 return SDValue(); 5360 5361 if (DstAlignCanChange) { 5362 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 5363 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 5364 if (NewAlign > Align) { 5365 // Give the stack frame object a larger alignment if needed. 5366 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5367 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5368 Align = NewAlign; 5369 } 5370 } 5371 5372 SmallVector<SDValue, 8> OutChains; 5373 uint64_t DstOff = 0; 5374 unsigned NumMemOps = MemOps.size(); 5375 5376 // Find the largest store and generate the bit pattern for it. 5377 EVT LargestVT = MemOps[0]; 5378 for (unsigned i = 1; i < NumMemOps; i++) 5379 if (MemOps[i].bitsGT(LargestVT)) 5380 LargestVT = MemOps[i]; 5381 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 5382 5383 for (unsigned i = 0; i < NumMemOps; i++) { 5384 EVT VT = MemOps[i]; 5385 unsigned VTSize = VT.getSizeInBits() / 8; 5386 if (VTSize > Size) { 5387 // Issuing an unaligned load / store pair that overlaps with the previous 5388 // pair. Adjust the offset accordingly. 5389 assert(i == NumMemOps-1 && i != 0); 5390 DstOff -= VTSize - Size; 5391 } 5392 5393 // If this store is smaller than the largest store see whether we can get 5394 // the smaller value for free with a truncate. 5395 SDValue Value = MemSetValue; 5396 if (VT.bitsLT(LargestVT)) { 5397 if (!LargestVT.isVector() && !VT.isVector() && 5398 TLI.isTruncateFree(LargestVT, VT)) 5399 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 5400 else 5401 Value = getMemsetValue(Src, VT, DAG, dl); 5402 } 5403 assert(Value.getValueType() == VT && "Value with wrong type."); 5404 SDValue Store = DAG.getStore( 5405 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5406 DstPtrInfo.getWithOffset(DstOff), Align, 5407 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 5408 OutChains.push_back(Store); 5409 DstOff += VT.getSizeInBits() / 8; 5410 Size -= VTSize; 5411 } 5412 5413 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5414 } 5415 5416 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 5417 unsigned AS) { 5418 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 5419 // pointer operands can be losslessly bitcasted to pointers of address space 0 5420 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 5421 report_fatal_error("cannot lower memory intrinsic in address space " + 5422 Twine(AS)); 5423 } 5424 } 5425 5426 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 5427 SDValue Src, SDValue Size, unsigned Align, 5428 bool isVol, bool AlwaysInline, bool isTailCall, 5429 MachinePointerInfo DstPtrInfo, 5430 MachinePointerInfo SrcPtrInfo) { 5431 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5432 5433 // Check to see if we should lower the memcpy to loads and stores first. 5434 // For cases within the target-specified limits, this is the best choice. 5435 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5436 if (ConstantSize) { 5437 // Memcpy with size zero? Just return the original chain. 
5438 if (ConstantSize->isNullValue()) 5439 return Chain; 5440 5441 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 5442 ConstantSize->getZExtValue(),Align, 5443 isVol, false, DstPtrInfo, SrcPtrInfo); 5444 if (Result.getNode()) 5445 return Result; 5446 } 5447 5448 // Then check to see if we should lower the memcpy with target-specific 5449 // code. If the target chooses to do this, this is the next best. 5450 if (TSI) { 5451 SDValue Result = TSI->EmitTargetCodeForMemcpy( 5452 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 5453 DstPtrInfo, SrcPtrInfo); 5454 if (Result.getNode()) 5455 return Result; 5456 } 5457 5458 // If we really need inline code and the target declined to provide it, 5459 // use a (potentially long) sequence of loads and stores. 5460 if (AlwaysInline) { 5461 assert(ConstantSize && "AlwaysInline requires a constant size!"); 5462 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 5463 ConstantSize->getZExtValue(), Align, isVol, 5464 true, DstPtrInfo, SrcPtrInfo); 5465 } 5466 5467 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5468 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 5469 5470 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 5471 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 5472 // respect volatile, so they may do things like read or write memory 5473 // beyond the given memory regions. But fixing this isn't easy, and most 5474 // people don't care. 5475 5476 // Emit a library call. 5477 TargetLowering::ArgListTy Args; 5478 TargetLowering::ArgListEntry Entry; 5479 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 5480 Entry.Node = Dst; Args.push_back(Entry); 5481 Entry.Node = Src; Args.push_back(Entry); 5482 Entry.Node = Size; Args.push_back(Entry); 5483 // FIXME: pass in SDLoc 5484 TargetLowering::CallLoweringInfo CLI(*this); 5485 CLI.setDebugLoc(dl) 5486 .setChain(Chain) 5487 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 5488 Dst.getValueType().getTypeForEVT(*getContext()), 5489 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 5490 TLI->getPointerTy(getDataLayout())), 5491 std::move(Args)) 5492 .setDiscardResult() 5493 .setTailCall(isTailCall); 5494 5495 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5496 return CallResult.second; 5497 } 5498 5499 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 5500 SDValue Src, SDValue Size, unsigned Align, 5501 bool isVol, bool isTailCall, 5502 MachinePointerInfo DstPtrInfo, 5503 MachinePointerInfo SrcPtrInfo) { 5504 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5505 5506 // Check to see if we should lower the memmove to loads and stores first. 5507 // For cases within the target-specified limits, this is the best choice. 5508 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5509 if (ConstantSize) { 5510 // Memmove with size zero? Just return the original chain. 5511 if (ConstantSize->isNullValue()) 5512 return Chain; 5513 5514 SDValue Result = 5515 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 5516 ConstantSize->getZExtValue(), Align, isVol, 5517 false, DstPtrInfo, SrcPtrInfo); 5518 if (Result.getNode()) 5519 return Result; 5520 } 5521 5522 // Then check to see if we should lower the memmove with target-specific 5523 // code. If the target chooses to do this, this is the next best. 
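// (TSI, the target's SelectionDAGTargetInfo, may be null; in that case we
// fall through to the generic libcall below.)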
5524 if (TSI) { 5525 SDValue Result = TSI->EmitTargetCodeForMemmove( 5526 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 5527 if (Result.getNode()) 5528 return Result; 5529 } 5530 5531 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5532 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 5533 5534 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 5535 // not be safe. See memcpy above for more details. 5536 5537 // Emit a library call. 5538 TargetLowering::ArgListTy Args; 5539 TargetLowering::ArgListEntry Entry; 5540 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 5541 Entry.Node = Dst; Args.push_back(Entry); 5542 Entry.Node = Src; Args.push_back(Entry); 5543 Entry.Node = Size; Args.push_back(Entry); 5544 // FIXME: pass in SDLoc 5545 TargetLowering::CallLoweringInfo CLI(*this); 5546 CLI.setDebugLoc(dl) 5547 .setChain(Chain) 5548 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 5549 Dst.getValueType().getTypeForEVT(*getContext()), 5550 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 5551 TLI->getPointerTy(getDataLayout())), 5552 std::move(Args)) 5553 .setDiscardResult() 5554 .setTailCall(isTailCall); 5555 5556 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5557 return CallResult.second; 5558 } 5559 5560 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 5561 SDValue Src, SDValue Size, unsigned Align, 5562 bool isVol, bool isTailCall, 5563 MachinePointerInfo DstPtrInfo) { 5564 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5565 5566 // Check to see if we should lower the memset to stores first. 5567 // For cases within the target-specified limits, this is the best choice. 5568 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5569 if (ConstantSize) { 5570 // Memset with size zero? Just return the original chain. 5571 if (ConstantSize->isNullValue()) 5572 return Chain; 5573 5574 SDValue Result = 5575 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 5576 Align, isVol, DstPtrInfo); 5577 5578 if (Result.getNode()) 5579 return Result; 5580 } 5581 5582 // Then check to see if we should lower the memset with target-specific 5583 // code. If the target chooses to do this, this is the next best. 5584 if (TSI) { 5585 SDValue Result = TSI->EmitTargetCodeForMemset( 5586 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); 5587 if (Result.getNode()) 5588 return Result; 5589 } 5590 5591 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5592 5593 // Emit a library call. 
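// Note that memset takes (void*, int, size_t), so unlike the memcpy and
// memmove lowerings above, the fill value keeps its own type below rather
// than IntPtrTy.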
5594 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext()); 5595 TargetLowering::ArgListTy Args; 5596 TargetLowering::ArgListEntry Entry; 5597 Entry.Node = Dst; Entry.Ty = IntPtrTy; 5598 Args.push_back(Entry); 5599 Entry.Node = Src; 5600 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 5601 Args.push_back(Entry); 5602 Entry.Node = Size; 5603 Entry.Ty = IntPtrTy; 5604 Args.push_back(Entry); 5605 5606 // FIXME: pass in SDLoc 5607 TargetLowering::CallLoweringInfo CLI(*this); 5608 CLI.setDebugLoc(dl) 5609 .setChain(Chain) 5610 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 5611 Dst.getValueType().getTypeForEVT(*getContext()), 5612 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 5613 TLI->getPointerTy(getDataLayout())), 5614 std::move(Args)) 5615 .setDiscardResult() 5616 .setTailCall(isTailCall); 5617 5618 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5619 return CallResult.second; 5620 } 5621 5622 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5623 SDVTList VTList, ArrayRef<SDValue> Ops, 5624 MachineMemOperand *MMO) { 5625 FoldingSetNodeID ID; 5626 ID.AddInteger(MemVT.getRawBits()); 5627 AddNodeIDNode(ID, Opcode, VTList, Ops); 5628 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5629 void* IP = nullptr; 5630 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5631 cast<AtomicSDNode>(E)->refineAlignment(MMO); 5632 return SDValue(E, 0); 5633 } 5634 5635 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5636 VTList, MemVT, MMO); 5637 createOperands(N, Ops); 5638 5639 CSEMap.InsertNode(N, IP); 5640 InsertNode(N); 5641 return SDValue(N, 0); 5642 } 5643 5644 SDValue SelectionDAG::getAtomicCmpSwap( 5645 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, 5646 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, 5647 unsigned Alignment, AtomicOrdering SuccessOrdering, 5648 AtomicOrdering FailureOrdering, SyncScope::ID SSID) { 5649 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 5650 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 5651 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 5652 5653 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5654 Alignment = getEVTAlignment(MemVT); 5655 5656 MachineFunction &MF = getMachineFunction(); 5657 5658 // FIXME: Volatile isn't really correct; we should keep track of atomic 5659 // orderings in the memoperand. 
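// A compare-and-swap both reads and writes the location, so its memory
// operand is marked as both a load and a store.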
5660 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad | 5661 MachineMemOperand::MOStore; 5662 MachineMemOperand *MMO = 5663 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, 5664 AAMDNodes(), nullptr, SSID, SuccessOrdering, 5665 FailureOrdering); 5666 5667 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO); 5668 } 5669 5670 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 5671 EVT MemVT, SDVTList VTs, SDValue Chain, 5672 SDValue Ptr, SDValue Cmp, SDValue Swp, 5673 MachineMemOperand *MMO) { 5674 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 5675 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 5676 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 5677 5678 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 5679 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5680 } 5681 5682 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5683 SDValue Chain, SDValue Ptr, SDValue Val, 5684 const Value *PtrVal, unsigned Alignment, 5685 AtomicOrdering Ordering, 5686 SyncScope::ID SSID) { 5687 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5688 Alignment = getEVTAlignment(MemVT); 5689 5690 MachineFunction &MF = getMachineFunction(); 5691 // An atomic store does not load. An atomic load does not store. 5692 // (An atomicrmw obviously both loads and stores.) 5693 // For now, atomics are considered to be volatile always, and they are 5694 // chained as such. 5695 // FIXME: Volatile isn't really correct; we should keep track of atomic 5696 // orderings in the memoperand. 5697 auto Flags = MachineMemOperand::MOVolatile; 5698 if (Opcode != ISD::ATOMIC_STORE) 5699 Flags |= MachineMemOperand::MOLoad; 5700 if (Opcode != ISD::ATOMIC_LOAD) 5701 Flags |= MachineMemOperand::MOStore; 5702 5703 MachineMemOperand *MMO = 5704 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 5705 MemVT.getStoreSize(), Alignment, AAMDNodes(), 5706 nullptr, SSID, Ordering); 5707 5708 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO); 5709 } 5710 5711 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5712 SDValue Chain, SDValue Ptr, SDValue Val, 5713 MachineMemOperand *MMO) { 5714 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 5715 Opcode == ISD::ATOMIC_LOAD_SUB || 5716 Opcode == ISD::ATOMIC_LOAD_AND || 5717 Opcode == ISD::ATOMIC_LOAD_OR || 5718 Opcode == ISD::ATOMIC_LOAD_XOR || 5719 Opcode == ISD::ATOMIC_LOAD_NAND || 5720 Opcode == ISD::ATOMIC_LOAD_MIN || 5721 Opcode == ISD::ATOMIC_LOAD_MAX || 5722 Opcode == ISD::ATOMIC_LOAD_UMIN || 5723 Opcode == ISD::ATOMIC_LOAD_UMAX || 5724 Opcode == ISD::ATOMIC_SWAP || 5725 Opcode == ISD::ATOMIC_STORE) && 5726 "Invalid Atomic Op"); 5727 5728 EVT VT = Val.getValueType(); 5729 5730 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : 5731 getVTList(VT, MVT::Other); 5732 SDValue Ops[] = {Chain, Ptr, Val}; 5733 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5734 } 5735 5736 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5737 EVT VT, SDValue Chain, SDValue Ptr, 5738 MachineMemOperand *MMO) { 5739 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 5740 5741 SDVTList VTs = getVTList(VT, MVT::Other); 5742 SDValue Ops[] = {Chain, Ptr}; 5743 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5744 } 5745 5746 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 
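/// A single operand is returned as-is rather than wrapped in a new node.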
5747 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 5748 if (Ops.size() == 1) 5749 return Ops[0]; 5750 5751 SmallVector<EVT, 4> VTs; 5752 VTs.reserve(Ops.size()); 5753 for (unsigned i = 0; i < Ops.size(); ++i) 5754 VTs.push_back(Ops[i].getValueType()); 5755 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 5756 } 5757 5758 SDValue SelectionDAG::getMemIntrinsicNode( 5759 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 5760 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, bool Vol, 5761 bool ReadMem, bool WriteMem, unsigned Size) { 5762 if (Align == 0) // Ensure that codegen never sees alignment 0 5763 Align = getEVTAlignment(MemVT); 5764 5765 MachineFunction &MF = getMachineFunction(); 5766 auto Flags = MachineMemOperand::MONone; 5767 if (WriteMem) 5768 Flags |= MachineMemOperand::MOStore; 5769 if (ReadMem) 5770 Flags |= MachineMemOperand::MOLoad; 5771 if (Vol) 5772 Flags |= MachineMemOperand::MOVolatile; 5773 if (!Size) 5774 Size = MemVT.getStoreSize(); 5775 MachineMemOperand *MMO = 5776 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align); 5777 5778 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 5779 } 5780 5781 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 5782 SDVTList VTList, 5783 ArrayRef<SDValue> Ops, EVT MemVT, 5784 MachineMemOperand *MMO) { 5785 assert((Opcode == ISD::INTRINSIC_VOID || 5786 Opcode == ISD::INTRINSIC_W_CHAIN || 5787 Opcode == ISD::PREFETCH || 5788 Opcode == ISD::LIFETIME_START || 5789 Opcode == ISD::LIFETIME_END || 5790 ((int)Opcode <= std::numeric_limits<int>::max() && 5791 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 5792 "Opcode is not a memory-accessing opcode!"); 5793 5794 // Memoize the node unless it returns a flag. 5795 MemIntrinsicSDNode *N; 5796 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 5797 FoldingSetNodeID ID; 5798 AddNodeIDNode(ID, Opcode, VTList, Ops); 5799 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 5800 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 5801 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5802 void *IP = nullptr; 5803 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5804 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 5805 return SDValue(E, 0); 5806 } 5807 5808 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5809 VTList, MemVT, MMO); 5810 createOperands(N, Ops); 5811 5812 CSEMap.InsertNode(N, IP); 5813 } else { 5814 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5815 VTList, MemVT, MMO); 5816 createOperands(N, Ops); 5817 } 5818 InsertNode(N); 5819 return SDValue(N, 0); 5820 } 5821 5822 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 5823 /// MachinePointerInfo record from it. This is particularly useful because the 5824 /// code generator has many cases where it doesn't bother passing in a 5825 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 5826 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, 5827 int64_t Offset = 0) { 5828 // If this is FI+Offset, we can model it. 5829 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 5830 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 5831 FI->getIndex(), Offset); 5832 5833 // If this is (FI+Offset1)+Offset2, we can model it. 
5834 if (Ptr.getOpcode() != ISD::ADD || 5835 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 5836 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 5837 return MachinePointerInfo(); 5838 5839 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 5840 return MachinePointerInfo::getFixedStack( 5841 DAG.getMachineFunction(), FI, 5842 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 5843 } 5844 5845 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 5846 /// MachinePointerInfo record from it. This is particularly useful because the 5847 /// code generator has many cases where it doesn't bother passing in a 5848 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 5849 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, 5850 SDValue OffsetOp) { 5851 // If the 'Offset' value isn't a constant, we can't handle this. 5852 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 5853 return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue()); 5854 if (OffsetOp.isUndef()) 5855 return InferPointerInfo(DAG, Ptr); 5856 return MachinePointerInfo(); 5857 } 5858 5859 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 5860 EVT VT, const SDLoc &dl, SDValue Chain, 5861 SDValue Ptr, SDValue Offset, 5862 MachinePointerInfo PtrInfo, EVT MemVT, 5863 unsigned Alignment, 5864 MachineMemOperand::Flags MMOFlags, 5865 const AAMDNodes &AAInfo, const MDNode *Ranges) { 5866 assert(Chain.getValueType() == MVT::Other && 5867 "Invalid chain type"); 5868 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5869 Alignment = getEVTAlignment(MemVT); 5870 5871 MMOFlags |= MachineMemOperand::MOLoad; 5872 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 5873 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 5874 // clients. 5875 if (PtrInfo.V.isNull()) 5876 PtrInfo = InferPointerInfo(*this, Ptr, Offset); 5877 5878 MachineFunction &MF = getMachineFunction(); 5879 MachineMemOperand *MMO = MF.getMachineMemOperand( 5880 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges); 5881 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 5882 } 5883 5884 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 5885 EVT VT, const SDLoc &dl, SDValue Chain, 5886 SDValue Ptr, SDValue Offset, EVT MemVT, 5887 MachineMemOperand *MMO) { 5888 if (VT == MemVT) { 5889 ExtType = ISD::NON_EXTLOAD; 5890 } else if (ExtType == ISD::NON_EXTLOAD) { 5891 assert(VT == MemVT && "Non-extending load from different memory type!"); 5892 } else { 5893 // Extending load. 5894 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 5895 "Should only be an extending load, not truncating!"); 5896 assert(VT.isInteger() == MemVT.isInteger() && 5897 "Cannot convert from FP to Int or Int -> FP!"); 5898 assert(VT.isVector() == MemVT.isVector() && 5899 "Cannot use an ext load to convert to or from a vector!"); 5900 assert((!VT.isVector() || 5901 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 5902 "Cannot use an ext load to change the number of vector elements!"); 5903 } 5904 5905 bool Indexed = AM != ISD::UNINDEXED; 5906 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 5907 5908 SDVTList VTs = Indexed ? 
5909 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
5910 SDValue Ops[] = { Chain, Ptr, Offset };
5911 FoldingSetNodeID ID;
5912 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
5913 ID.AddInteger(MemVT.getRawBits());
5914 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
5915 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
5916 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5917 void *IP = nullptr;
5918 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5919 cast<LoadSDNode>(E)->refineAlignment(MMO);
5920 return SDValue(E, 0);
5921 }
5922 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
5923 ExtType, MemVT, MMO);
5924 createOperands(N, Ops);
5925
5926 CSEMap.InsertNode(N, IP);
5927 InsertNode(N);
5928 return SDValue(N, 0);
5929 }
5930
5931 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5932 SDValue Ptr, MachinePointerInfo PtrInfo,
5933 unsigned Alignment,
5934 MachineMemOperand::Flags MMOFlags,
5935 const AAMDNodes &AAInfo, const MDNode *Ranges) {
5936 SDValue Undef = getUNDEF(Ptr.getValueType());
5937 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5938 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
5939 }
5940
5941 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5942 SDValue Ptr, MachineMemOperand *MMO) {
5943 SDValue Undef = getUNDEF(Ptr.getValueType());
5944 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5945 VT, MMO);
5946 }
5947
5948 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5949 EVT VT, SDValue Chain, SDValue Ptr,
5950 MachinePointerInfo PtrInfo, EVT MemVT,
5951 unsigned Alignment,
5952 MachineMemOperand::Flags MMOFlags,
5953 const AAMDNodes &AAInfo) {
5954 SDValue Undef = getUNDEF(Ptr.getValueType());
5955 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
5956 MemVT, Alignment, MMOFlags, AAInfo);
5957 }
5958
5959 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5960 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
5961 MachineMemOperand *MMO) {
5962 SDValue Undef = getUNDEF(Ptr.getValueType());
5963 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
5964 MemVT, MMO);
5965 }
5966
5967 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
5968 SDValue Base, SDValue Offset,
5969 ISD::MemIndexedMode AM) {
5970 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
5971 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
5972 // Don't propagate the invariant or dereferenceable flags.
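// (They were established for the original address, and may not hold for the
// address computed by the indexed access.)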
5973 auto MMOFlags =
5974 LD->getMemOperand()->getFlags() &
5975 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
5976 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
5977 LD->getChain(), Base, Offset, LD->getPointerInfo(),
5978 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
5979 LD->getAAInfo());
5980 }
5981
5982 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
5983 SDValue Ptr, MachinePointerInfo PtrInfo,
5984 unsigned Alignment,
5985 MachineMemOperand::Flags MMOFlags,
5986 const AAMDNodes &AAInfo) {
5987 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
5988 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5989 Alignment = getEVTAlignment(Val.getValueType());
5990
5991 MMOFlags |= MachineMemOperand::MOStore;
5992 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
5993
5994 if (PtrInfo.V.isNull())
5995 PtrInfo = InferPointerInfo(*this, Ptr);
5996
5997 MachineFunction &MF = getMachineFunction();
5998 MachineMemOperand *MMO = MF.getMachineMemOperand(
5999 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
6000 return getStore(Chain, dl, Val, Ptr, MMO);
6001 }
6002
6003 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6004 SDValue Ptr, MachineMemOperand *MMO) {
6005 assert(Chain.getValueType() == MVT::Other &&
6006 "Invalid chain type");
6007 EVT VT = Val.getValueType();
6008 SDVTList VTs = getVTList(MVT::Other);
6009 SDValue Undef = getUNDEF(Ptr.getValueType());
6010 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6011 FoldingSetNodeID ID;
6012 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6013 ID.AddInteger(VT.getRawBits());
6014 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6015 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
6016 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6017 void *IP = nullptr;
6018 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6019 cast<StoreSDNode>(E)->refineAlignment(MMO);
6020 return SDValue(E, 0);
6021 }
6022 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6023 ISD::UNINDEXED, false, VT, MMO);
6024 createOperands(N, Ops);
6025
6026 CSEMap.InsertNode(N, IP);
6027 InsertNode(N);
6028 return SDValue(N, 0);
6029 }
6030
6031 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6032 SDValue Ptr, MachinePointerInfo PtrInfo,
6033 EVT SVT, unsigned Alignment,
6034 MachineMemOperand::Flags MMOFlags,
6035 const AAMDNodes &AAInfo) {
6036 assert(Chain.getValueType() == MVT::Other &&
6037 "Invalid chain type");
6038 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6039 Alignment = getEVTAlignment(SVT);
6040
6041 MMOFlags |= MachineMemOperand::MOStore;
6042 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6043
6044 if (PtrInfo.V.isNull())
6045 PtrInfo = InferPointerInfo(*this, Ptr);
6046
6047 MachineFunction &MF = getMachineFunction();
6048 MachineMemOperand *MMO = MF.getMachineMemOperand(
6049 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
6050 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
6051 }
6052
6053 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6054 SDValue Ptr, EVT SVT,
6055 MachineMemOperand *MMO) {
6056 EVT VT = Val.getValueType();
6057
6058 assert(Chain.getValueType() == MVT::Other &&
6059 "Invalid chain type");
6060 if (VT == SVT)
6061 return getStore(Chain, dl, Val, Ptr, MMO);
6062
6063 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
6064 "Should only be a truncating store, not extending!");
6065 assert(VT.isInteger() == SVT.isInteger() &&
6066 "Can't do FP-INT conversion!");
6067 assert(VT.isVector() == SVT.isVector() &&
6068 "Cannot use trunc store to convert to or from a vector!");
6069 assert((!VT.isVector() ||
6070 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
6071 "Cannot use trunc store to change the number of vector elements!");
6072
6073 SDVTList VTs = getVTList(MVT::Other);
6074 SDValue Undef = getUNDEF(Ptr.getValueType());
6075 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6076 FoldingSetNodeID ID;
6077 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6078 ID.AddInteger(SVT.getRawBits());
6079 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6080 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
6081 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6082 void *IP = nullptr;
6083 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6084 cast<StoreSDNode>(E)->refineAlignment(MMO);
6085 return SDValue(E, 0);
6086 }
6087 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6088 ISD::UNINDEXED, true, SVT, MMO);
6089 createOperands(N, Ops);
6090
6091 CSEMap.InsertNode(N, IP);
6092 InsertNode(N);
6093 return SDValue(N, 0);
6094 }
6095
6096 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
6097 SDValue Base, SDValue Offset,
6098 ISD::MemIndexedMode AM) {
6099 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
6100 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
6101 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
6102 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
6103 FoldingSetNodeID ID;
6104 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6105 ID.AddInteger(ST->getMemoryVT().getRawBits());
6106 ID.AddInteger(ST->getRawSubclassData());
6107 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
6108 void *IP = nullptr;
6109 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
6110 return SDValue(E, 0);
6111
6112 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
6113 ST->isTruncatingStore(), ST->getMemoryVT(),
6114 ST->getMemOperand());
6115 createOperands(N, Ops);
6116
6117 CSEMap.InsertNode(N, IP);
6118 InsertNode(N);
6119 return SDValue(N, 0);
6120 }
6121
6122 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6123 SDValue Ptr, SDValue Mask, SDValue Src0,
6124 EVT MemVT, MachineMemOperand *MMO,
6125 ISD::LoadExtType ExtTy, bool isExpanding) {
6126 SDVTList VTs = getVTList(VT, MVT::Other);
6127 SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
6128 FoldingSetNodeID ID;
6129 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
6130 ID.AddInteger(VT.getRawBits());
6131 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
6132 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
6133 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6134 void *IP = nullptr;
6135 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6136 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
6137 return SDValue(E, 0);
6138 }
6139 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6140 ExtTy, isExpanding, MemVT, MMO);
6141 createOperands(N, Ops);
6142
6143 CSEMap.InsertNode(N, IP);
6144 InsertNode(N);
6145 return SDValue(N, 0);
6146 }
6147
6148 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
6149 SDValue Val, SDValue Ptr, SDValue Mask,
6150 EVT MemVT, MachineMemOperand *MMO,
6151 bool IsTruncating, bool IsCompressing) {
6152 assert(Chain.getValueType()
== MVT::Other && 6153 "Invalid chain type"); 6154 EVT VT = Val.getValueType(); 6155 SDVTList VTs = getVTList(MVT::Other); 6156 SDValue Ops[] = { Chain, Ptr, Mask, Val }; 6157 FoldingSetNodeID ID; 6158 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 6159 ID.AddInteger(VT.getRawBits()); 6160 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 6161 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO)); 6162 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6163 void *IP = nullptr; 6164 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6165 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 6166 return SDValue(E, 0); 6167 } 6168 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6169 IsTruncating, IsCompressing, MemVT, MMO); 6170 createOperands(N, Ops); 6171 6172 CSEMap.InsertNode(N, IP); 6173 InsertNode(N); 6174 return SDValue(N, 0); 6175 } 6176 6177 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 6178 ArrayRef<SDValue> Ops, 6179 MachineMemOperand *MMO) { 6180 assert(Ops.size() == 5 && "Incompatible number of operands"); 6181 6182 FoldingSetNodeID ID; 6183 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 6184 ID.AddInteger(VT.getRawBits()); 6185 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 6186 dl.getIROrder(), VTs, VT, MMO)); 6187 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6188 void *IP = nullptr; 6189 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6190 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 6191 return SDValue(E, 0); 6192 } 6193 6194 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 6195 VTs, VT, MMO); 6196 createOperands(N, Ops); 6197 6198 assert(N->getValue().getValueType() == N->getValueType(0) && 6199 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 6200 assert(N->getMask().getValueType().getVectorNumElements() == 6201 N->getValueType(0).getVectorNumElements() && 6202 "Vector width mismatch between mask and data"); 6203 assert(N->getIndex().getValueType().getVectorNumElements() == 6204 N->getValueType(0).getVectorNumElements() && 6205 "Vector width mismatch between index and data"); 6206 6207 CSEMap.InsertNode(N, IP); 6208 InsertNode(N); 6209 return SDValue(N, 0); 6210 } 6211 6212 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 6213 ArrayRef<SDValue> Ops, 6214 MachineMemOperand *MMO) { 6215 assert(Ops.size() == 5 && "Incompatible number of operands"); 6216 6217 FoldingSetNodeID ID; 6218 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 6219 ID.AddInteger(VT.getRawBits()); 6220 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 6221 dl.getIROrder(), VTs, VT, MMO)); 6222 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6223 void *IP = nullptr; 6224 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6225 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 6226 return SDValue(E, 0); 6227 } 6228 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), 6229 VTs, VT, MMO); 6230 createOperands(N, Ops); 6231 6232 assert(N->getMask().getValueType().getVectorNumElements() == 6233 N->getValue().getValueType().getVectorNumElements() && 6234 "Vector width mismatch between mask and data"); 6235 assert(N->getIndex().getValueType().getVectorNumElements() == 6236 N->getValue().getValueType().getVectorNumElements() && 6237 "Vector width mismatch between index and data"); 6238 6239 CSEMap.InsertNode(N, IP); 6240 InsertNode(N); 6241 return SDValue(N, 0); 6242 } 6243 6244 SDValue 
SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 6245 SDValue Ptr, SDValue SV, unsigned Align) { 6246 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 6247 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 6248 } 6249 6250 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 6251 ArrayRef<SDUse> Ops) { 6252 switch (Ops.size()) { 6253 case 0: return getNode(Opcode, DL, VT); 6254 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 6255 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 6256 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 6257 default: break; 6258 } 6259 6260 // Copy from an SDUse array into an SDValue array for use with 6261 // the regular getNode logic. 6262 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 6263 return getNode(Opcode, DL, VT, NewOps); 6264 } 6265 6266 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 6267 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 6268 unsigned NumOps = Ops.size(); 6269 switch (NumOps) { 6270 case 0: return getNode(Opcode, DL, VT); 6271 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 6272 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 6273 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 6274 default: break; 6275 } 6276 6277 switch (Opcode) { 6278 default: break; 6279 case ISD::CONCAT_VECTORS: 6280 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 6281 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 6282 return V; 6283 break; 6284 case ISD::SELECT_CC: 6285 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 6286 assert(Ops[0].getValueType() == Ops[1].getValueType() && 6287 "LHS and RHS of condition must have same type!"); 6288 assert(Ops[2].getValueType() == Ops[3].getValueType() && 6289 "True and False arms of SelectCC must have same type!"); 6290 assert(Ops[2].getValueType() == VT && 6291 "select_cc node must be of same type as true and false value!"); 6292 break; 6293 case ISD::BR_CC: 6294 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 6295 assert(Ops[2].getValueType() == Ops[3].getValueType() && 6296 "LHS/RHS of comparison should match types!"); 6297 break; 6298 } 6299 6300 // Memoize nodes. 
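  // (Editorial sketch: "memoizing" here means hashing the opcode, result
  // types, and operands into a FoldingSetNodeID and probing CSEMap, so a
  // structurally identical node is created only once. The lookup pattern
  // used below, and throughout this file, looks like:
  //
  //   FoldingSetNodeID ID;
  //   AddNodeIDNode(ID, Opcode, VTs, Ops);  // hash opcode + VTs + operands
  //   void *IP = nullptr;
  //   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
  //     return SDValue(E, 0);               // reuse the existing node
  //
  // Nodes whose last result is MVT::Glue are deliberately excluded; see the
  // note further down.)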
6301   SDNode *N;
6302   SDVTList VTs = getVTList(VT);
6303 
6304   if (VT != MVT::Glue) {
6305     FoldingSetNodeID ID;
6306     AddNodeIDNode(ID, Opcode, VTs, Ops);
6307     void *IP = nullptr;
6308 
6309     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
6310       return SDValue(E, 0);
6311 
6312     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6313     createOperands(N, Ops);
6314 
6315     CSEMap.InsertNode(N, IP);
6316   } else {
6317     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6318     createOperands(N, Ops);
6319   }
6320 
6321   InsertNode(N);
6322   return SDValue(N, 0);
6323 }
6324 
6325 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
6326                               ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
6327   return getNode(Opcode, DL, getVTList(ResultTys), Ops);
6328 }
6329 
6330 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6331                               ArrayRef<SDValue> Ops) {
6332   if (VTList.NumVTs == 1)
6333     return getNode(Opcode, DL, VTList.VTs[0], Ops);
6334 
6335 #if 0
6336   switch (Opcode) {
6337   // FIXME: figure out how to safely handle things like
6338   // int foo(int x) { return 1 << (x & 255); }
6339   // int bar() { return foo(256); }
6340   case ISD::SRA_PARTS:
6341   case ISD::SRL_PARTS:
6342   case ISD::SHL_PARTS:
6343     if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
6344         cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
6345       return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
6346     else if (N3.getOpcode() == ISD::AND)
6347       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
6348         // If the and is only masking out bits that cannot affect the shift,
6349         // eliminate the and.
6350         unsigned NumBits = VT.getScalarSizeInBits()*2;
6351         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
6352           return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
6353       }
6354     break;
6355   }
6356 #endif
6357 
6358   // Memoize the node unless it returns a flag.
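  // (A node whose last result type is MVT::Glue is deliberately never CSE'd:
  // glue models a scheduling or physical-register dependency that is private
  // to one consumer, so two glue-producing nodes must stay distinct even
  // when their opcodes and operands are identical.)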
6359 SDNode *N; 6360 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6361 FoldingSetNodeID ID; 6362 AddNodeIDNode(ID, Opcode, VTList, Ops); 6363 void *IP = nullptr; 6364 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 6365 return SDValue(E, 0); 6366 6367 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 6368 createOperands(N, Ops); 6369 CSEMap.InsertNode(N, IP); 6370 } else { 6371 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 6372 createOperands(N, Ops); 6373 } 6374 InsertNode(N); 6375 return SDValue(N, 0); 6376 } 6377 6378 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 6379 SDVTList VTList) { 6380 return getNode(Opcode, DL, VTList, None); 6381 } 6382 6383 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6384 SDValue N1) { 6385 SDValue Ops[] = { N1 }; 6386 return getNode(Opcode, DL, VTList, Ops); 6387 } 6388 6389 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6390 SDValue N1, SDValue N2) { 6391 SDValue Ops[] = { N1, N2 }; 6392 return getNode(Opcode, DL, VTList, Ops); 6393 } 6394 6395 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6396 SDValue N1, SDValue N2, SDValue N3) { 6397 SDValue Ops[] = { N1, N2, N3 }; 6398 return getNode(Opcode, DL, VTList, Ops); 6399 } 6400 6401 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6402 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 6403 SDValue Ops[] = { N1, N2, N3, N4 }; 6404 return getNode(Opcode, DL, VTList, Ops); 6405 } 6406 6407 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6408 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 6409 SDValue N5) { 6410 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 6411 return getNode(Opcode, DL, VTList, Ops); 6412 } 6413 6414 SDVTList SelectionDAG::getVTList(EVT VT) { 6415 return makeVTList(SDNode::getValueTypeList(VT), 1); 6416 } 6417 6418 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 6419 FoldingSetNodeID ID; 6420 ID.AddInteger(2U); 6421 ID.AddInteger(VT1.getRawBits()); 6422 ID.AddInteger(VT2.getRawBits()); 6423 6424 void *IP = nullptr; 6425 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6426 if (!Result) { 6427 EVT *Array = Allocator.Allocate<EVT>(2); 6428 Array[0] = VT1; 6429 Array[1] = VT2; 6430 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 6431 VTListMap.InsertNode(Result, IP); 6432 } 6433 return Result->getSDVTList(); 6434 } 6435 6436 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 6437 FoldingSetNodeID ID; 6438 ID.AddInteger(3U); 6439 ID.AddInteger(VT1.getRawBits()); 6440 ID.AddInteger(VT2.getRawBits()); 6441 ID.AddInteger(VT3.getRawBits()); 6442 6443 void *IP = nullptr; 6444 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6445 if (!Result) { 6446 EVT *Array = Allocator.Allocate<EVT>(3); 6447 Array[0] = VT1; 6448 Array[1] = VT2; 6449 Array[2] = VT3; 6450 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 6451 VTListMap.InsertNode(Result, IP); 6452 } 6453 return Result->getSDVTList(); 6454 } 6455 6456 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 6457 FoldingSetNodeID ID; 6458 ID.AddInteger(4U); 6459 ID.AddInteger(VT1.getRawBits()); 6460 ID.AddInteger(VT2.getRawBits()); 6461 ID.AddInteger(VT3.getRawBits()); 6462 ID.AddInteger(VT4.getRawBits()); 6463 6464 void *IP = nullptr; 6465 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6466 if (!Result) { 
6467 EVT *Array = Allocator.Allocate<EVT>(4); 6468 Array[0] = VT1; 6469 Array[1] = VT2; 6470 Array[2] = VT3; 6471 Array[3] = VT4; 6472 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 6473 VTListMap.InsertNode(Result, IP); 6474 } 6475 return Result->getSDVTList(); 6476 } 6477 6478 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 6479 unsigned NumVTs = VTs.size(); 6480 FoldingSetNodeID ID; 6481 ID.AddInteger(NumVTs); 6482 for (unsigned index = 0; index < NumVTs; index++) { 6483 ID.AddInteger(VTs[index].getRawBits()); 6484 } 6485 6486 void *IP = nullptr; 6487 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6488 if (!Result) { 6489 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 6490 std::copy(VTs.begin(), VTs.end(), Array); 6491 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 6492 VTListMap.InsertNode(Result, IP); 6493 } 6494 return Result->getSDVTList(); 6495 } 6496 6497 6498 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 6499 /// specified operands. If the resultant node already exists in the DAG, 6500 /// this does not modify the specified node, instead it returns the node that 6501 /// already exists. If the resultant node does not exist in the DAG, the 6502 /// input node is returned. As a degenerate case, if you specify the same 6503 /// input operands as the node already has, the input node is returned. 6504 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 6505 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 6506 6507 // Check to see if there is no change. 6508 if (Op == N->getOperand(0)) return N; 6509 6510 // See if the modified node already exists. 6511 void *InsertPos = nullptr; 6512 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 6513 return Existing; 6514 6515 // Nope it doesn't. Remove the node from its current place in the maps. 6516 if (InsertPos) 6517 if (!RemoveNodeFromCSEMaps(N)) 6518 InsertPos = nullptr; 6519 6520 // Now we update the operands. 6521 N->OperandList[0].set(Op); 6522 6523 // If this gets put into a CSE map, add it. 6524 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6525 return N; 6526 } 6527 6528 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 6529 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 6530 6531 // Check to see if there is no change. 6532 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 6533 return N; // No operands changed, just return the input node. 6534 6535 // See if the modified node already exists. 6536 void *InsertPos = nullptr; 6537 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 6538 return Existing; 6539 6540 // Nope it doesn't. Remove the node from its current place in the maps. 6541 if (InsertPos) 6542 if (!RemoveNodeFromCSEMaps(N)) 6543 InsertPos = nullptr; 6544 6545 // Now we update the operands. 6546 if (N->OperandList[0] != Op1) 6547 N->OperandList[0].set(Op1); 6548 if (N->OperandList[1] != Op2) 6549 N->OperandList[1].set(Op2); 6550 6551 // If this gets put into a CSE map, add it. 
6552 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6553 return N; 6554 } 6555 6556 SDNode *SelectionDAG:: 6557 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 6558 SDValue Ops[] = { Op1, Op2, Op3 }; 6559 return UpdateNodeOperands(N, Ops); 6560 } 6561 6562 SDNode *SelectionDAG:: 6563 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 6564 SDValue Op3, SDValue Op4) { 6565 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 6566 return UpdateNodeOperands(N, Ops); 6567 } 6568 6569 SDNode *SelectionDAG:: 6570 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 6571 SDValue Op3, SDValue Op4, SDValue Op5) { 6572 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 6573 return UpdateNodeOperands(N, Ops); 6574 } 6575 6576 SDNode *SelectionDAG:: 6577 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 6578 unsigned NumOps = Ops.size(); 6579 assert(N->getNumOperands() == NumOps && 6580 "Update with wrong number of operands"); 6581 6582 // If no operands changed just return the input node. 6583 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 6584 return N; 6585 6586 // See if the modified node already exists. 6587 void *InsertPos = nullptr; 6588 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 6589 return Existing; 6590 6591 // Nope it doesn't. Remove the node from its current place in the maps. 6592 if (InsertPos) 6593 if (!RemoveNodeFromCSEMaps(N)) 6594 InsertPos = nullptr; 6595 6596 // Now we update the operands. 6597 for (unsigned i = 0; i != NumOps; ++i) 6598 if (N->OperandList[i] != Ops[i]) 6599 N->OperandList[i].set(Ops[i]); 6600 6601 // If this gets put into a CSE map, add it. 6602 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6603 return N; 6604 } 6605 6606 /// DropOperands - Release the operands and set this node to have 6607 /// zero operands. 6608 void SDNode::DropOperands() { 6609 // Unlike the code in MorphNodeTo that does this, we don't need to 6610 // watch for dead nodes here. 6611 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 6612 SDUse &Use = *I++; 6613 Use.set(SDValue()); 6614 } 6615 } 6616 6617 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 6618 /// machine opcode. 
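///
/// Illustrative usage from a target's instruction selector (the opcode here
/// is a placeholder, not a real instruction):
///
///   // Morph N in place into a two-register machine add.
///   SDNode *New = CurDAG->SelectNodeTo(N, TargetOpc::ADD_rr,
///                                      N->getValueType(0),
///                                      N->getOperand(0), N->getOperand(1));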
6619 ///
6620 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6621                                    EVT VT) {
6622   SDVTList VTs = getVTList(VT);
6623   return SelectNodeTo(N, MachineOpc, VTs, None);
6624 }
6625 
6626 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6627                                    EVT VT, SDValue Op1) {
6628   SDVTList VTs = getVTList(VT);
6629   SDValue Ops[] = { Op1 };
6630   return SelectNodeTo(N, MachineOpc, VTs, Ops);
6631 }
6632 
6633 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6634                                    EVT VT, SDValue Op1,
6635                                    SDValue Op2) {
6636   SDVTList VTs = getVTList(VT);
6637   SDValue Ops[] = { Op1, Op2 };
6638   return SelectNodeTo(N, MachineOpc, VTs, Ops);
6639 }
6640 
6641 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6642                                    EVT VT, SDValue Op1,
6643                                    SDValue Op2, SDValue Op3) {
6644   SDVTList VTs = getVTList(VT);
6645   SDValue Ops[] = { Op1, Op2, Op3 };
6646   return SelectNodeTo(N, MachineOpc, VTs, Ops);
6647 }
6648 
6649 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6650                                    EVT VT, ArrayRef<SDValue> Ops) {
6651   SDVTList VTs = getVTList(VT);
6652   return SelectNodeTo(N, MachineOpc, VTs, Ops);
6653 }
6654 
6655 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6656                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
6657   SDVTList VTs = getVTList(VT1, VT2);
6658   return SelectNodeTo(N, MachineOpc, VTs, Ops);
6659 }
6660 
6661 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6662                                    EVT VT1, EVT VT2) {
6663   SDVTList VTs = getVTList(VT1, VT2);
6664   return SelectNodeTo(N, MachineOpc, VTs, None);
6665 }
6666 
6667 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6668                                    EVT VT1, EVT VT2, EVT VT3,
6669                                    ArrayRef<SDValue> Ops) {
6670   SDVTList VTs = getVTList(VT1, VT2, VT3);
6671   return SelectNodeTo(N, MachineOpc, VTs, Ops);
6672 }
6673 
6674 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6675                                    EVT VT1, EVT VT2,
6676                                    SDValue Op1, SDValue Op2) {
6677   SDVTList VTs = getVTList(VT1, VT2);
6678   SDValue Ops[] = { Op1, Op2 };
6679   return SelectNodeTo(N, MachineOpc, VTs, Ops);
6680 }
6681 
6682 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6683                                    SDVTList VTs, ArrayRef<SDValue> Ops) {
6684   SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
6685   // Reset the NodeID to -1.
6686   New->setNodeId(-1);
6687   if (New != N) {
6688     ReplaceAllUsesWith(N, New);
6689     RemoveDeadNode(N);
6690   }
6691   return New;
6692 }
6693 
6694 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
6695 /// the line number information on the merged node since it is not possible to
6696 /// preserve the information that the operation is associated with multiple
6697 /// lines. This will make the debugger work better at -O0, where there is a
6698 /// higher probability of having other instructions associated with that line.
6699 ///
6700 /// For IROrder, we keep the smaller of the two.
6701 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
6702   DebugLoc NLoc = N->getDebugLoc();
6703   if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
6704     N->setDebugLoc(DebugLoc());
6705   }
6706   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
6707   N->setIROrder(Order);
6708   return N;
6709 }
6710 
6711 /// MorphNodeTo - This *mutates* the specified node to have the specified
6712 /// return type, opcode, and operands.
6713 ///
6714 /// Note that MorphNodeTo returns the resultant node.
If there is already a 6715 /// node of the specified opcode and operands, it returns that node instead of 6716 /// the current one. Note that the SDLoc need not be the same. 6717 /// 6718 /// Using MorphNodeTo is faster than creating a new node and swapping it in 6719 /// with ReplaceAllUsesWith both because it often avoids allocating a new 6720 /// node, and because it doesn't require CSE recalculation for any of 6721 /// the node's users. 6722 /// 6723 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG. 6724 /// As a consequence it isn't appropriate to use from within the DAG combiner or 6725 /// the legalizer which maintain worklists that would need to be updated when 6726 /// deleting things. 6727 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, 6728 SDVTList VTs, ArrayRef<SDValue> Ops) { 6729 // If an identical node already exists, use it. 6730 void *IP = nullptr; 6731 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { 6732 FoldingSetNodeID ID; 6733 AddNodeIDNode(ID, Opc, VTs, Ops); 6734 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) 6735 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); 6736 } 6737 6738 if (!RemoveNodeFromCSEMaps(N)) 6739 IP = nullptr; 6740 6741 // Start the morphing. 6742 N->NodeType = Opc; 6743 N->ValueList = VTs.VTs; 6744 N->NumValues = VTs.NumVTs; 6745 6746 // Clear the operands list, updating used nodes to remove this from their 6747 // use list. Keep track of any operands that become dead as a result. 6748 SmallPtrSet<SDNode*, 16> DeadNodeSet; 6749 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 6750 SDUse &Use = *I++; 6751 SDNode *Used = Use.getNode(); 6752 Use.set(SDValue()); 6753 if (Used->use_empty()) 6754 DeadNodeSet.insert(Used); 6755 } 6756 6757 // For MachineNode, initialize the memory references information. 6758 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) 6759 MN->setMemRefs(nullptr, nullptr); 6760 6761 // Swap for an appropriately sized array from the recycler. 6762 removeOperands(N); 6763 createOperands(N, Ops); 6764 6765 // Delete any nodes that are still dead after adding the uses for the 6766 // new operands. 6767 if (!DeadNodeSet.empty()) { 6768 SmallVector<SDNode *, 16> DeadNodes; 6769 for (SDNode *N : DeadNodeSet) 6770 if (N->use_empty()) 6771 DeadNodes.push_back(N); 6772 RemoveDeadNodes(DeadNodes); 6773 } 6774 6775 if (IP) 6776 CSEMap.InsertNode(N, IP); // Memoize the new node. 
6777 return N; 6778 } 6779 6780 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) { 6781 unsigned OrigOpc = Node->getOpcode(); 6782 unsigned NewOpc; 6783 bool IsUnary = false; 6784 bool IsTernary = false; 6785 switch (OrigOpc) { 6786 default: 6787 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!"); 6788 case ISD::STRICT_FADD: NewOpc = ISD::FADD; break; 6789 case ISD::STRICT_FSUB: NewOpc = ISD::FSUB; break; 6790 case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break; 6791 case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break; 6792 case ISD::STRICT_FREM: NewOpc = ISD::FREM; break; 6793 case ISD::STRICT_FMA: NewOpc = ISD::FMA; IsTernary = true; break; 6794 case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; IsUnary = true; break; 6795 case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break; 6796 case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break; 6797 case ISD::STRICT_FSIN: NewOpc = ISD::FSIN; IsUnary = true; break; 6798 case ISD::STRICT_FCOS: NewOpc = ISD::FCOS; IsUnary = true; break; 6799 case ISD::STRICT_FEXP: NewOpc = ISD::FEXP; IsUnary = true; break; 6800 case ISD::STRICT_FEXP2: NewOpc = ISD::FEXP2; IsUnary = true; break; 6801 case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; IsUnary = true; break; 6802 case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; IsUnary = true; break; 6803 case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; IsUnary = true; break; 6804 case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; IsUnary = true; break; 6805 case ISD::STRICT_FNEARBYINT: 6806 NewOpc = ISD::FNEARBYINT; 6807 IsUnary = true; 6808 break; 6809 } 6810 6811 // We're taking this node out of the chain, so we need to re-link things. 6812 SDValue InputChain = Node->getOperand(0); 6813 SDValue OutputChain = SDValue(Node, 1); 6814 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 6815 6816 SDVTList VTs = getVTList(Node->getOperand(1).getValueType()); 6817 SDNode *Res = nullptr; 6818 if (IsUnary) 6819 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1) }); 6820 else if (IsTernary) 6821 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1), 6822 Node->getOperand(2), 6823 Node->getOperand(3)}); 6824 else 6825 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1), 6826 Node->getOperand(2) }); 6827 6828 // MorphNodeTo can operate in two ways: if an existing node with the 6829 // specified operands exists, it can just return it. Otherwise, it 6830 // updates the node in place to have the requested operands. 6831 if (Res == Node) { 6832 // If we updated the node in place, reset the node ID. To the isel, 6833 // this should be just like a newly allocated machine node. 6834 Res->setNodeId(-1); 6835 } else { 6836 ReplaceAllUsesWith(Node, Res); 6837 RemoveDeadNode(Node); 6838 } 6839 6840 return Res; 6841 } 6842 6843 /// getMachineNode - These are used for target selectors to create a new node 6844 /// with specified return type(s), MachineInstr opcode, and operands. 6845 /// 6846 /// Note that getMachineNode returns the resultant node. If there is already a 6847 /// node of the specified opcode and operands, it returns that node instead of 6848 /// the current one. 
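///
/// Illustrative usage (sketch): after instruction selection, nodes carry
/// target instruction opcodes rather than ISD opcodes. SrcVal below is a
/// placeholder operand:
///
///   MachineSDNode *MN =
///       CurDAG->getMachineNode(TargetOpcode::COPY, DL, MVT::i32, SrcVal);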
6849 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6850 EVT VT) { 6851 SDVTList VTs = getVTList(VT); 6852 return getMachineNode(Opcode, dl, VTs, None); 6853 } 6854 6855 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6856 EVT VT, SDValue Op1) { 6857 SDVTList VTs = getVTList(VT); 6858 SDValue Ops[] = { Op1 }; 6859 return getMachineNode(Opcode, dl, VTs, Ops); 6860 } 6861 6862 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6863 EVT VT, SDValue Op1, SDValue Op2) { 6864 SDVTList VTs = getVTList(VT); 6865 SDValue Ops[] = { Op1, Op2 }; 6866 return getMachineNode(Opcode, dl, VTs, Ops); 6867 } 6868 6869 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6870 EVT VT, SDValue Op1, SDValue Op2, 6871 SDValue Op3) { 6872 SDVTList VTs = getVTList(VT); 6873 SDValue Ops[] = { Op1, Op2, Op3 }; 6874 return getMachineNode(Opcode, dl, VTs, Ops); 6875 } 6876 6877 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6878 EVT VT, ArrayRef<SDValue> Ops) { 6879 SDVTList VTs = getVTList(VT); 6880 return getMachineNode(Opcode, dl, VTs, Ops); 6881 } 6882 6883 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6884 EVT VT1, EVT VT2, SDValue Op1, 6885 SDValue Op2) { 6886 SDVTList VTs = getVTList(VT1, VT2); 6887 SDValue Ops[] = { Op1, Op2 }; 6888 return getMachineNode(Opcode, dl, VTs, Ops); 6889 } 6890 6891 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6892 EVT VT1, EVT VT2, SDValue Op1, 6893 SDValue Op2, SDValue Op3) { 6894 SDVTList VTs = getVTList(VT1, VT2); 6895 SDValue Ops[] = { Op1, Op2, Op3 }; 6896 return getMachineNode(Opcode, dl, VTs, Ops); 6897 } 6898 6899 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6900 EVT VT1, EVT VT2, 6901 ArrayRef<SDValue> Ops) { 6902 SDVTList VTs = getVTList(VT1, VT2); 6903 return getMachineNode(Opcode, dl, VTs, Ops); 6904 } 6905 6906 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6907 EVT VT1, EVT VT2, EVT VT3, 6908 SDValue Op1, SDValue Op2) { 6909 SDVTList VTs = getVTList(VT1, VT2, VT3); 6910 SDValue Ops[] = { Op1, Op2 }; 6911 return getMachineNode(Opcode, dl, VTs, Ops); 6912 } 6913 6914 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6915 EVT VT1, EVT VT2, EVT VT3, 6916 SDValue Op1, SDValue Op2, 6917 SDValue Op3) { 6918 SDVTList VTs = getVTList(VT1, VT2, VT3); 6919 SDValue Ops[] = { Op1, Op2, Op3 }; 6920 return getMachineNode(Opcode, dl, VTs, Ops); 6921 } 6922 6923 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6924 EVT VT1, EVT VT2, EVT VT3, 6925 ArrayRef<SDValue> Ops) { 6926 SDVTList VTs = getVTList(VT1, VT2, VT3); 6927 return getMachineNode(Opcode, dl, VTs, Ops); 6928 } 6929 6930 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6931 ArrayRef<EVT> ResultTys, 6932 ArrayRef<SDValue> Ops) { 6933 SDVTList VTs = getVTList(ResultTys); 6934 return getMachineNode(Opcode, dl, VTs, Ops); 6935 } 6936 6937 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 6938 SDVTList VTs, 6939 ArrayRef<SDValue> Ops) { 6940 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 6941 MachineSDNode *N; 6942 void *IP = nullptr; 6943 6944 if (DoCSE) { 6945 FoldingSetNodeID ID; 6946 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 6947 IP = nullptr; 6948 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 6949 return 
cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 6950 } 6951 } 6952 6953 // Allocate a new MachineSDNode. 6954 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 6955 createOperands(N, Ops); 6956 6957 if (DoCSE) 6958 CSEMap.InsertNode(N, IP); 6959 6960 InsertNode(N); 6961 return N; 6962 } 6963 6964 /// getTargetExtractSubreg - A convenience function for creating 6965 /// TargetOpcode::EXTRACT_SUBREG nodes. 6966 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 6967 SDValue Operand) { 6968 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 6969 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 6970 VT, Operand, SRIdxVal); 6971 return SDValue(Subreg, 0); 6972 } 6973 6974 /// getTargetInsertSubreg - A convenience function for creating 6975 /// TargetOpcode::INSERT_SUBREG nodes. 6976 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 6977 SDValue Operand, SDValue Subreg) { 6978 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 6979 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 6980 VT, Operand, Subreg, SRIdxVal); 6981 return SDValue(Result, 0); 6982 } 6983 6984 /// getNodeIfExists - Get the specified node if it's already available, or 6985 /// else return NULL. 6986 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 6987 ArrayRef<SDValue> Ops, 6988 const SDNodeFlags Flags) { 6989 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 6990 FoldingSetNodeID ID; 6991 AddNodeIDNode(ID, Opcode, VTList, Ops); 6992 void *IP = nullptr; 6993 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 6994 E->intersectFlagsWith(Flags); 6995 return E; 6996 } 6997 } 6998 return nullptr; 6999 } 7000 7001 /// getDbgValue - Creates a SDDbgValue node. 7002 /// 7003 /// SDNode 7004 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 7005 SDNode *N, unsigned R, bool IsIndirect, 7006 const DebugLoc &DL, unsigned O) { 7007 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7008 "Expected inlined-at fields to agree"); 7009 return new (DbgInfo->getAlloc()) 7010 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 7011 } 7012 7013 /// Constant 7014 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 7015 DIExpression *Expr, 7016 const Value *C, 7017 const DebugLoc &DL, unsigned O) { 7018 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7019 "Expected inlined-at fields to agree"); 7020 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 7021 } 7022 7023 /// FrameIndex 7024 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 7025 DIExpression *Expr, unsigned FI, 7026 const DebugLoc &DL, 7027 unsigned O) { 7028 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7029 "Expected inlined-at fields to agree"); 7030 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, DL, O); 7031 } 7032 7033 void SelectionDAG::salvageDebugInfo(SDNode &N) { 7034 if (!N.getHasDebugValue()) 7035 return; 7036 for (auto DV : GetDbgValues(&N)) { 7037 if (DV->isInvalidated()) 7038 continue; 7039 switch (N.getOpcode()) { 7040 default: 7041 break; 7042 case ISD::ADD: 7043 SDValue N0 = N.getOperand(0); 7044 SDValue N1 = N.getOperand(1); 7045 if (!isConstantIntBuildVectorOrConstantInt(N0) && 7046 isConstantIntBuildVectorOrConstantInt(N1)) { 7047 uint64_t Offset = N.getConstantOperandVal(1); 7048 // Rewrite an ADD constant node into a DIExpression. 
Since we are
7049         // performing arithmetic to compute the variable's *value* in the
7050         // DIExpression, we need to mark the expression with a
7051         // DW_OP_stack_value.
7052         auto *DIExpr = DV->getExpression();
7053         DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset,
7054                                        DIExpression::WithStackValue);
7055         SDDbgValue *Clone =
7056             getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
7057                         DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
7058         DV->setIsInvalidated();
7059         AddDbgValue(Clone, N0.getNode(), false);
7060         DEBUG(dbgs() << "SALVAGE: Rewriting"; N0.getNode()->dumprFull(this);
7061               dbgs() << " into " << *DIExpr << '\n');
7062       }
7063     }
7064   }
7065 }
7066 
7067 namespace {
7068 
7069 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
7070 /// pointed to by a use iterator is deleted, increment the use iterator
7071 /// so that it doesn't dangle.
7072 ///
7073 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
7074   SDNode::use_iterator &UI;
7075   SDNode::use_iterator &UE;
7076 
7077   void NodeDeleted(SDNode *N, SDNode *E) override {
7078     // Increment the iterator as needed.
7079     while (UI != UE && N == *UI)
7080       ++UI;
7081   }
7082 
7083 public:
7084   RAUWUpdateListener(SelectionDAG &d,
7085                      SDNode::use_iterator &ui,
7086                      SDNode::use_iterator &ue)
7087     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
7088 };
7089 
7090 } // end anonymous namespace
7091 
7092 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
7093 /// This can cause recursive merging of nodes in the DAG.
7094 ///
7095 /// This version assumes From has a single result value.
7096 ///
7097 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
7098   SDNode *From = FromN.getNode();
7099   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
7100          "Cannot replace with this method!");
7101   assert(From != To.getNode() && "Cannot replace uses of a node with itself");
7102 
7103   // Preserve Debug Values.
7104   TransferDbgValues(FromN, To);
7105 
7106   // Iterate over all the existing uses of From. New uses will be added
7107   // to the beginning of the use list, which we avoid visiting.
7108   // This specifically avoids visiting uses of From that arise while the
7109   // replacement is happening, because any such uses would be the result
7110   // of CSE: If an existing node looks like From after one of its operands
7111   // is replaced by To, we don't want to replace all of its users with To
7112   // too. See PR3018 for more info.
7113   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
7114   RAUWUpdateListener Listener(*this, UI, UE);
7115   while (UI != UE) {
7116     SDNode *User = *UI;
7117 
7118     // This node is about to morph, remove its old self from the CSE maps.
7119     RemoveNodeFromCSEMaps(User);
7120 
7121     // A user can appear in a use list multiple times, and when this
7122     // happens the uses are usually next to each other in the list.
7123     // To help reduce the number of CSE recomputations, process all
7124     // the uses of this user that we can find this way.
7125     do {
7126       SDUse &Use = UI.getUse();
7127       ++UI;
7128       Use.set(To);
7129     } while (UI != UE && *UI == User);
7130 
7131     // Now that we have modified User, add it back to the CSE maps. If it
7132     // already exists there, recursively merge the results together.
7133     AddModifiedNodeToCSEMaps(User);
7134   }
7135 
7136   // If we just RAUW'd the root, take note.
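  // (The root is held as a plain SDValue rather than as an SDUse in an
  // operand list, so the use-list walk above never updates it; it has to be
  // retargeted explicitly.)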
7137 if (FromN == getRoot()) 7138 setRoot(To); 7139 } 7140 7141 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 7142 /// This can cause recursive merging of nodes in the DAG. 7143 /// 7144 /// This version assumes that for each value of From, there is a 7145 /// corresponding value in To in the same position with the same type. 7146 /// 7147 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) { 7148 #ifndef NDEBUG 7149 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 7150 assert((!From->hasAnyUseOfValue(i) || 7151 From->getValueType(i) == To->getValueType(i)) && 7152 "Cannot use this version of ReplaceAllUsesWith!"); 7153 #endif 7154 7155 // Handle the trivial case. 7156 if (From == To) 7157 return; 7158 7159 // Preserve Debug Info. Only do this if there's a use. 7160 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 7161 if (From->hasAnyUseOfValue(i)) { 7162 assert((i < To->getNumValues()) && "Invalid To location"); 7163 TransferDbgValues(SDValue(From, i), SDValue(To, i)); 7164 } 7165 7166 // Iterate over just the existing users of From. See the comments in 7167 // the ReplaceAllUsesWith above. 7168 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 7169 RAUWUpdateListener Listener(*this, UI, UE); 7170 while (UI != UE) { 7171 SDNode *User = *UI; 7172 7173 // This node is about to morph, remove its old self from the CSE maps. 7174 RemoveNodeFromCSEMaps(User); 7175 7176 // A user can appear in a use list multiple times, and when this 7177 // happens the uses are usually next to each other in the list. 7178 // To help reduce the number of CSE recomputations, process all 7179 // the uses of this user that we can find this way. 7180 do { 7181 SDUse &Use = UI.getUse(); 7182 ++UI; 7183 Use.setNode(To); 7184 } while (UI != UE && *UI == User); 7185 7186 // Now that we have modified User, add it back to the CSE maps. If it 7187 // already exists there, recursively merge the results together. 7188 AddModifiedNodeToCSEMaps(User); 7189 } 7190 7191 // If we just RAUW'd the root, take note. 7192 if (From == getRoot().getNode()) 7193 setRoot(SDValue(To, getRoot().getResNo())); 7194 } 7195 7196 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 7197 /// This can cause recursive merging of nodes in the DAG. 7198 /// 7199 /// This version can replace From with any result values. To must match the 7200 /// number and types of values returned by From. 7201 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 7202 if (From->getNumValues() == 1) // Handle the simple case efficiently. 7203 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 7204 7205 // Preserve Debug Info. 7206 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 7207 TransferDbgValues(SDValue(From, i), *To); 7208 7209 // Iterate over just the existing users of From. See the comments in 7210 // the ReplaceAllUsesWith above. 7211 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 7212 RAUWUpdateListener Listener(*this, UI, UE); 7213 while (UI != UE) { 7214 SDNode *User = *UI; 7215 7216 // This node is about to morph, remove its old self from the CSE maps. 7217 RemoveNodeFromCSEMaps(User); 7218 7219 // A user can appear in a use list multiple times, and when this 7220 // happens the uses are usually next to each other in the list. 7221 // To help reduce the number of CSE recomputations, process all 7222 // the uses of this user that we can find this way. 
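    // (Illustrative example: a user U = ADD(From, From) holds two adjacent
    // SDUses of From; the inner loop below rewrites both before U is
    // re-added to the CSE maps exactly once.)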
7223 do { 7224 SDUse &Use = UI.getUse(); 7225 const SDValue &ToOp = To[Use.getResNo()]; 7226 ++UI; 7227 Use.set(ToOp); 7228 } while (UI != UE && *UI == User); 7229 7230 // Now that we have modified User, add it back to the CSE maps. If it 7231 // already exists there, recursively merge the results together. 7232 AddModifiedNodeToCSEMaps(User); 7233 } 7234 7235 // If we just RAUW'd the root, take note. 7236 if (From == getRoot().getNode()) 7237 setRoot(SDValue(To[getRoot().getResNo()])); 7238 } 7239 7240 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 7241 /// uses of other values produced by From.getNode() alone. The Deleted 7242 /// vector is handled the same way as for ReplaceAllUsesWith. 7243 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 7244 // Handle the really simple, really trivial case efficiently. 7245 if (From == To) return; 7246 7247 // Handle the simple, trivial, case efficiently. 7248 if (From.getNode()->getNumValues() == 1) { 7249 ReplaceAllUsesWith(From, To); 7250 return; 7251 } 7252 7253 // Preserve Debug Info. 7254 TransferDbgValues(From, To); 7255 7256 // Iterate over just the existing users of From. See the comments in 7257 // the ReplaceAllUsesWith above. 7258 SDNode::use_iterator UI = From.getNode()->use_begin(), 7259 UE = From.getNode()->use_end(); 7260 RAUWUpdateListener Listener(*this, UI, UE); 7261 while (UI != UE) { 7262 SDNode *User = *UI; 7263 bool UserRemovedFromCSEMaps = false; 7264 7265 // A user can appear in a use list multiple times, and when this 7266 // happens the uses are usually next to each other in the list. 7267 // To help reduce the number of CSE recomputations, process all 7268 // the uses of this user that we can find this way. 7269 do { 7270 SDUse &Use = UI.getUse(); 7271 7272 // Skip uses of different values from the same node. 7273 if (Use.getResNo() != From.getResNo()) { 7274 ++UI; 7275 continue; 7276 } 7277 7278 // If this node hasn't been modified yet, it's still in the CSE maps, 7279 // so remove its old self from the CSE maps. 7280 if (!UserRemovedFromCSEMaps) { 7281 RemoveNodeFromCSEMaps(User); 7282 UserRemovedFromCSEMaps = true; 7283 } 7284 7285 ++UI; 7286 Use.set(To); 7287 } while (UI != UE && *UI == User); 7288 7289 // We are iterating over all uses of the From node, so if a use 7290 // doesn't use the specific value, no changes are made. 7291 if (!UserRemovedFromCSEMaps) 7292 continue; 7293 7294 // Now that we have modified User, add it back to the CSE maps. If it 7295 // already exists there, recursively merge the results together. 7296 AddModifiedNodeToCSEMaps(User); 7297 } 7298 7299 // If we just RAUW'd the root, take note. 7300 if (From == getRoot()) 7301 setRoot(To); 7302 } 7303 7304 namespace { 7305 7306 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 7307 /// to record information about a use. 7308 struct UseMemo { 7309 SDNode *User; 7310 unsigned Index; 7311 SDUse *Use; 7312 }; 7313 7314 /// operator< - Sort Memos by User. 7315 bool operator<(const UseMemo &L, const UseMemo &R) { 7316 return (intptr_t)L.User < (intptr_t)R.User; 7317 } 7318 7319 } // end anonymous namespace 7320 7321 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving 7322 /// uses of other values produced by From.getNode() alone. The same value 7323 /// may appear in both the From and To list. The Deleted vector is 7324 /// handled the same way as for ReplaceAllUsesWith. 
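///
/// Illustrative sketch (OldN, NewLo, and NewHi are placeholders): both
/// results of a two-result node can be replaced in a single pass:
///
///   SDValue From[] = { SDValue(OldN, 0), SDValue(OldN, 1) };
///   SDValue To[]   = { NewLo, NewHi };
///   DAG.ReplaceAllUsesOfValuesWith(From, To, 2);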
7325 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
7326                                               const SDValue *To,
7327                                               unsigned Num) {
7328   // Handle the simple, trivial case efficiently.
7329   if (Num == 1)
7330     return ReplaceAllUsesOfValueWith(*From, *To);
7331 
7332   TransferDbgValues(*From, *To);
7333 
7334   // Read up all the uses and make records of them. This helps
7335   // processing new uses that are introduced during the
7336   // replacement process.
7337   SmallVector<UseMemo, 4> Uses;
7338   for (unsigned i = 0; i != Num; ++i) {
7339     unsigned FromResNo = From[i].getResNo();
7340     SDNode *FromNode = From[i].getNode();
7341     for (SDNode::use_iterator UI = FromNode->use_begin(),
7342          E = FromNode->use_end(); UI != E; ++UI) {
7343       SDUse &Use = UI.getUse();
7344       if (Use.getResNo() == FromResNo) {
7345         UseMemo Memo = { *UI, i, &Use };
7346         Uses.push_back(Memo);
7347       }
7348     }
7349   }
7350 
7351   // Sort the uses, so that all the uses from a given User are together.
7352   std::sort(Uses.begin(), Uses.end());
7353 
7354   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
7355        UseIndex != UseIndexEnd; ) {
7356     // We know that this user uses some value of From. If it is the right
7357     // value, update it.
7358     SDNode *User = Uses[UseIndex].User;
7359 
7360     // This node is about to morph, remove its old self from the CSE maps.
7361     RemoveNodeFromCSEMaps(User);
7362 
7363     // The Uses array is sorted, so all the uses for a given User
7364     // are next to each other in the list.
7365     // To help reduce the number of CSE recomputations, process all
7366     // the uses of this user that we can find this way.
7367     do {
7368       unsigned i = Uses[UseIndex].Index;
7369       SDUse &Use = *Uses[UseIndex].Use;
7370       ++UseIndex;
7371 
7372       Use.set(To[i]);
7373     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
7374 
7375     // Now that we have modified User, add it back to the CSE maps. If it
7376     // already exists there, recursively merge the results together.
7377     AddModifiedNodeToCSEMaps(User);
7378   }
7379 }
7380 
7381 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
7382 /// based on their topological order. It returns the maximum id and a vector
7383 /// of the SDNodes* in assigned order by reference.
7384 unsigned SelectionDAG::AssignTopologicalOrder() {
7385   unsigned DAGSize = 0;
7386 
7387   // SortedPos tracks the progress of the algorithm. Nodes before it are
7388   // sorted, nodes after it are unsorted. When the algorithm completes
7389   // it is at the end of the list.
7390   allnodes_iterator SortedPos = allnodes_begin();
7391 
7392   // Visit all the nodes. Move nodes with no operands to the front of
7393   // the list immediately. Annotate nodes that do have operands with their
7394   // operand count. Before we do this, the Node Id fields of the nodes
7395   // may contain arbitrary values. After, the Node Id fields for nodes
7396   // before SortedPos will contain the topological sort index, and the
7397   // Node Id fields for nodes at SortedPos and after will contain the
7398   // count of outstanding operands.
7399   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
7400     SDNode *N = &*I++;
7401     checkForCycles(N, this);
7402     unsigned Degree = N->getNumOperands();
7403     if (Degree == 0) {
7404       // A node with no operands, add it to the result array immediately.
7405       N->setNodeId(DAGSize++);
7406       allnodes_iterator Q(N);
7407       if (Q != SortedPos)
7408         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
7409       assert(SortedPos != AllNodes.end() && "Overran node list");
7410       ++SortedPos;
7411     } else {
7412       // Temporarily use the Node Id as scratch space for the degree count.
7413       N->setNodeId(Degree);
7414     }
7415   }
7416 
7417   // Visit all the nodes. As we iterate, move nodes into sorted order,
7418   // such that by the time the end is reached all nodes will be sorted.
7419   for (SDNode &Node : allnodes()) {
7420     SDNode *N = &Node;
7421     checkForCycles(N, this);
7422     // N is in sorted position, so all its uses have one less operand
7423     // that needs to be sorted.
7424     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
7425          UI != UE; ++UI) {
7426       SDNode *P = *UI;
7427       unsigned Degree = P->getNodeId();
7428       assert(Degree != 0 && "Invalid node degree");
7429       --Degree;
7430       if (Degree == 0) {
7431         // All of P's operands are sorted, so P may be sorted now.
7432         P->setNodeId(DAGSize++);
7433         if (P->getIterator() != SortedPos)
7434           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
7435         assert(SortedPos != AllNodes.end() && "Overran node list");
7436         ++SortedPos;
7437       } else {
7438         // Update P's outstanding operand count.
7439         P->setNodeId(Degree);
7440       }
7441     }
7442     if (Node.getIterator() == SortedPos) {
7443 #ifndef NDEBUG
7444       allnodes_iterator I(N);
7445       SDNode *S = &*++I;
7446       dbgs() << "Overran sorted position:\n";
7447       S->dumprFull(this); dbgs() << "\n";
7448       dbgs() << "Checking if this is due to cycles\n";
7449       checkForCycles(this, true);
7450 #endif
7451       llvm_unreachable(nullptr);
7452     }
7453   }
7454 
7455   assert(SortedPos == AllNodes.end() &&
7456          "Topological sort incomplete!");
7457   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
7458          "First node in topological sort is not the entry token!");
7459   assert(AllNodes.front().getNodeId() == 0 &&
7460          "First node in topological sort has non-zero id!");
7461   assert(AllNodes.front().getNumOperands() == 0 &&
7462          "First node in topological sort has operands!");
7463   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
7464          "Last node in topological sort has unexpected id!");
7465   assert(AllNodes.back().use_empty() &&
7466          "Last node in topological sort has users!");
7467   assert(DAGSize == allnodes_size() && "Node count mismatch!");
7468   return DAGSize;
7469 }
7470 
7471 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
7472 /// value is produced by SD.
7473 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
7474   if (SD) {
7475     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
7476     SD->setHasDebugValue(true);
7477   }
7478   DbgInfo->add(DB, SD, isParameter);
7479 }
7480 
7481 /// Transfer SDDbgValues. Called when replacing nodes.
7482 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
7483   if (From == To || !From.getNode()->getHasDebugValue())
7484     return;
7485   SDNode *FromNode = From.getNode();
7486   SDNode *ToNode = To.getNode();
7487   SmallVector<SDDbgValue *, 2> ClonedDVs;
7488   for (auto *Dbg : GetDbgValues(FromNode)) {
7489     // Only add DbgValues attached to the same ResNo.
7490 if (Dbg->getKind() == SDDbgValue::SDNODE && 7491 Dbg->getSDNode() == From.getNode() && 7492 Dbg->getResNo() == From.getResNo() && !Dbg->isInvalidated()) { 7493 assert(FromNode != ToNode && 7494 "Should not transfer Debug Values intranode"); 7495 SDDbgValue *Clone = getDbgValue(Dbg->getVariable(), Dbg->getExpression(), 7496 ToNode, To.getResNo(), Dbg->isIndirect(), 7497 Dbg->getDebugLoc(), Dbg->getOrder()); 7498 ClonedDVs.push_back(Clone); 7499 Dbg->setIsInvalidated(); 7500 } 7501 } 7502 for (SDDbgValue *I : ClonedDVs) 7503 AddDbgValue(I, ToNode, false); 7504 } 7505 7506 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, 7507 SDValue NewMemOp) { 7508 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node"); 7509 // The new memory operation must have the same position as the old load in 7510 // terms of memory dependency. Create a TokenFactor for the old load and new 7511 // memory operation and update uses of the old load's output chain to use that 7512 // TokenFactor. 7513 SDValue OldChain = SDValue(OldLoad, 1); 7514 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 7515 if (!OldLoad->hasAnyUseOfValue(1)) 7516 return NewChain; 7517 7518 SDValue TokenFactor = 7519 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 7520 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 7521 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 7522 return TokenFactor; 7523 } 7524 7525 //===----------------------------------------------------------------------===// 7526 // SDNode Class 7527 //===----------------------------------------------------------------------===// 7528 7529 bool llvm::isNullConstant(SDValue V) { 7530 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7531 return Const != nullptr && Const->isNullValue(); 7532 } 7533 7534 bool llvm::isNullFPConstant(SDValue V) { 7535 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 7536 return Const != nullptr && Const->isZero() && !Const->isNegative(); 7537 } 7538 7539 bool llvm::isAllOnesConstant(SDValue V) { 7540 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7541 return Const != nullptr && Const->isAllOnesValue(); 7542 } 7543 7544 bool llvm::isOneConstant(SDValue V) { 7545 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7546 return Const != nullptr && Const->isOne(); 7547 } 7548 7549 bool llvm::isBitwiseNot(SDValue V) { 7550 return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1)); 7551 } 7552 7553 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) { 7554 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 7555 return CN; 7556 7557 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 7558 BitVector UndefElements; 7559 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 7560 7561 // BuildVectors can truncate their operands. Ignore that case here. 7562 // FIXME: We blindly ignore splats which include undef which is overly 7563 // pessimistic. 
7564 if (CN && UndefElements.none() && 7565 CN->getValueType(0) == N.getValueType().getScalarType()) 7566 return CN; 7567 } 7568 7569 return nullptr; 7570 } 7571 7572 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) { 7573 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 7574 return CN; 7575 7576 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 7577 BitVector UndefElements; 7578 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 7579 7580 if (CN && UndefElements.none()) 7581 return CN; 7582 } 7583 7584 return nullptr; 7585 } 7586 7587 HandleSDNode::~HandleSDNode() { 7588 DropOperands(); 7589 } 7590 7591 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 7592 const DebugLoc &DL, 7593 const GlobalValue *GA, EVT VT, 7594 int64_t o, unsigned char TF) 7595 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 7596 TheGlobal = GA; 7597 } 7598 7599 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 7600 EVT VT, unsigned SrcAS, 7601 unsigned DestAS) 7602 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 7603 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 7604 7605 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 7606 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 7607 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 7608 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 7609 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 7610 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 7611 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 7612 7613 // We check here that the size of the memory operand fits within the size of 7614 // the MMO. This is because the MMO might indicate only a possible address 7615 // range instead of specifying the affected memory addresses precisely. 7616 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!"); 7617 } 7618 7619 /// Profile - Gather unique data for the node. 7620 /// 7621 void SDNode::Profile(FoldingSetNodeID &ID) const { 7622 AddNodeIDNode(ID, this); 7623 } 7624 7625 namespace { 7626 7627 struct EVTArray { 7628 std::vector<EVT> VTs; 7629 7630 EVTArray() { 7631 VTs.reserve(MVT::LAST_VALUETYPE); 7632 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 7633 VTs.push_back(MVT((MVT::SimpleValueType)i)); 7634 } 7635 }; 7636 7637 } // end anonymous namespace 7638 7639 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 7640 static ManagedStatic<EVTArray> SimpleVTArray; 7641 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 7642 7643 /// getValueTypeList - Return a pointer to the specified value type. 7644 /// 7645 const EVT *SDNode::getValueTypeList(EVT VT) { 7646 if (VT.isExtended()) { 7647 sys::SmartScopedLock<true> Lock(*VTMutex); 7648 return &(*EVTs->insert(VT).first); 7649 } else { 7650 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 7651 "Value type out of range!"); 7652 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 7653 } 7654 } 7655 7656 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 7657 /// indicated value. This method ignores uses of other values defined by this 7658 /// operation. 
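///
/// For example (sketch), a combine might check that the loaded value of a
/// load node LD (result #0) has exactly one user:
///
///   if (LD->hasNUsesOfValue(1, 0)) {
///     // Exactly one user of the loaded value; uses of the chain result
///     // (result #1) are not counted.
///   }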
7659 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
7660   assert(Value < getNumValues() && "Bad value!");
7661 
7662   // TODO: Only iterate over uses of a given value of the node
7663   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
7664     if (UI.getUse().getResNo() == Value) {
7665       if (NUses == 0)
7666         return false;
7667       --NUses;
7668     }
7669   }
7670 
7671   // Found exactly the right number of uses?
7672   return NUses == 0;
7673 }
7674 
7675 /// hasAnyUseOfValue - Return true if there is any use of the indicated
7676 /// value. This method ignores uses of other values defined by this operation.
7677 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
7678   assert(Value < getNumValues() && "Bad value!");
7679 
7680   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
7681     if (UI.getUse().getResNo() == Value)
7682       return true;
7683 
7684   return false;
7685 }
7686 
7687 /// isOnlyUserOf - Return true if this node is the only user of N.
7688 bool SDNode::isOnlyUserOf(const SDNode *N) const {
7689   bool Seen = false;
7690   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
7691     SDNode *User = *I;
7692     if (User == this)
7693       Seen = true;
7694     else
7695       return false;
7696   }
7697 
7698   return Seen;
7699 }
7700 
7701 /// Return true if the only users of N are contained in Nodes.
7702 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
7703   bool Seen = false;
7704   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
7705     SDNode *User = *I;
7706     if (llvm::any_of(Nodes,
7707                      [&User](const SDNode *Node) { return User == Node; }))
7708       Seen = true;
7709     else
7710       return false;
7711   }
7712 
7713   return Seen;
7714 }
7715 
7716 /// isOperandOf - Return true if this value is an operand of N.
7717 bool SDValue::isOperandOf(const SDNode *N) const {
7718   for (const SDValue &Op : N->op_values())
7719     if (*this == Op)
7720       return true;
7721   return false;
7722 }
7723 
7724 bool SDNode::isOperandOf(const SDNode *N) const {
7725   for (const SDValue &Op : N->op_values())
7726     if (this == Op.getNode())
7727       return true;
7728   return false;
7729 }
7730 
7731 /// reachesChainWithoutSideEffects - Return true if this operand (which must
7732 /// be a chain) reaches the specified operand without crossing any
7733 /// side-effecting instructions on any chain path. In practice, this looks
7734 /// through token factors and non-volatile loads. In order to remain efficient,
7735 /// this only looks a couple of nodes in; it does not do an exhaustive search.
7736 ///
7737 /// Note that we only need to examine chains when we're searching for
7738 /// side-effects; SelectionDAG requires that all side-effects are represented
7739 /// by chains, even if another operand would force a specific ordering. This
7740 /// constraint is necessary to allow transformations like splitting loads.
7741 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
7742                                              unsigned Depth) const {
7743   if (*this == Dest) return true;
7744 
7745   // Don't search too deeply, we just want to be able to see through
7746   // TokenFactor's etc.
7747   if (Depth == 0) return false;
7748 
7749   // If this is a token factor, all inputs to the TF happen in parallel.
7750   if (getOpcode() == ISD::TokenFactor) {
7751     // First, try a shallow search.
7752     if (is_contained((*this)->ops(), Dest)) {
7753       // We found the chain we want as an operand of this TokenFactor.
      // Essentially, we reach the chain without side-effects if we could
      // serialize the TokenFactor into a simple chain of operations with
      // Dest as the last operation. This is automatically true if the
      // chain has one use: there are no other ordering constraints.
      // If the chain has more than one use, we give up: some other
      // use of Dest might force a side-effect between Dest and the current
      // node.
      if (Dest.hasOneUse())
        return true;
    }
    // Next, try a deep search: check whether every operand of the TokenFactor
    // reaches Dest.
    return llvm::all_of((*this)->ops(), [=](SDValue Op) {
      return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
    });
  }

  // Loads don't have side effects; look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth - 1);
  }
  return false;
}

bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}

void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
  this->Flags.intersectWith(Flags);
}

SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
            getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                    getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
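        // (E.g. the VT operand of a SIGN_EXTEND_INREG node, which is the same
        // for every unrolled element.)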
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}

bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);

  auto BaseLocDecomp = BaseIndexOffset::match(BaseLoc, *this);
  auto LocDecomp = BaseIndexOffset::match(Loc, *this);

  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    KnownBits Known(PtrWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    // Use an unsigned shift so that AlignBits == 31 does not overflow an int.
    unsigned Align = AlignBits ? 1u << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  // Use INT_MIN as the "no frame index" sentinel; spelling it (1 << 31)
  // overflows a signed int and is not portable.
  int FrameIdx = std::numeric_limits<int>::min();
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != std::numeric_limits<int>::min()) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
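/// For example, a v8i32 vector typically yields (v4i32, v4i32), while a
/// scalar type that is expanded (e.g. i64 on a 32-bit target) yields the
/// type it is transformed to for both halves.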
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
             N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}

/// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
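    // (When IsBigEndian, operand 0 occupies the most significant bits of the
    // splat value, hence the reversed index above.)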
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements smaller than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    assert(getOperand(0).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(0);
  }

  return Splatted;
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
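  // (E.g. the mask <-1,-1,3,3> is a splat of element 3, while <-1,0,1,-1>
  // is not a splat.)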
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

/// Returns the SDNode if it is a constant integer BuildVector
/// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}
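// Example usage (illustrative): code that mutates the DAG by hand can call
// llvm::checkForCycles(&DAG, /*force=*/true) to catch an accidentally
// introduced cycle early. Without force, the walk runs only when
// EXPENSIVE_CHECKS is enabled, and it compiles away entirely in NDEBUG
// builds.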