//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}
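
// A minimal usage sketch (illustrative only, not a call site from this file);
// note that the VT array handed to makeVTList must outlive the returned
// SDVTList, which stores only the raw pointer:
//
// \code
//   static const EVT VTs[] = {MVT::i32, MVT::Other};
//   SDVTList VTList = makeVTList(VTs, 2); // one value plus one chain result
// \endcode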

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  EVT EltVT = N->getValueType(0).getVectorElementType();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs) &&
         EltVT.getSizeInBits() >= SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs.
  // Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (!Op.isUndef())
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
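
// Worked example of the bit twiddling above (illustrative): ISD::CondCode
// packs the E, G, L and U tests into bits 0-3, so SETOGT (0b0010, G only)
// becomes SETOLT (0b0100, L only) when its operands are swapped, while
// SETEQ and SETNE are unchanged because their L and G bits are equal.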

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if it is an unsigned comparison. Return zero if the operation does
/// not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}
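
// How these helpers combine (a sketch mirroring AddNodeIDNode below, with LHS
// and RHS standing in for arbitrary SDValues): the FoldingSetNodeID describes
// a node as opcode + interned VT-list pointer + (operand, result number)
// pairs, so structurally identical nodes hash to the same CSE slot.
//
// \code
//   FoldingSetNodeID ID;
//   AddNodeIDNode(ID, ISD::ADD, DAG.getVTList(MVT::i32), {LHS, RHS});
// \endcode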

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}
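
// A hypothetical listener (not part of this file) showing how clients observe
// the deletions performed above; the DAGUpdateListener base class registers
// itself with the DAG on construction and unregisters on destruction:
//
// \code
//   struct PrintDeletions : SelectionDAG::DAGUpdateListener {
//     PrintDeletions(SelectionDAG &DAG) : DAGUpdateListener(DAG) {}
//     void NodeDeleted(SDNode *N, SDNode *) override {
//       dbgs() << "deleted: " << N->getOperationName() << "\n";
//     }
//   };
// \endcode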

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}
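
// The routines above pair up into the usual in-place update protocol
// (a sketch, not a complete call site):
//
// \code
//   RemoveNodeFromCSEMaps(N);    // forget N's old shape
//   // ... mutate N's operands or flags in place ...
//   AddModifiedNodeToCSEMaps(N); // re-intern; may merge N into a twin
// \endcode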

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE) {
  MF = &NewMF;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  Context = &MF->getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                              EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}
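
// Illustrative example of the *_EXTEND_VECTOR_INREG contract checked above:
// with a v4i32 input, (v2i64 sign_extend_vector_inreg x) sign-extends only
// the low two i32 lanes to i64, so input and result are both 128 bits wide
// but the result has fewer, wider lanes.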

SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue TrueValue;
  switch (TLI->getBooleanContents(VT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    TrueValue = getConstant(1, DL, VT);
    break;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
                            VT);
    break;
  }
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // from the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
    return getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  return Result;
}
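
// Worked example of the expansion path above (assuming a 32-bit little-endian
// target where i64 is expanded to i32, e.g. MIPS32): a v2i64 splat of
// 0x0000000100000002 becomes
//   (v2i64 (bitcast (v4i32 build_vector 2, 1, 2, 1)))
// -- each 64-bit element is emitted as two 32-bit parts, low part first.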

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
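
// The leaf getters above and below all follow the same CSE idiom (a sketch;
// SomeSDNode and Payload stand in for the node-specific pieces): profile the
// node, probe the folding set, and allocate only on a miss.
//
// \code
//   FoldingSetNodeID ID;
//   AddNodeIDNode(ID, Opc, getVTList(VT), None);
//   ID.AddInteger(Payload);               // node-specific fields
//   void *IP = nullptr;
//   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
//     return SDValue(E, 0);               // reuse the interned node
//   auto *N = newSDNode<SomeSDNode>(...); // otherwise create and intern it
//   CSEMap.InsertNode(N, IP);
//   InsertNode(N);
// \endcode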

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent constant pools");
  if (Alignment == 0)
    Alignment = MF->getFunction()->optForSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
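
// Illustrative call (values hypothetical): passing Alignment == 0 asks for
// the default computed above, i.e. the ABI alignment when optimizing for
// size and the preferred alignment otherwise:
//
// \code
//   SDValue CP = DAG.getConstantPool(C, PtrVT, /*Alignment=*/0);
// \endcode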

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent constant pools");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
                           ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}
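
// Mask-commuting example for commuteShuffle below (illustrative): with
// 4-element vectors, shuffling (A, B) with mask <0, 5, 2, 7> selects
// A[0], B[1], A[2], B[3]; after commuting, the operands become (B, A) and
// the mask <4, 1, 6, 3>, which selects exactly the same values.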

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask, [&](int M) { return M < (NElts * 2); }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  // If shuffling a splat, try to blend the splat instead. We do this here so
  // that even when this arises during lowering we don't have to re-handle it.
  auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
    BitVector UndefElements;
    SDValue Splat = BV->getSplatValue(&UndefElements);
    if (!Splat)
      return;

    for (int i = 0; i < NElts; ++i) {
      if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
        continue;

      // If this input comes from undef, mark it as such.
      if (UndefElements[MaskVec[i] - Offset]) {
        MaskVec[i] = -1;
        continue;
      }

      // If we can blend a non-undef lane, use that instead.
      if (!UndefElements[i])
        MaskVec[i] = i + Offset;
    }
  };
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    BlendSplat(N1BV, 0);
  if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
    BlendSplat(N2BV, NElts);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts.
    // We check that these don't change the number (and size) of elements and
    // just change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
        V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements match or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
1613 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1614 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc); 1615 1616 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1617 dl.getDebugLoc(), MaskAlloc); 1618 createOperands(N, Ops); 1619 1620 CSEMap.InsertNode(N, IP); 1621 InsertNode(N); 1622 return SDValue(N, 0); 1623 } 1624 1625 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1626 MVT VT = SV.getSimpleValueType(0); 1627 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1628 ShuffleVectorSDNode::commuteMask(MaskVec); 1629 1630 SDValue Op0 = SV.getOperand(0); 1631 SDValue Op1 = SV.getOperand(1); 1632 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1633 } 1634 1635 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1636 FoldingSetNodeID ID; 1637 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1638 ID.AddInteger(RegNo); 1639 void *IP = nullptr; 1640 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1641 return SDValue(E, 0); 1642 1643 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1644 CSEMap.InsertNode(N, IP); 1645 InsertNode(N); 1646 return SDValue(N, 0); 1647 } 1648 1649 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1650 FoldingSetNodeID ID; 1651 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1652 ID.AddPointer(RegMask); 1653 void *IP = nullptr; 1654 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1655 return SDValue(E, 0); 1656 1657 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1658 CSEMap.InsertNode(N, IP); 1659 InsertNode(N); 1660 return SDValue(N, 0); 1661 } 1662 1663 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1664 MCSymbol *Label) { 1665 FoldingSetNodeID ID; 1666 SDValue Ops[] = { Root }; 1667 AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops); 1668 ID.AddPointer(Label); 1669 void *IP = nullptr; 1670 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1671 return SDValue(E, 0); 1672 1673 auto *N = newSDNode<EHLabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label); 1674 createOperands(N, Ops); 1675 1676 CSEMap.InsertNode(N, IP); 1677 InsertNode(N); 1678 return SDValue(N, 0); 1679 } 1680 1681 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1682 int64_t Offset, 1683 bool isTarget, 1684 unsigned char TargetFlags) { 1685 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1686 1687 FoldingSetNodeID ID; 1688 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1689 ID.AddPointer(BA); 1690 ID.AddInteger(Offset); 1691 ID.AddInteger(TargetFlags); 1692 void *IP = nullptr; 1693 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1694 return SDValue(E, 0); 1695 1696 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1697 CSEMap.InsertNode(N, IP); 1698 InsertNode(N); 1699 return SDValue(N, 0); 1700 } 1701 1702 SDValue SelectionDAG::getSrcValue(const Value *V) { 1703 assert((!V || V->getType()->isPointerTy()) && 1704 "SrcValue is not a pointer?"); 1705 1706 FoldingSetNodeID ID; 1707 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1708 ID.AddPointer(V); 1709 1710 void *IP = nullptr; 1711 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1712 return SDValue(E, 0); 1713 1714 auto *N = newSDNode<SrcValueSDNode>(V); 1715 CSEMap.InsertNode(N, IP); 1716 InsertNode(N); 1717 return SDValue(N, 0); 1718 } 1719 1720 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1721 FoldingSetNodeID ID; 1722 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1723 ID.AddPointer(MD); 1724 1725 void *IP = nullptr; 1726 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1727 return SDValue(E, 0); 1728 1729 auto *N = newSDNode<MDNodeSDNode>(MD); 1730 CSEMap.InsertNode(N, IP); 1731 InsertNode(N); 1732 return SDValue(N, 0); 1733 } 1734 1735 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1736 if (VT == V.getValueType()) 1737 return V; 1738 1739 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1740 } 1741 1742 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1743 unsigned SrcAS, unsigned DestAS) { 1744 SDValue Ops[] = {Ptr}; 1745 FoldingSetNodeID ID; 1746 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1747 ID.AddInteger(SrcAS); 1748 ID.AddInteger(DestAS); 1749 1750 void *IP = nullptr; 1751 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1752 return SDValue(E, 0); 1753 1754 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1755 VT, SrcAS, DestAS); 1756 createOperands(N, Ops); 1757 1758 CSEMap.InsertNode(N, IP); 1759 InsertNode(N); 1760 return SDValue(N, 0); 1761 } 1762 1763 /// getShiftAmountOperand - Return the specified value casted to 1764 /// the target's desired shift amount type. 
1765 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1766 EVT OpTy = Op.getValueType(); 1767 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1768 if (OpTy == ShTy || OpTy.isVector()) return Op; 1769 1770 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1771 } 1772 1773 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1774 SDLoc dl(Node); 1775 const TargetLowering &TLI = getTargetLoweringInfo(); 1776 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1777 EVT VT = Node->getValueType(0); 1778 SDValue Tmp1 = Node->getOperand(0); 1779 SDValue Tmp2 = Node->getOperand(1); 1780 unsigned Align = Node->getConstantOperandVal(3); 1781 1782 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1783 Tmp2, MachinePointerInfo(V)); 1784 SDValue VAList = VAListLoad; 1785 1786 if (Align > TLI.getMinStackArgumentAlignment()) { 1787 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 1788 1789 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1790 getConstant(Align - 1, dl, VAList.getValueType())); 1791 1792 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1793 getConstant(-(int64_t)Align, dl, VAList.getValueType())); 1794 } 1795 1796 // Increment the pointer, VAList, to the next vaarg 1797 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1798 getConstant(getDataLayout().getTypeAllocSize( 1799 VT.getTypeForEVT(*getContext())), 1800 dl, VAList.getValueType())); 1801 // Store the incremented VAList to the legalized pointer 1802 Tmp1 = 1803 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1804 // Load the actual argument out of the pointer VAList 1805 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1806 } 1807 1808 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1809 SDLoc dl(Node); 1810 const TargetLowering &TLI = getTargetLoweringInfo(); 1811 // This defaults to loading a pointer from the input and storing it to the 1812 // output, returning the chain. 
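  // As a rough sketch (illustrative, assuming the common case where va_list
  // is a single pointer-sized slot), the expansion below amounts to:
  //   void va_copy_default(char **Dst, char **Src) { *Dst = *Src; }
  // i.e. one load of the source slot and one store to the destination slot,
  // with the chain threaded through both operations.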
1813 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1814 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1815 SDValue Tmp1 = 1816 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1817 Node->getOperand(2), MachinePointerInfo(VS)); 1818 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1819 MachinePointerInfo(VD)); 1820 } 1821 1822 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1823 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1824 unsigned ByteSize = VT.getStoreSize(); 1825 Type *Ty = VT.getTypeForEVT(*getContext()); 1826 unsigned StackAlign = 1827 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1828 1829 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1830 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1831 } 1832 1833 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1834 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 1835 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1836 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1837 const DataLayout &DL = getDataLayout(); 1838 unsigned Align = 1839 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1840 1841 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1842 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 1843 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1844 } 1845 1846 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 1847 ISD::CondCode Cond, const SDLoc &dl) { 1848 // These setcc operations always fold. 1849 switch (Cond) { 1850 default: break; 1851 case ISD::SETFALSE: 1852 case ISD::SETFALSE2: return getConstant(0, dl, VT); 1853 case ISD::SETTRUE: 1854 case ISD::SETTRUE2: { 1855 TargetLowering::BooleanContent Cnt = 1856 TLI->getBooleanContents(N1->getValueType(0)); 1857 return getConstant( 1858 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? 
-1ULL : 1, dl, 1859 VT); 1860 } 1861 1862 case ISD::SETOEQ: 1863 case ISD::SETOGT: 1864 case ISD::SETOGE: 1865 case ISD::SETOLT: 1866 case ISD::SETOLE: 1867 case ISD::SETONE: 1868 case ISD::SETO: 1869 case ISD::SETUO: 1870 case ISD::SETUEQ: 1871 case ISD::SETUNE: 1872 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!"); 1873 break; 1874 } 1875 1876 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 1877 const APInt &C2 = N2C->getAPIntValue(); 1878 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 1879 const APInt &C1 = N1C->getAPIntValue(); 1880 1881 switch (Cond) { 1882 default: llvm_unreachable("Unknown integer setcc!"); 1883 case ISD::SETEQ: return getConstant(C1 == C2, dl, VT); 1884 case ISD::SETNE: return getConstant(C1 != C2, dl, VT); 1885 case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT); 1886 case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT); 1887 case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT); 1888 case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT); 1889 case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT); 1890 case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT); 1891 case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT); 1892 case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT); 1893 } 1894 } 1895 } 1896 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) { 1897 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) { 1898 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); 1899 switch (Cond) { 1900 default: break; 1901 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 1902 return getUNDEF(VT); 1903 LLVM_FALLTHROUGH; 1904 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT); 1905 case ISD::SETNE: if (R==APFloat::cmpUnordered) 1906 return getUNDEF(VT); 1907 LLVM_FALLTHROUGH; 1908 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan || 1909 R==APFloat::cmpLessThan, dl, VT); 1910 case ISD::SETLT: if (R==APFloat::cmpUnordered) 1911 return getUNDEF(VT); 1912 LLVM_FALLTHROUGH; 1913 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT); 1914 case ISD::SETGT: if (R==APFloat::cmpUnordered) 1915 return getUNDEF(VT); 1916 LLVM_FALLTHROUGH; 1917 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT); 1918 case ISD::SETLE: if (R==APFloat::cmpUnordered) 1919 return getUNDEF(VT); 1920 LLVM_FALLTHROUGH; 1921 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan || 1922 R==APFloat::cmpEqual, dl, VT); 1923 case ISD::SETGE: if (R==APFloat::cmpUnordered) 1924 return getUNDEF(VT); 1925 LLVM_FALLTHROUGH; 1926 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan || 1927 R==APFloat::cmpEqual, dl, VT); 1928 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT); 1929 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT); 1930 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered || 1931 R==APFloat::cmpEqual, dl, VT); 1932 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT); 1933 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered || 1934 R==APFloat::cmpLessThan, dl, VT); 1935 case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan || 1936 R==APFloat::cmpUnordered, dl, VT); 1937 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT); 1938 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT); 1939 } 1940 } else { 1941 // Ensure that the constant occurs on the RHS. 
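      // For example (illustrative): "setcc C1, X, setlt" is re-emitted as
      // "setcc X, C1, setgt", since getSetCCSwappedOperands(SETLT) == SETGT,
      // and only when the swapped predicate is legal for the comparison type.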
1942 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 1943 MVT CompVT = N1.getValueType().getSimpleVT(); 1944 if (!TLI->isCondCodeLegal(SwappedCond, CompVT)) 1945 return SDValue(); 1946 1947 return getSetCC(dl, VT, N2, N1, SwappedCond); 1948 } 1949 } 1950 1951 // Could not fold it. 1952 return SDValue(); 1953 } 1954 1955 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 1956 /// use this predicate to simplify operations downstream. 1957 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 1958 unsigned BitWidth = Op.getScalarValueSizeInBits(); 1959 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 1960 } 1961 1962 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 1963 /// this predicate to simplify operations downstream. Mask is known to be zero 1964 /// for bits that V cannot have. 1965 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 1966 unsigned Depth) const { 1967 KnownBits Known; 1968 computeKnownBits(Op, Known, Depth); 1969 return Mask.isSubsetOf(Known.Zero); 1970 } 1971 1972 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 1973 /// is less than the element bit-width of the shift node, return it. 1974 static const APInt *getValidShiftAmountConstant(SDValue V) { 1975 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 1976 // Shifting more than the bitwidth is not valid. 1977 const APInt &ShAmt = SA->getAPIntValue(); 1978 if (ShAmt.ult(V.getScalarValueSizeInBits())) 1979 return &ShAmt; 1980 } 1981 return nullptr; 1982 } 1983 1984 /// Determine which bits of Op are known to be either zero or one and return 1985 /// them in Known. For vectors, the known bits are those that are shared by 1986 /// every vector element. 1987 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 1988 unsigned Depth) const { 1989 EVT VT = Op.getValueType(); 1990 APInt DemandedElts = VT.isVector() 1991 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 1992 : APInt(1, 1); 1993 computeKnownBits(Op, Known, DemandedElts, Depth); 1994 } 1995 1996 /// Determine which bits of Op are known to be either zero or one and return 1997 /// them in Known. The DemandedElts argument allows us to only collect the known 1998 /// bits that are shared by the requested vector elements. 1999 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 2000 const APInt &DemandedElts, 2001 unsigned Depth) const { 2002 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2003 2004 Known = KnownBits(BitWidth); // Don't know anything. 2005 if (Depth == 6) 2006 return; // Limit search depth. 2007 2008 KnownBits Known2; 2009 unsigned NumElts = DemandedElts.getBitWidth(); 2010 2011 if (!DemandedElts) 2012 return; // No demanded elts, better to assume we don't know anything. 2013 2014 unsigned Opcode = Op.getOpcode(); 2015 switch (Opcode) { 2016 case ISD::Constant: 2017 // We know all of the bits for a constant! 2018 Known.One = cast<ConstantSDNode>(Op)->getAPIntValue(); 2019 Known.Zero = ~Known.One; 2020 break; 2021 case ISD::BUILD_VECTOR: 2022 // Collect the known bits that are shared by every demanded vector element. 
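    // Worked example (illustrative): for a v2i8 build_vector (0x0F, 0x03),
    // element 0 contributes One=0x0F/Zero=0xF0 and element 1 contributes
    // One=0x03/Zero=0xFC; intersecting both gives One=0x03, Zero=0xF0 -- the
    // low two bits are known one and the high nibble is known zero.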
2023 assert(NumElts == Op.getValueType().getVectorNumElements() && 2024 "Unexpected vector size"); 2025 Known.Zero.setAllBits(); Known.One.setAllBits(); 2026 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2027 if (!DemandedElts[i]) 2028 continue; 2029 2030 SDValue SrcOp = Op.getOperand(i); 2031 computeKnownBits(SrcOp, Known2, Depth + 1); 2032 2033 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2034 if (SrcOp.getValueSizeInBits() != BitWidth) { 2035 assert(SrcOp.getValueSizeInBits() > BitWidth && 2036 "Expected BUILD_VECTOR implicit truncation"); 2037 Known2 = Known2.trunc(BitWidth); 2038 } 2039 2040 // Known bits are the values that are shared by every demanded element. 2041 Known.One &= Known2.One; 2042 Known.Zero &= Known2.Zero; 2043 2044 // If we don't know any bits, early out. 2045 if (!Known.One && !Known.Zero) 2046 break; 2047 } 2048 break; 2049 case ISD::VECTOR_SHUFFLE: { 2050 // Collect the known bits that are shared by every vector element referenced 2051 // by the shuffle. 2052 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2053 Known.Zero.setAllBits(); Known.One.setAllBits(); 2054 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2055 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2056 for (unsigned i = 0; i != NumElts; ++i) { 2057 if (!DemandedElts[i]) 2058 continue; 2059 2060 int M = SVN->getMaskElt(i); 2061 if (M < 0) { 2062 // For UNDEF elements, we don't know anything about the common state of 2063 // the shuffle result. 2064 Known.resetAll(); 2065 DemandedLHS.clearAllBits(); 2066 DemandedRHS.clearAllBits(); 2067 break; 2068 } 2069 2070 if ((unsigned)M < NumElts) 2071 DemandedLHS.setBit((unsigned)M % NumElts); 2072 else 2073 DemandedRHS.setBit((unsigned)M % NumElts); 2074 } 2075 // Known bits are the values that are shared by every demanded element. 2076 if (!!DemandedLHS) { 2077 SDValue LHS = Op.getOperand(0); 2078 computeKnownBits(LHS, Known2, DemandedLHS, Depth + 1); 2079 Known.One &= Known2.One; 2080 Known.Zero &= Known2.Zero; 2081 } 2082 // If we don't know any bits, early out. 2083 if (!Known.One && !Known.Zero) 2084 break; 2085 if (!!DemandedRHS) { 2086 SDValue RHS = Op.getOperand(1); 2087 computeKnownBits(RHS, Known2, DemandedRHS, Depth + 1); 2088 Known.One &= Known2.One; 2089 Known.Zero &= Known2.Zero; 2090 } 2091 break; 2092 } 2093 case ISD::CONCAT_VECTORS: { 2094 // Split DemandedElts and test each of the demanded subvectors. 2095 Known.Zero.setAllBits(); Known.One.setAllBits(); 2096 EVT SubVectorVT = Op.getOperand(0).getValueType(); 2097 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 2098 unsigned NumSubVectors = Op.getNumOperands(); 2099 for (unsigned i = 0; i != NumSubVectors; ++i) { 2100 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 2101 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 2102 if (!!DemandedSub) { 2103 SDValue Sub = Op.getOperand(i); 2104 computeKnownBits(Sub, Known2, DemandedSub, Depth + 1); 2105 Known.One &= Known2.One; 2106 Known.Zero &= Known2.Zero; 2107 } 2108 // If we don't know any bits, early out. 2109 if (!Known.One && !Known.Zero) 2110 break; 2111 } 2112 break; 2113 } 2114 case ISD::EXTRACT_SUBVECTOR: { 2115 // If we know the element index, just demand that subvector elements, 2116 // otherwise demand them all. 
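    // Sketch of the index math (illustrative): extracting a v2i32 from a
    // v4i32 at index 2, with both result lanes demanded, gives
    //   DemandedElts = 0b11 -> zext to 4 bits = 0b0011 -> shl 2 = 0b1100,
    // so only source elements 2 and 3 are queried.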
2117 SDValue Src = Op.getOperand(0); 2118 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2119 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2120 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2121 // Offset the demanded elts by the subvector index. 2122 uint64_t Idx = SubIdx->getZExtValue(); 2123 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx); 2124 computeKnownBits(Src, Known, DemandedSrc, Depth + 1); 2125 } else { 2126 computeKnownBits(Src, Known, Depth + 1); 2127 } 2128 break; 2129 } 2130 case ISD::BITCAST: { 2131 SDValue N0 = Op.getOperand(0); 2132 unsigned SubBitWidth = N0.getScalarValueSizeInBits(); 2133 2134 // Ignore bitcasts from floating point. 2135 if (!N0.getValueType().isInteger()) 2136 break; 2137 2138 // Fast handling of 'identity' bitcasts. 2139 if (BitWidth == SubBitWidth) { 2140 computeKnownBits(N0, Known, DemandedElts, Depth + 1); 2141 break; 2142 } 2143 2144 // Support big-endian targets when it becomes useful. 2145 bool IsLE = getDataLayout().isLittleEndian(); 2146 if (!IsLE) 2147 break; 2148 2149 // Bitcast 'small element' vector to 'large element' scalar/vector. 2150 if ((BitWidth % SubBitWidth) == 0) { 2151 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2152 2153 // Collect known bits for the (larger) output by collecting the known 2154 // bits from each set of sub elements and shift these into place. 2155 // We need to separately call computeKnownBits for each set of 2156 // sub elements as the knownbits for each is likely to be different. 2157 unsigned SubScale = BitWidth / SubBitWidth; 2158 APInt SubDemandedElts(NumElts * SubScale, 0); 2159 for (unsigned i = 0; i != NumElts; ++i) 2160 if (DemandedElts[i]) 2161 SubDemandedElts.setBit(i * SubScale); 2162 2163 for (unsigned i = 0; i != SubScale; ++i) { 2164 computeKnownBits(N0, Known2, SubDemandedElts.shl(i), 2165 Depth + 1); 2166 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * i); 2167 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * i); 2168 } 2169 } 2170 2171 // Bitcast 'large element' scalar/vector to 'small element' vector. 2172 if ((SubBitWidth % BitWidth) == 0) { 2173 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 2174 2175 // Collect known bits for the (smaller) output by collecting the known 2176 // bits from the overlapping larger input elements and extracting the 2177 // sub sections we actually care about. 2178 unsigned SubScale = SubBitWidth / BitWidth; 2179 APInt SubDemandedElts(NumElts / SubScale, 0); 2180 for (unsigned i = 0; i != NumElts; ++i) 2181 if (DemandedElts[i]) 2182 SubDemandedElts.setBit(i / SubScale); 2183 2184 computeKnownBits(N0, Known2, SubDemandedElts, Depth + 1); 2185 2186 Known.Zero.setAllBits(); Known.One.setAllBits(); 2187 for (unsigned i = 0; i != NumElts; ++i) 2188 if (DemandedElts[i]) { 2189 unsigned Offset = (i % SubScale) * BitWidth; 2190 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth); 2191 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth); 2192 // If we don't know any bits, early out. 2193 if (!Known.One && !Known.Zero) 2194 break; 2195 } 2196 } 2197 break; 2198 } 2199 case ISD::AND: 2200 // If either the LHS or the RHS are Zero, the result is zero. 2201 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2202 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2203 2204 // Output known-1 bits are only known if set in both the LHS & RHS. 
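    // Worked example (illustrative): AND of an unknown i8 X with the
    // constant 0xF0 (One=0xF0, Zero=0x0F) produces One=0x00 (X contributes
    // no known ones) and Zero=0x0F -- exactly the "low nibble cleared" fact.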
    Known.One &= Known2.One;
    // Output bits are known zero if the corresponding bit is known zero in
    // either the LHS or the RHS.
    Known.Zero |= Known2.Zero;
    break;
  case ISD::OR:
    computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
    computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output bits are known one if the corresponding bit is known one in
    // either the LHS or the RHS.
    Known.One |= Known2.One;
    break;
  case ISD::XOR: {
    computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
    computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);

    // Output bits are known zero where the input bits are both known clear
    // or both known set.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output bits are known one where one input bit is known set and the
    // other is known clear.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = KnownZeroOut;
    break;
  }
  case ISD::MUL: {
    computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
    computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
    unsigned TrailZ = Known.countMinTrailingZeros() +
                      Known2.countMinTrailingZeros();
    unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                              Known2.countMinLeadingZeros(),
                              BitWidth) - BitWidth;

    Known.resetAll();
    Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
    Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
    break;
  }
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case ISD::SELECT:
    computeKnownBits(Op.getOperand(2), Known, Depth+1);
    // If we don't know any bits, early out.
    if (!Known.One && !Known.Zero)
      break;
    computeKnownBits(Op.getOperand(1), Known2, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SELECT_CC:
    computeKnownBits(Op.getOperand(3), Known, Depth+1);
    // If we don't know any bits, early out.
    if (!Known.One && !Known.Zero)
      break;
    computeKnownBits(Op.getOperand(2), Known2, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.
    // If we know the result of a setcc has the top bits zero, use this info.
2291 // We know that we have an integer-based boolean since these operations 2292 // are only available for integer. 2293 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2294 TargetLowering::ZeroOrOneBooleanContent && 2295 BitWidth > 1) 2296 Known.Zero.setBitsFrom(1); 2297 break; 2298 case ISD::SETCC: 2299 // If we know the result of a setcc has the top bits zero, use this info. 2300 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2301 TargetLowering::ZeroOrOneBooleanContent && 2302 BitWidth > 1) 2303 Known.Zero.setBitsFrom(1); 2304 break; 2305 case ISD::SHL: 2306 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2307 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2308 Known.Zero <<= *ShAmt; 2309 Known.One <<= *ShAmt; 2310 // Low bits are known zero. 2311 Known.Zero.setLowBits(ShAmt->getZExtValue()); 2312 } 2313 break; 2314 case ISD::SRL: 2315 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2316 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2317 Known.Zero.lshrInPlace(*ShAmt); 2318 Known.One.lshrInPlace(*ShAmt); 2319 // High bits are known zero. 2320 Known.Zero.setHighBits(ShAmt->getZExtValue()); 2321 } 2322 break; 2323 case ISD::SRA: 2324 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2325 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2326 Known.Zero.lshrInPlace(*ShAmt); 2327 Known.One.lshrInPlace(*ShAmt); 2328 // If we know the value of the sign bit, then we know it is copied across 2329 // the high bits by the shift amount. 2330 APInt SignMask = APInt::getSignMask(BitWidth); 2331 SignMask.lshrInPlace(*ShAmt); // Adjust to where it is now in the mask. 2332 if (Known.Zero.intersects(SignMask)) { 2333 Known.Zero.setHighBits(ShAmt->getZExtValue());// New bits are known zero. 2334 } else if (Known.One.intersects(SignMask)) { 2335 Known.One.setHighBits(ShAmt->getZExtValue()); // New bits are known one. 2336 } 2337 } 2338 break; 2339 case ISD::SIGN_EXTEND_INREG: { 2340 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2341 unsigned EBits = EVT.getScalarSizeInBits(); 2342 2343 // Sign extension. Compute the demanded bits in the result that are not 2344 // present in the input. 2345 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2346 2347 APInt InSignMask = APInt::getSignMask(EBits); 2348 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2349 2350 // If the sign extended bits are demanded, we know that the sign 2351 // bit is demanded. 2352 InSignMask = InSignMask.zext(BitWidth); 2353 if (NewBits.getBoolValue()) 2354 InputDemandedBits |= InSignMask; 2355 2356 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2357 Known.One &= InputDemandedBits; 2358 Known.Zero &= InputDemandedBits; 2359 2360 // If the sign bit of the input is known set or clear, then we know the 2361 // top bits of the result. 2362 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear 2363 Known.Zero |= NewBits; 2364 Known.One &= ~NewBits; 2365 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set 2366 Known.One |= NewBits; 2367 Known.Zero &= ~NewBits; 2368 } else { // Input sign bit unknown 2369 Known.Zero &= ~NewBits; 2370 Known.One &= ~NewBits; 2371 } 2372 break; 2373 } 2374 case ISD::CTTZ: 2375 case ISD::CTTZ_ZERO_UNDEF: { 2376 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2377 // If we have a known 1, its position is our upper bound. 
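    // Worked example (illustrative): if bit 3 of the operand is known one,
    // countMaxTrailingZeros() == 3, so LowBits = Log2_32(3) + 1 = 2; the
    // cttz result is at most 3 and fits in the low 2 bits, so every higher
    // result bit is known zero.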
2378 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 2379 unsigned LowBits = Log2_32(PossibleTZ) + 1; 2380 Known.Zero.setBitsFrom(LowBits); 2381 break; 2382 } 2383 case ISD::CTLZ: 2384 case ISD::CTLZ_ZERO_UNDEF: { 2385 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2386 // If we have a known 1, its position is our upper bound. 2387 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 2388 unsigned LowBits = Log2_32(PossibleLZ) + 1; 2389 Known.Zero.setBitsFrom(LowBits); 2390 break; 2391 } 2392 case ISD::CTPOP: { 2393 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2394 // If we know some of the bits are zero, they can't be one. 2395 unsigned PossibleOnes = Known2.countMaxPopulation(); 2396 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 2397 break; 2398 } 2399 case ISD::LOAD: { 2400 LoadSDNode *LD = cast<LoadSDNode>(Op); 2401 // If this is a ZEXTLoad and we are looking at the loaded value. 2402 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 2403 EVT VT = LD->getMemoryVT(); 2404 unsigned MemBits = VT.getScalarSizeInBits(); 2405 Known.Zero.setBitsFrom(MemBits); 2406 } else if (const MDNode *Ranges = LD->getRanges()) { 2407 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 2408 computeKnownBitsFromRangeMetadata(*Ranges, Known); 2409 } 2410 break; 2411 } 2412 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2413 EVT InVT = Op.getOperand(0).getValueType(); 2414 unsigned InBits = InVT.getScalarSizeInBits(); 2415 Known = Known.trunc(InBits); 2416 computeKnownBits(Op.getOperand(0), Known, 2417 DemandedElts.zext(InVT.getVectorNumElements()), 2418 Depth + 1); 2419 Known = Known.zext(BitWidth); 2420 Known.Zero.setBitsFrom(InBits); 2421 break; 2422 } 2423 case ISD::ZERO_EXTEND: { 2424 EVT InVT = Op.getOperand(0).getValueType(); 2425 unsigned InBits = InVT.getScalarSizeInBits(); 2426 Known = Known.trunc(InBits); 2427 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2428 Known = Known.zext(BitWidth); 2429 Known.Zero.setBitsFrom(InBits); 2430 break; 2431 } 2432 // TODO ISD::SIGN_EXTEND_VECTOR_INREG 2433 case ISD::SIGN_EXTEND: { 2434 EVT InVT = Op.getOperand(0).getValueType(); 2435 unsigned InBits = InVT.getScalarSizeInBits(); 2436 2437 Known = Known.trunc(InBits); 2438 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2439 2440 // If the sign bit is known to be zero or one, then sext will extend 2441 // it to the top bits, else it will just zext. 2442 Known = Known.sext(BitWidth); 2443 break; 2444 } 2445 case ISD::ANY_EXTEND: { 2446 EVT InVT = Op.getOperand(0).getValueType(); 2447 unsigned InBits = InVT.getScalarSizeInBits(); 2448 Known = Known.trunc(InBits); 2449 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2450 Known = Known.zext(BitWidth); 2451 break; 2452 } 2453 case ISD::TRUNCATE: { 2454 EVT InVT = Op.getOperand(0).getValueType(); 2455 unsigned InBits = InVT.getScalarSizeInBits(); 2456 Known = Known.zext(InBits); 2457 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2458 Known = Known.trunc(BitWidth); 2459 break; 2460 } 2461 case ISD::AssertZext: { 2462 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2463 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 2464 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2465 Known.Zero |= (~InMask); 2466 Known.One &= (~Known.Zero); 2467 break; 2468 } 2469 case ISD::FGETSIGN: 2470 // All bits are zero except the low bit. 
    Known.Zero.setBitsFrom(1);
    break;
  case ISD::USUBO:
  case ISD::SSUBO:
    if (Op.getResNo() == 1) {
      // If we know the result of a setcc has the top bits zero, use this info.
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::SUB:
  case ISD::SUBC: {
    if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (CLHS->getAPIntValue().isNonNegative()) {
        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit.
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        computeKnownBits(Op.getOperand(1), Known2, DemandedElts,
                         Depth + 1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((Known2.Zero & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
          // Top bits known zero.
          Known.Zero.setHighBits(NLZ2);
        }
      }
    }

    // If the low bits are known to be zero in both operands, they are zero
    // in the result as well: the difference of two multiples of 2^K is
    // itself a multiple of 2^K.
    computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
    unsigned KnownZeroLow = Known2.countMinTrailingZeros();
    if (KnownZeroLow == 0)
      break;

    computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
    KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
    Known.Zero.setLowBits(KnownZeroLow);
    break;
  }
  case ISD::UADDO:
  case ISD::SADDO:
  case ISD::ADDCARRY:
    if (Op.getResNo() == 1) {
      // If we know the result of a setcc has the top bits zero, use this info.
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::ADD:
  case ISD::ADDC:
  case ISD::ADDE: {
    // The low known-0 bits of the output are the low zero bits common to
    // both LHS and RHS. For example, 8+(X<<3) is known to have the low 3
    // bits clear.
    // Output known-0 bits are also known if the top bits of each input are
    // known to be clear. For example, if one input has the top 10 bits clear
    // and the other has the top 8 bits clear, we know the top 7 bits of the
    // output must be clear.
    computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
    unsigned KnownZeroHigh = Known2.countMinLeadingZeros();
    unsigned KnownZeroLow = Known2.countMinTrailingZeros();

    computeKnownBits(Op.getOperand(1), Known2, DemandedElts,
                     Depth + 1);
    KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros());
    KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());

    if (Opcode == ISD::ADDE || Opcode == ISD::ADDCARRY) {
      // With ADDE and ADDCARRY, a carry bit may be added in, so we can only
      // use this information if we know (at least) that the low two bits are
      // clear.
      // We then report back that the low bit is unknown but that the other
      // low bits are known zero.
      if (KnownZeroLow >= 2)
        Known.Zero.setBits(1, KnownZeroLow);
      break;
    }

    Known.Zero.setLowBits(KnownZeroLow);
    if (KnownZeroHigh > 1)
      Known.Zero.setHighBits(KnownZeroHigh - 1);
    break;
  }
  case ISD::SREM:
    if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0))
          Known.One |= ~LowBits;
        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
      }
    }
    break;
  case ISD::UREM: {
    if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);

        // The upper bits are all zero, the lower ones are unchanged.
        Known.Zero = Known2.Zero | ~LowBits;
        Known.One = Known2.One & LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
    computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);

    uint32_t Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    computeKnownBits(Op.getOperand(0), Known, Depth+1);
    const unsigned Index = Op.getConstantOperandVal(1);
    const unsigned BitWidth = Op.getValueSizeInBits();

    // Remove the low part of the known bits mask.
    Known.Zero = Known.Zero.getHiBits(Known.Zero.getBitWidth() - Index * BitWidth);
    Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth);

    // Remove the high part of the known bits mask.
    Known = Known.trunc(BitWidth);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue EltNo = Op.getOperand(1);
    EVT VecVT = InVec.getValueType();
    const unsigned BitWidth = Op.getValueSizeInBits();
    const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
    const unsigned NumSrcElts = VecVT.getVectorNumElements();
    // If BitWidth > EltBitWidth the value is any-extended, so we do not know
    // anything about the extended bits.
    if (BitWidth > EltBitWidth)
      Known = Known.trunc(EltBitWidth);
    ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
      // If we know the element index, just demand that vector element.
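      // Sketch (illustrative): extracting lane 2 of a v4i32 builds the mask
      // APInt::getOneBitSet(4, 2) == 0b0100, so only knowledge about lane 2
      // flows into the scalar result.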
      unsigned Idx = ConstEltNo->getZExtValue();
      APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
      computeKnownBits(InVec, Known, DemandedElt, Depth + 1);
    } else {
      // Unknown element index, so ignore DemandedElts and demand them all.
      computeKnownBits(InVec, Known, Depth + 1);
    }
    if (BitWidth > EltBitWidth)
      Known = Known.zext(BitWidth);
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue InVal = Op.getOperand(1);
    SDValue EltNo = Op.getOperand(2);

    ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      // If we know the element index, split the demand between the
      // source vector and the inserted element.
      Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
      unsigned EltIdx = CEltNo->getZExtValue();

      // If we demand the inserted element then add its common known bits.
      if (DemandedElts[EltIdx]) {
        computeKnownBits(InVal, Known2, Depth + 1);
        Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
        Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
      }

      // If we demand the source vector then add its common known bits, ensuring
      // that we don't demand the inserted element.
      APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
      if (!!VectorElts) {
        computeKnownBits(InVec, Known2, VectorElts, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    } else {
      // Unknown element index, so ignore DemandedElts and demand them all.
      computeKnownBits(InVec, Known, Depth + 1);
      computeKnownBits(InVal, Known2, Depth + 1);
      Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
      Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
    }
    break;
  }
  case ISD::BITREVERSE: {
    computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
    Known.Zero = Known2.Zero.reverseBits();
    Known.One = Known2.One.reverseBits();
    break;
  }
  case ISD::BSWAP: {
    computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
    Known.Zero = Known2.Zero.byteSwap();
    Known.One = Known2.One.byteSwap();
    break;
  }
  case ISD::ABS: {
    computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);

    // If the source's MSB is zero then we know the rest of the bits already.
    if (Known2.isNonNegative()) {
      Known.Zero = Known2.Zero;
      Known.One = Known2.One;
      break;
    }

    // We only know that the absolute value's MSB will be zero iff there is
    // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
    Known2.One.clearSignBit();
    if (Known2.One.getBoolValue()) {
      Known.Zero = APInt::getSignMask(BitWidth);
      break;
    }
    break;
  }
  case ISD::UMIN: {
    computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
    computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);

    // UMIN - we know that the result will have the maximum of the
    // known zero leading bits of the inputs.
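    // Worked example (illustrative): for i8 inputs bounded by 0x0F (at least
    // four leading zeros) and 0x3F (at least two), umin is <= 0x0F, so the
    // result inherits the larger count: at least four leading zero bits.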
    unsigned LeadZero = Known.countMinLeadingZeros();
    LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());

    Known.Zero &= Known2.Zero;
    Known.One &= Known2.One;
    Known.Zero.setHighBits(LeadZero);
    break;
  }
  case ISD::UMAX: {
    computeKnownBits(Op.getOperand(0), Known, DemandedElts,
                     Depth + 1);
    computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);

    // UMAX - we know that the result will have the maximum of the
    // known one leading bits of the inputs.
    unsigned LeadOne = Known.countMinLeadingOnes();
    LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());

    Known.Zero &= Known2.Zero;
    Known.One &= Known2.One;
    Known.One.setHighBits(LeadOne);
    break;
  }
  case ISD::SMIN:
  case ISD::SMAX: {
    computeKnownBits(Op.getOperand(0), Known, DemandedElts,
                     Depth + 1);
    // If we don't know any bits, early out.
    if (!Known.One && !Known.Zero)
      break;
    computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
    Known.Zero &= Known2.Zero;
    Known.One &= Known2.One;
    break;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    if (unsigned Align = InferPtrAlignment(Op)) {
      // The low bits are known zero if the pointer is aligned.
      Known.Zero.setLowBits(Log2_32(Align));
      break;
    }
    break;

  default:
    if (Opcode < ISD::BUILTIN_OP_END)
      break;
    LLVM_FALLTHROUGH;
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Allow the target to implement this method for its nodes.
    TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
    break;
  }

  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
}

SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
                                                             SDValue N1) const {
  // X + 0 never overflows.
  if (isNullConstant(N1))
    return OFK_Never;

  KnownBits N1Known;
  computeKnownBits(N1, N1Known);
  if (N1Known.Zero.getBoolValue()) {
    KnownBits N0Known;
    computeKnownBits(N0, N0Known);

    bool overflow;
    (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow);
    if (!overflow)
      return OFK_Never;
  }

  // mulhi + 1 never overflows.
  if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
      (~N1Known.Zero & 0x01) == ~N1Known.Zero)
    return OFK_Never;

  if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
    KnownBits N0Known;
    computeKnownBits(N0, N0Known);

    if ((~N0Known.Zero & 0x01) == ~N0Known.Zero)
      return OFK_Never;
  }

  return OFK_Sometime;
}

bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
  EVT OpVT = Val.getValueType();
  unsigned BitWidth = OpVT.getScalarSizeInBits();

  // Is the constant a known power of 2?
  if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
    return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();

  // A left-shift of a constant one will have exactly one bit set because
  // shifting the bit off the end is undefined.
  if (Val.getOpcode() == ISD::SHL) {
    auto *C = isConstOrConstSplat(Val.getOperand(0));
    if (C && C->getAPIntValue() == 1)
      return true;
  }

  // Similarly, a logical right-shift of a constant sign-bit will have exactly
  // one bit set.
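  // For instance (illustrative): for i8, (srl 0x80, X) can only produce
  // 0x80, 0x40, ..., 0x01 -- exactly one bit set in every case -- because a
  // shift amount of 8 or more is undefined and never shifts the bit out.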
2838 if (Val.getOpcode() == ISD::SRL) { 2839 auto *C = isConstOrConstSplat(Val.getOperand(0)); 2840 if (C && C->getAPIntValue().isSignMask()) 2841 return true; 2842 } 2843 2844 // Are all operands of a build vector constant powers of two? 2845 if (Val.getOpcode() == ISD::BUILD_VECTOR) 2846 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 2847 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 2848 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 2849 return false; 2850 })) 2851 return true; 2852 2853 // More could be done here, though the above checks are enough 2854 // to handle some common cases. 2855 2856 // Fall back to computeKnownBits to catch other known cases. 2857 KnownBits Known; 2858 computeKnownBits(Val, Known); 2859 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 2860 } 2861 2862 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 2863 EVT VT = Op.getValueType(); 2864 APInt DemandedElts = VT.isVector() 2865 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2866 : APInt(1, 1); 2867 return ComputeNumSignBits(Op, DemandedElts, Depth); 2868 } 2869 2870 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 2871 unsigned Depth) const { 2872 EVT VT = Op.getValueType(); 2873 assert(VT.isInteger() && "Invalid VT!"); 2874 unsigned VTBits = VT.getScalarSizeInBits(); 2875 unsigned NumElts = DemandedElts.getBitWidth(); 2876 unsigned Tmp, Tmp2; 2877 unsigned FirstAnswer = 1; 2878 2879 if (Depth == 6) 2880 return 1; // Limit search depth. 2881 2882 if (!DemandedElts) 2883 return 1; // No demanded elts, better to assume we don't know anything. 2884 2885 switch (Op.getOpcode()) { 2886 default: break; 2887 case ISD::AssertSext: 2888 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2889 return VTBits-Tmp+1; 2890 case ISD::AssertZext: 2891 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2892 return VTBits-Tmp; 2893 2894 case ISD::Constant: { 2895 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue(); 2896 return Val.getNumSignBits(); 2897 } 2898 2899 case ISD::BUILD_VECTOR: 2900 Tmp = VTBits; 2901 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 2902 if (!DemandedElts[i]) 2903 continue; 2904 2905 SDValue SrcOp = Op.getOperand(i); 2906 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 2907 2908 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2909 if (SrcOp.getValueSizeInBits() != VTBits) { 2910 assert(SrcOp.getValueSizeInBits() > VTBits && 2911 "Expected BUILD_VECTOR implicit truncation"); 2912 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 2913 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 2914 } 2915 Tmp = std::min(Tmp, Tmp2); 2916 } 2917 return Tmp; 2918 2919 case ISD::VECTOR_SHUFFLE: { 2920 // Collect the minimum number of sign bits that are shared by every vector 2921 // element referenced by the shuffle. 2922 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2923 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2924 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2925 for (unsigned i = 0; i != NumElts; ++i) { 2926 int M = SVN->getMaskElt(i); 2927 if (!DemandedElts[i]) 2928 continue; 2929 // For UNDEF elements, we don't know anything about the common state of 2930 // the shuffle result. 
2931 if (M < 0) 2932 return 1; 2933 if ((unsigned)M < NumElts) 2934 DemandedLHS.setBit((unsigned)M % NumElts); 2935 else 2936 DemandedRHS.setBit((unsigned)M % NumElts); 2937 } 2938 Tmp = std::numeric_limits<unsigned>::max(); 2939 if (!!DemandedLHS) 2940 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 2941 if (!!DemandedRHS) { 2942 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 2943 Tmp = std::min(Tmp, Tmp2); 2944 } 2945 // If we don't know anything, early out and try computeKnownBits fall-back. 2946 if (Tmp == 1) 2947 break; 2948 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 2949 return Tmp; 2950 } 2951 2952 case ISD::SIGN_EXTEND: 2953 case ISD::SIGN_EXTEND_VECTOR_INREG: 2954 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 2955 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp; 2956 2957 case ISD::SIGN_EXTEND_INREG: 2958 // Max of the input and what this extends. 2959 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 2960 Tmp = VTBits-Tmp+1; 2961 2962 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2963 return std::max(Tmp, Tmp2); 2964 2965 case ISD::SRA: 2966 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 2967 // SRA X, C -> adds C sign bits. 2968 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) { 2969 APInt ShiftVal = C->getAPIntValue(); 2970 ShiftVal += Tmp; 2971 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); 2972 } 2973 return Tmp; 2974 case ISD::SHL: 2975 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) { 2976 // shl destroys sign bits. 2977 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2978 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 2979 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 2980 return Tmp - C->getZExtValue(); 2981 } 2982 break; 2983 case ISD::AND: 2984 case ISD::OR: 2985 case ISD::XOR: // NOT is handled here. 2986 // Logical binary ops preserve the number of sign bits at the worst. 2987 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2988 if (Tmp != 1) { 2989 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2990 FirstAnswer = std::min(Tmp, Tmp2); 2991 // We computed what we know about the sign bits as our first 2992 // answer. Now proceed to the generic code that uses 2993 // computeKnownBits, and pick whichever answer is better. 2994 } 2995 break; 2996 2997 case ISD::SELECT: 2998 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2999 if (Tmp == 1) return 1; // Early out. 3000 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1); 3001 return std::min(Tmp, Tmp2); 3002 case ISD::SELECT_CC: 3003 Tmp = ComputeNumSignBits(Op.getOperand(2), Depth+1); 3004 if (Tmp == 1) return 1; // Early out. 3005 Tmp2 = ComputeNumSignBits(Op.getOperand(3), Depth+1); 3006 return std::min(Tmp, Tmp2); 3007 case ISD::SMIN: 3008 case ISD::SMAX: 3009 case ISD::UMIN: 3010 case ISD::UMAX: 3011 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3012 if (Tmp == 1) 3013 return 1; // Early out. 3014 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3015 return std::min(Tmp, Tmp2); 3016 case ISD::SADDO: 3017 case ISD::UADDO: 3018 case ISD::SSUBO: 3019 case ISD::USUBO: 3020 case ISD::SMULO: 3021 case ISD::UMULO: 3022 if (Op.getResNo() != 1) 3023 break; 3024 // The boolean result conforms to getBooleanContents. Fall through. 3025 // If setcc returns 0/-1, all bits are sign bits. 
3026 // We know that we have an integer-based boolean since these operations 3027 // are only available for integer. 3028 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 3029 TargetLowering::ZeroOrNegativeOneBooleanContent) 3030 return VTBits; 3031 break; 3032 case ISD::SETCC: 3033 // If setcc returns 0/-1, all bits are sign bits. 3034 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3035 TargetLowering::ZeroOrNegativeOneBooleanContent) 3036 return VTBits; 3037 break; 3038 case ISD::ROTL: 3039 case ISD::ROTR: 3040 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 3041 unsigned RotAmt = C->getZExtValue() & (VTBits-1); 3042 3043 // Handle rotate right by N like a rotate left by 32-N. 3044 if (Op.getOpcode() == ISD::ROTR) 3045 RotAmt = (VTBits-RotAmt) & (VTBits-1); 3046 3047 // If we aren't rotating out all of the known-in sign bits, return the 3048 // number that are left. This handles rotl(sext(x), 1) for example. 3049 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3050 if (Tmp > RotAmt+1) return Tmp-RotAmt; 3051 } 3052 break; 3053 case ISD::ADD: 3054 case ISD::ADDC: 3055 // Add can have at most one carry bit. Thus we know that the output 3056 // is, at worst, one more bit than the inputs. 3057 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3058 if (Tmp == 1) return 1; // Early out. 3059 3060 // Special case decrementing a value (ADD X, -1): 3061 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 3062 if (CRHS->isAllOnesValue()) { 3063 KnownBits Known; 3064 computeKnownBits(Op.getOperand(0), Known, Depth+1); 3065 3066 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3067 // sign bits set. 3068 if ((Known.Zero | 1).isAllOnesValue()) 3069 return VTBits; 3070 3071 // If we are subtracting one from a positive number, there is no carry 3072 // out of the result. 3073 if (Known.isNonNegative()) 3074 return Tmp; 3075 } 3076 3077 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3078 if (Tmp2 == 1) return 1; 3079 return std::min(Tmp, Tmp2)-1; 3080 3081 case ISD::SUB: 3082 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3083 if (Tmp2 == 1) return 1; 3084 3085 // Handle NEG. 3086 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) 3087 if (CLHS->isNullValue()) { 3088 KnownBits Known; 3089 computeKnownBits(Op.getOperand(1), Known, Depth+1); 3090 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3091 // sign bits set. 3092 if ((Known.Zero | 1).isAllOnesValue()) 3093 return VTBits; 3094 3095 // If the input is known to be positive (the sign bit is known clear), 3096 // the output of the NEG has the same number of sign bits as the input. 3097 if (Known.isNonNegative()) 3098 return Tmp2; 3099 3100 // Otherwise, we treat this like a SUB. 3101 } 3102 3103 // Sub can have at most one carry bit. Thus we know that the output 3104 // is, at worst, one more bit than the inputs. 3105 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3106 if (Tmp == 1) return 1; // Early out. 3107 return std::min(Tmp, Tmp2)-1; 3108 case ISD::TRUNCATE: { 3109 // Check if the sign bits of source go down as far as the truncated value. 
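    // Worked example (illustrative): truncating an i32 with 20 sign bits to
    // i16: 20 > (32 - 16), so the result keeps 20 - (32 - 16) = 4 sign bits.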
    unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
    unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (NumSrcSignBits > (NumSrcBits - VTBits))
      return NumSrcSignBits - (NumSrcBits - VTBits);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    const int BitWidth = Op.getValueSizeInBits();
    const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;

    // Get the reverse index (starting from 1); operand 1 indexes elements
    // from the little end, while the sign sits at the big end.
    const int rIndex = Items - 1 - Op.getConstantOperandVal(1);

    // If the sign portion ends in our element, the subtraction gives the
    // correct result. Otherwise it gives either a negative result or one
    // larger than the bit width, so clamp to [0, BitWidth].
    return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue InVal = Op.getOperand(1);
    SDValue EltNo = Op.getOperand(2);
    unsigned NumElts = InVec.getValueType().getVectorNumElements();

    ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      // If we know the element index, split the demand between the
      // source vector and the inserted element.
      unsigned EltIdx = CEltNo->getZExtValue();

      // If we demand the inserted element then get its sign bits.
      Tmp = std::numeric_limits<unsigned>::max();
      if (DemandedElts[EltIdx]) {
        // TODO - handle implicit truncation of inserted elements.
        if (InVal.getScalarValueSizeInBits() != VTBits)
          break;
        Tmp = ComputeNumSignBits(InVal, Depth + 1);
      }

      // If we demand the source vector then get its sign bits, and determine
      // the minimum.
      APInt VectorElts = DemandedElts;
      VectorElts.clearBit(EltIdx);
      if (!!VectorElts) {
        Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
        Tmp = std::min(Tmp, Tmp2);
      }
    } else {
      // Unknown element index, so ignore DemandedElts and demand them all.
      Tmp = ComputeNumSignBits(InVec, Depth + 1);
      Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue EltNo = Op.getOperand(1);
    EVT VecVT = InVec.getValueType();
    const unsigned BitWidth = Op.getValueSizeInBits();
    const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
    const unsigned NumSrcElts = VecVT.getVectorNumElements();

    // If BitWidth > EltBitWidth the value is any-extended, and we do not know
    // anything about sign bits. But if the sizes match we can derive knowledge
    // about sign bits from the vector operand.
    if (BitWidth != EltBitWidth)
      break;

    // If we know the element index, just demand that vector element, else for
    // an unknown element index, ignore DemandedElts and demand them all.
3183 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3184 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3185 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3186 DemandedSrcElts =
3187 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3188
3189 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3190 }
3191 case ISD::EXTRACT_SUBVECTOR: {
3192 // If we know the element index, just demand that subvector's elements,
3193 // otherwise demand them all.
3194 SDValue Src = Op.getOperand(0);
3195 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3196 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3197 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3198 // Offset the demanded elts by the subvector index.
3199 uint64_t Idx = SubIdx->getZExtValue();
3200 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
3201 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3202 }
3203 return ComputeNumSignBits(Src, Depth + 1);
3204 }
3205 case ISD::CONCAT_VECTORS:
3206 // Determine the minimum number of sign bits across all demanded
3207 // elts of the input vectors. Early out if the result is already 1.
3208 Tmp = std::numeric_limits<unsigned>::max();
3209 EVT SubVectorVT = Op.getOperand(0).getValueType();
3210 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3211 unsigned NumSubVectors = Op.getNumOperands();
3212 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3213 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3214 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3215 if (!DemandedSub)
3216 continue;
3217 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3218 Tmp = std::min(Tmp, Tmp2);
3219 }
3220 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3221 return Tmp;
3222 }
3223
3224 // If we are looking at the loaded value of the SDNode.
3225 if (Op.getResNo() == 0) {
3226 // Handle LOADX separately here. EXTLOAD case will fallthrough.
3227 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3228 unsigned ExtType = LD->getExtensionType();
3229 switch (ExtType) {
3230 default: break;
3231 case ISD::SEXTLOAD: // e.g. i16->i32: 32-16+1 = 17 sign bits known.
3232 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3233 return VTBits-Tmp+1;
3234 case ISD::ZEXTLOAD: // e.g. i16->i32: 32-16 = 16 sign bits known.
3235 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3236 return VTBits-Tmp;
3237 }
3238 }
3239 }
3240
3241 // Allow the target to implement this method for its nodes.
3242 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3243 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3244 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3245 Op.getOpcode() == ISD::INTRINSIC_VOID) {
3246 unsigned NumBits =
3247 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
3248 if (NumBits > 1)
3249 FirstAnswer = std::max(FirstAnswer, NumBits);
3250 }
3251
3252 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3253 // use this information.
3254 KnownBits Known;
3255 computeKnownBits(Op, Known, DemandedElts, Depth);
3256
3257 APInt Mask;
3258 if (Known.isNonNegative()) { // sign bit is 0
3259 Mask = Known.Zero;
3260 } else if (Known.isNegative()) { // sign bit is 1
3261 Mask = Known.One;
3262 } else {
3263 // Nothing known.
3264 return FirstAnswer;
3265 }
3266
3267 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3268 // the number of identical bits in the top of the input value.
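// e.g. for an i8 with Known.One = 0b11100000, ~Mask is 0b00011111, which
// has three leading zeros, so at least 3 sign bits are known.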
3269 Mask = ~Mask;
3270 Mask <<= Mask.getBitWidth()-VTBits;
3271 // Return # leading zeros. We use 'min' here in case Mask was zero before
3272 // shifting. We don't want to return e.g. '64' for an i32 value of "0".
3273 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3274 }
3275
3276 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3277 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3278 !isa<ConstantSDNode>(Op.getOperand(1)))
3279 return false;
3280
3281 if (Op.getOpcode() == ISD::OR &&
3282 !MaskedValueIsZero(Op.getOperand(0),
3283 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3284 return false;
3285
3286 return true;
3287 }
3288
3289 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
3290 // If we're told that NaNs won't happen, assume they won't.
3291 if (getTarget().Options.NoNaNsFPMath)
3292 return true;
3293
3294 if (Op->getFlags().hasNoNaNs())
3295 return true;
3296
3297 // If the value is a constant, we can obviously see if it is a NaN or not.
3298 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3299 return !C->getValueAPF().isNaN();
3300
3301 // TODO: Recognize more cases here.
3302
3303 return false;
3304 }
3305
3306 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3307 // If the value is a constant, we can obviously see if it is a zero or not.
3308 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3309 return !C->isZero();
3310
3311 // TODO: Recognize more cases here.
3312 switch (Op.getOpcode()) {
3313 default: break;
3314 case ISD::OR:
3315 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3316 return !C->isNullValue();
3317 break;
3318 }
3319
3320 return false;
3321 }
3322
3323 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3324 // Check the obvious case.
3325 if (A == B) return true;
3326
3327 // Check for negative and positive zero.
3328 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3329 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3330 if (CA->isZero() && CB->isZero()) return true;
3331
3332 // Otherwise they may not be equal.
3333 return false;
3334 }
3335
3336 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
3337 assert(A.getValueType() == B.getValueType() &&
3338 "Values must have the same type");
3339 KnownBits AKnown, BKnown;
3340 computeKnownBits(A, AKnown);
3341 computeKnownBits(B, BKnown);
3342 return (AKnown.Zero | BKnown.Zero).isAllOnesValue();
3343 }
3344
3345 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
3346 ArrayRef<SDValue> Ops,
3347 SelectionDAG &DAG) {
3348 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
3349 assert(llvm::all_of(Ops,
3350 [Ops](SDValue Op) {
3351 return Ops[0].getValueType() == Op.getValueType();
3352 }) &&
3353 "Concatenation of vectors with inconsistent value types!");
3354 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
3355 VT.getVectorNumElements() &&
3356 "Incorrect element count in vector concatenation!");
3357
3358 if (Ops.size() == 1)
3359 return Ops[0];
3360
3361 // Concat of UNDEFs is UNDEF.
3362 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3363 return DAG.getUNDEF(VT);
3364
3365 // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
3366 // simplified to one big BUILD_VECTOR.
3367 // FIXME: Add support for SCALAR_TO_VECTOR as well.
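// e.g. (concat_vectors (build_vector a, b), (build_vector c, d))
// becomes (build_vector a, b, c, d).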
3368 EVT SVT = VT.getScalarType();
3369 SmallVector<SDValue, 16> Elts;
3370 for (SDValue Op : Ops) {
3371 EVT OpVT = Op.getValueType();
3372 if (Op.isUndef())
3373 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
3374 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
3375 Elts.append(Op->op_begin(), Op->op_end());
3376 else
3377 return SDValue();
3378 }
3379
3380 // BUILD_VECTOR requires all inputs to be of the same type, so find the
3381 // maximum type and extend them all.
3382 for (SDValue Op : Elts)
3383 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
3384
3385 if (SVT.bitsGT(VT.getScalarType()))
3386 for (SDValue &Op : Elts)
3387 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
3388 ? DAG.getZExtOrTrunc(Op, DL, SVT)
3389 : DAG.getSExtOrTrunc(Op, DL, SVT);
3390
3391 return DAG.getBuildVector(VT, DL, Elts);
3392 }
3393
3394 /// Gets or creates the specified node.
3395 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
3396 FoldingSetNodeID ID;
3397 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
3398 void *IP = nullptr;
3399 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3400 return SDValue(E, 0);
3401
3402 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
3403 getVTList(VT));
3404 CSEMap.InsertNode(N, IP);
3405
3406 InsertNode(N);
3407 return SDValue(N, 0);
3408 }
3409
3410 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3411 SDValue Operand, const SDNodeFlags Flags) {
3412 // Constant fold unary operations with an integer constant operand. Even
3413 // opaque constants will be folded, because the folding of unary operations
3414 // doesn't create new constants with different values. Nevertheless, the
3415 // opaque flag is preserved during folding to prevent future folding with
3416 // other constants.
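// e.g. (bswap (opaque i32 C)) folds to a new i32 constant that still
// carries the opaque flag.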
3417 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 3418 const APInt &Val = C->getAPIntValue(); 3419 switch (Opcode) { 3420 default: break; 3421 case ISD::SIGN_EXTEND: 3422 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 3423 C->isTargetOpcode(), C->isOpaque()); 3424 case ISD::ANY_EXTEND: 3425 case ISD::ZERO_EXTEND: 3426 case ISD::TRUNCATE: 3427 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 3428 C->isTargetOpcode(), C->isOpaque()); 3429 case ISD::UINT_TO_FP: 3430 case ISD::SINT_TO_FP: { 3431 APFloat apf(EVTToAPFloatSemantics(VT), 3432 APInt::getNullValue(VT.getSizeInBits())); 3433 (void)apf.convertFromAPInt(Val, 3434 Opcode==ISD::SINT_TO_FP, 3435 APFloat::rmNearestTiesToEven); 3436 return getConstantFP(apf, DL, VT); 3437 } 3438 case ISD::BITCAST: 3439 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 3440 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 3441 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 3442 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 3443 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 3444 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 3445 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 3446 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 3447 break; 3448 case ISD::ABS: 3449 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 3450 C->isOpaque()); 3451 case ISD::BITREVERSE: 3452 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 3453 C->isOpaque()); 3454 case ISD::BSWAP: 3455 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 3456 C->isOpaque()); 3457 case ISD::CTPOP: 3458 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 3459 C->isOpaque()); 3460 case ISD::CTLZ: 3461 case ISD::CTLZ_ZERO_UNDEF: 3462 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 3463 C->isOpaque()); 3464 case ISD::CTTZ: 3465 case ISD::CTTZ_ZERO_UNDEF: 3466 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 3467 C->isOpaque()); 3468 case ISD::FP16_TO_FP: { 3469 bool Ignored; 3470 APFloat FPV(APFloat::IEEEhalf(), 3471 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 3472 3473 // This can return overflow, underflow, or inexact; we don't care. 3474 // FIXME need to be more flexible about rounding mode. 3475 (void)FPV.convert(EVTToAPFloatSemantics(VT), 3476 APFloat::rmNearestTiesToEven, &Ignored); 3477 return getConstantFP(FPV, DL, VT); 3478 } 3479 } 3480 } 3481 3482 // Constant fold unary operations with a floating point constant operand. 
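// e.g. (fneg (ConstantFP 1.0)) folds to (ConstantFP -1.0), and
// (fabs (ConstantFP -2.5)) folds to (ConstantFP 2.5).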
3483 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 3484 APFloat V = C->getValueAPF(); // make copy 3485 switch (Opcode) { 3486 case ISD::FNEG: 3487 V.changeSign(); 3488 return getConstantFP(V, DL, VT); 3489 case ISD::FABS: 3490 V.clearSign(); 3491 return getConstantFP(V, DL, VT); 3492 case ISD::FCEIL: { 3493 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 3494 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3495 return getConstantFP(V, DL, VT); 3496 break; 3497 } 3498 case ISD::FTRUNC: { 3499 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 3500 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3501 return getConstantFP(V, DL, VT); 3502 break; 3503 } 3504 case ISD::FFLOOR: { 3505 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 3506 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3507 return getConstantFP(V, DL, VT); 3508 break; 3509 } 3510 case ISD::FP_EXTEND: { 3511 bool ignored; 3512 // This can return overflow, underflow, or inexact; we don't care. 3513 // FIXME need to be more flexible about rounding mode. 3514 (void)V.convert(EVTToAPFloatSemantics(VT), 3515 APFloat::rmNearestTiesToEven, &ignored); 3516 return getConstantFP(V, DL, VT); 3517 } 3518 case ISD::FP_TO_SINT: 3519 case ISD::FP_TO_UINT: { 3520 bool ignored; 3521 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 3522 // FIXME need to be more flexible about rounding mode. 3523 APFloat::opStatus s = 3524 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 3525 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 3526 break; 3527 return getConstant(IntVal, DL, VT); 3528 } 3529 case ISD::BITCAST: 3530 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 3531 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3532 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 3533 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3534 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 3535 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 3536 break; 3537 case ISD::FP_TO_FP16: { 3538 bool Ignored; 3539 // This can return overflow, underflow, or inexact; we don't care. 3540 // FIXME need to be more flexible about rounding mode. 3541 (void)V.convert(APFloat::IEEEhalf(), 3542 APFloat::rmNearestTiesToEven, &Ignored); 3543 return getConstant(V.bitcastToAPInt(), DL, VT); 3544 } 3545 } 3546 } 3547 3548 // Constant fold unary operations with a vector integer or float operand. 3549 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 3550 if (BV->isConstant()) { 3551 switch (Opcode) { 3552 default: 3553 // FIXME: Entirely reasonable to perform folding of other unary 3554 // operations here as the need arises. 
3555 break; 3556 case ISD::FNEG: 3557 case ISD::FABS: 3558 case ISD::FCEIL: 3559 case ISD::FTRUNC: 3560 case ISD::FFLOOR: 3561 case ISD::FP_EXTEND: 3562 case ISD::FP_TO_SINT: 3563 case ISD::FP_TO_UINT: 3564 case ISD::TRUNCATE: 3565 case ISD::UINT_TO_FP: 3566 case ISD::SINT_TO_FP: 3567 case ISD::ABS: 3568 case ISD::BITREVERSE: 3569 case ISD::BSWAP: 3570 case ISD::CTLZ: 3571 case ISD::CTLZ_ZERO_UNDEF: 3572 case ISD::CTTZ: 3573 case ISD::CTTZ_ZERO_UNDEF: 3574 case ISD::CTPOP: { 3575 SDValue Ops = { Operand }; 3576 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 3577 return Fold; 3578 } 3579 } 3580 } 3581 } 3582 3583 unsigned OpOpcode = Operand.getNode()->getOpcode(); 3584 switch (Opcode) { 3585 case ISD::TokenFactor: 3586 case ISD::MERGE_VALUES: 3587 case ISD::CONCAT_VECTORS: 3588 return Operand; // Factor, merge or concat of one node? No need. 3589 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 3590 case ISD::FP_EXTEND: 3591 assert(VT.isFloatingPoint() && 3592 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 3593 if (Operand.getValueType() == VT) return Operand; // noop conversion. 3594 assert((!VT.isVector() || 3595 VT.getVectorNumElements() == 3596 Operand.getValueType().getVectorNumElements()) && 3597 "Vector element count mismatch!"); 3598 assert(Operand.getValueType().bitsLT(VT) && 3599 "Invalid fpext node, dst < src!"); 3600 if (Operand.isUndef()) 3601 return getUNDEF(VT); 3602 break; 3603 case ISD::SIGN_EXTEND: 3604 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3605 "Invalid SIGN_EXTEND!"); 3606 if (Operand.getValueType() == VT) return Operand; // noop extension 3607 assert((!VT.isVector() || 3608 VT.getVectorNumElements() == 3609 Operand.getValueType().getVectorNumElements()) && 3610 "Vector element count mismatch!"); 3611 assert(Operand.getValueType().bitsLT(VT) && 3612 "Invalid sext node, dst < src!"); 3613 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 3614 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 3615 else if (OpOpcode == ISD::UNDEF) 3616 // sext(undef) = 0, because the top bits will all be the same. 3617 return getConstant(0, DL, VT); 3618 break; 3619 case ISD::ZERO_EXTEND: 3620 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3621 "Invalid ZERO_EXTEND!"); 3622 if (Operand.getValueType() == VT) return Operand; // noop extension 3623 assert((!VT.isVector() || 3624 VT.getVectorNumElements() == 3625 Operand.getValueType().getVectorNumElements()) && 3626 "Vector element count mismatch!"); 3627 assert(Operand.getValueType().bitsLT(VT) && 3628 "Invalid zext node, dst < src!"); 3629 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 3630 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0)); 3631 else if (OpOpcode == ISD::UNDEF) 3632 // zext(undef) = 0, because the top bits will be zero. 
3633 return getConstant(0, DL, VT);
3634 break;
3635 case ISD::ANY_EXTEND:
3636 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3637 "Invalid ANY_EXTEND!");
3638 if (Operand.getValueType() == VT) return Operand; // noop extension
3639 assert((!VT.isVector() ||
3640 VT.getVectorNumElements() ==
3641 Operand.getValueType().getVectorNumElements()) &&
3642 "Vector element count mismatch!");
3643 assert(Operand.getValueType().bitsLT(VT) &&
3644 "Invalid anyext node, dst < src!");
3645
3646 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3647 OpOpcode == ISD::ANY_EXTEND)
3648 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
3649 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3650 else if (OpOpcode == ISD::UNDEF)
3651 return getUNDEF(VT);
3652
3653 // (ext (trunc x)) -> x
3654 if (OpOpcode == ISD::TRUNCATE) {
3655 SDValue OpOp = Operand.getOperand(0);
3656 if (OpOp.getValueType() == VT)
3657 return OpOp;
3658 }
3659 break;
3660 case ISD::TRUNCATE:
3661 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3662 "Invalid TRUNCATE!");
3663 if (Operand.getValueType() == VT) return Operand; // noop truncate
3664 assert((!VT.isVector() ||
3665 VT.getVectorNumElements() ==
3666 Operand.getValueType().getVectorNumElements()) &&
3667 "Vector element count mismatch!");
3668 assert(Operand.getValueType().bitsGT(VT) &&
3669 "Invalid truncate node, src < dst!");
3670 if (OpOpcode == ISD::TRUNCATE)
3671 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3672 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3673 OpOpcode == ISD::ANY_EXTEND) {
3674 // If the source is smaller than the dest, we still need an extend.
3675 if (Operand.getOperand(0).getValueType().getScalarType()
3676 .bitsLT(VT.getScalarType()))
3677 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3678 if (Operand.getOperand(0).getValueType().bitsGT(VT))
3679 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3680 return Operand.getOperand(0);
3681 }
3682 if (OpOpcode == ISD::UNDEF)
3683 return getUNDEF(VT);
3684 break;
3685 case ISD::ABS:
3686 assert(VT.isInteger() && VT == Operand.getValueType() &&
3687 "Invalid ABS!");
3688 if (OpOpcode == ISD::UNDEF)
3689 return getUNDEF(VT);
3690 break;
3691 case ISD::BSWAP:
3692 assert(VT.isInteger() && VT == Operand.getValueType() &&
3693 "Invalid BSWAP!");
3694 assert((VT.getScalarSizeInBits() % 16 == 0) &&
3695 "BSWAP types must be a multiple of 16 bits!");
3696 if (OpOpcode == ISD::UNDEF)
3697 return getUNDEF(VT);
3698 break;
3699 case ISD::BITREVERSE:
3700 assert(VT.isInteger() && VT == Operand.getValueType() &&
3701 "Invalid BITREVERSE!");
3702 if (OpOpcode == ISD::UNDEF)
3703 return getUNDEF(VT);
3704 break;
3705 case ISD::BITCAST:
3706 // Basic sanity checking.
3707 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
3708 "Cannot BITCAST between types of different sizes!");
3709 if (VT == Operand.getValueType()) return Operand; // noop conversion.
3710 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 3711 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 3712 if (OpOpcode == ISD::UNDEF) 3713 return getUNDEF(VT); 3714 break; 3715 case ISD::SCALAR_TO_VECTOR: 3716 assert(VT.isVector() && !Operand.getValueType().isVector() && 3717 (VT.getVectorElementType() == Operand.getValueType() || 3718 (VT.getVectorElementType().isInteger() && 3719 Operand.getValueType().isInteger() && 3720 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 3721 "Illegal SCALAR_TO_VECTOR node!"); 3722 if (OpOpcode == ISD::UNDEF) 3723 return getUNDEF(VT); 3724 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 3725 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 3726 isa<ConstantSDNode>(Operand.getOperand(1)) && 3727 Operand.getConstantOperandVal(1) == 0 && 3728 Operand.getOperand(0).getValueType() == VT) 3729 return Operand.getOperand(0); 3730 break; 3731 case ISD::FNEG: 3732 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 3733 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) 3734 // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags? 3735 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1), 3736 Operand.getOperand(0), Operand.getNode()->getFlags()); 3737 if (OpOpcode == ISD::FNEG) // --X -> X 3738 return Operand.getOperand(0); 3739 break; 3740 case ISD::FABS: 3741 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 3742 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 3743 break; 3744 } 3745 3746 SDNode *N; 3747 SDVTList VTs = getVTList(VT); 3748 SDValue Ops[] = {Operand}; 3749 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 3750 FoldingSetNodeID ID; 3751 AddNodeIDNode(ID, Opcode, VTs, Ops); 3752 void *IP = nullptr; 3753 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 3754 E->intersectFlagsWith(Flags); 3755 return SDValue(E, 0); 3756 } 3757 3758 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3759 N->setFlags(Flags); 3760 createOperands(N, Ops); 3761 CSEMap.InsertNode(N, IP); 3762 } else { 3763 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3764 createOperands(N, Ops); 3765 } 3766 3767 InsertNode(N); 3768 return SDValue(N, 0); 3769 } 3770 3771 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 3772 const APInt &C2) { 3773 switch (Opcode) { 3774 case ISD::ADD: return std::make_pair(C1 + C2, true); 3775 case ISD::SUB: return std::make_pair(C1 - C2, true); 3776 case ISD::MUL: return std::make_pair(C1 * C2, true); 3777 case ISD::AND: return std::make_pair(C1 & C2, true); 3778 case ISD::OR: return std::make_pair(C1 | C2, true); 3779 case ISD::XOR: return std::make_pair(C1 ^ C2, true); 3780 case ISD::SHL: return std::make_pair(C1 << C2, true); 3781 case ISD::SRL: return std::make_pair(C1.lshr(C2), true); 3782 case ISD::SRA: return std::make_pair(C1.ashr(C2), true); 3783 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true); 3784 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true); 3785 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true); 3786 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true); 3787 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true); 3788 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? 
C1 : C2, true); 3789 case ISD::UDIV: 3790 if (!C2.getBoolValue()) 3791 break; 3792 return std::make_pair(C1.udiv(C2), true); 3793 case ISD::UREM: 3794 if (!C2.getBoolValue()) 3795 break; 3796 return std::make_pair(C1.urem(C2), true); 3797 case ISD::SDIV: 3798 if (!C2.getBoolValue()) 3799 break; 3800 return std::make_pair(C1.sdiv(C2), true); 3801 case ISD::SREM: 3802 if (!C2.getBoolValue()) 3803 break; 3804 return std::make_pair(C1.srem(C2), true); 3805 } 3806 return std::make_pair(APInt(1, 0), false); 3807 } 3808 3809 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 3810 EVT VT, const ConstantSDNode *Cst1, 3811 const ConstantSDNode *Cst2) { 3812 if (Cst1->isOpaque() || Cst2->isOpaque()) 3813 return SDValue(); 3814 3815 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(), 3816 Cst2->getAPIntValue()); 3817 if (!Folded.second) 3818 return SDValue(); 3819 return getConstant(Folded.first, DL, VT); 3820 } 3821 3822 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 3823 const GlobalAddressSDNode *GA, 3824 const SDNode *N2) { 3825 if (GA->getOpcode() != ISD::GlobalAddress) 3826 return SDValue(); 3827 if (!TLI->isOffsetFoldingLegal(GA)) 3828 return SDValue(); 3829 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2); 3830 if (!Cst2) 3831 return SDValue(); 3832 int64_t Offset = Cst2->getSExtValue(); 3833 switch (Opcode) { 3834 case ISD::ADD: break; 3835 case ISD::SUB: Offset = -uint64_t(Offset); break; 3836 default: return SDValue(); 3837 } 3838 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT, 3839 GA->getOffset() + uint64_t(Offset)); 3840 } 3841 3842 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 3843 switch (Opcode) { 3844 case ISD::SDIV: 3845 case ISD::UDIV: 3846 case ISD::SREM: 3847 case ISD::UREM: { 3848 // If a divisor is zero/undef or any element of a divisor vector is 3849 // zero/undef, the whole op is undef. 3850 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 3851 SDValue Divisor = Ops[1]; 3852 if (Divisor.isUndef() || isNullConstant(Divisor)) 3853 return true; 3854 3855 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 3856 llvm::any_of(Divisor->op_values(), 3857 [](SDValue V) { return V.isUndef() || 3858 isNullConstant(V); }); 3859 // TODO: Handle signed overflow. 3860 } 3861 // TODO: Handle oversized shifts. 3862 default: 3863 return false; 3864 } 3865 } 3866 3867 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 3868 EVT VT, SDNode *Cst1, 3869 SDNode *Cst2) { 3870 // If the opcode is a target-specific ISD node, there's nothing we can 3871 // do here and the operand rules may not line up with the below, so 3872 // bail early. 3873 if (Opcode >= ISD::BUILTIN_OP_END) 3874 return SDValue(); 3875 3876 if (isUndef(Opcode, {SDValue(Cst1, 0), SDValue(Cst2, 0)})) 3877 return getUNDEF(VT); 3878 3879 // Handle the case of two scalars. 
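// e.g. (add i32 3, 4) is folded here to the constant 7 via FoldValue().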
3880 if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
3881 if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
3882 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
3883 assert((!Folded || !VT.isVector()) &&
3884 "Can't fold vector ops with scalar operands");
3885 return Folded;
3886 }
3887 }
3888
3889 // fold (add Sym, c) -> Sym+c
3890 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
3891 return FoldSymbolOffset(Opcode, VT, GA, Cst2);
3892 if (TLI->isCommutativeBinOp(Opcode))
3893 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
3894 return FoldSymbolOffset(Opcode, VT, GA, Cst1);
3895
3896 // For vectors, extract each constant element into Inputs so we can constant
3897 // fold them individually.
3898 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
3899 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
3900 if (!BV1 || !BV2)
3901 return SDValue();
3902
3903 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
3904
3905 EVT SVT = VT.getScalarType();
3906 SmallVector<SDValue, 4> Outputs;
3907 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
3908 SDValue V1 = BV1->getOperand(I);
3909 SDValue V2 = BV2->getOperand(I);
3910
3911 // Avoid BUILD_VECTOR nodes that perform implicit truncation.
3912 // FIXME: This is valid and could be handled by truncation.
3913 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
3914 return SDValue();
3915
3916 // Fold one vector element.
3917 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
3918
3919 // Scalar folding only succeeded if the result is a constant or UNDEF.
3920 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
3921 ScalarResult.getOpcode() != ISD::ConstantFP)
3922 return SDValue();
3923 Outputs.push_back(ScalarResult);
3924 }
3925
3926 assert(VT.getVectorNumElements() == Outputs.size() &&
3927 "Vector size mismatch!");
3928
3929 // We may have a vector type but a scalar result. Create a splat.
3930 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
3931
3932 // Build a big vector out of the scalar elements we generated.
3933 return getBuildVector(VT, SDLoc(), Outputs);
3934 }
3935
3936 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
3937 const SDLoc &DL, EVT VT,
3938 ArrayRef<SDValue> Ops,
3939 const SDNodeFlags Flags) {
3940 // If the opcode is a target-specific ISD node, there's nothing we can
3941 // do here and the operand rules may not line up with the below, so
3942 // bail early.
3943 if (Opcode >= ISD::BUILTIN_OP_END)
3944 return SDValue();
3945
3946 if (isUndef(Opcode, Ops))
3947 return getUNDEF(VT);
3948
3949 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
3950 if (!VT.isVector())
3951 return SDValue();
3952
3953 unsigned NumElts = VT.getVectorNumElements();
3954
3955 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
3956 return !Op.getValueType().isVector() ||
3957 Op.getValueType().getVectorNumElements() == NumElts;
3958 };
3959
3960 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
3961 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
3962 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
3963 (BV && BV->isConstant());
3964 };
3965
3966 // All operands must be vector types with the same number of elements as
3967 // the result type and must be either UNDEF or a build vector of constant
3968 // or UNDEF scalars.
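// e.g. (add v4i32 <1,2,3,4>, <10,10,10,10>) folds lane by lane to
// <11,12,13,14>.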
3969 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
3970 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
3971 return SDValue();
3972
3973 // If we are comparing vectors, then the result needs to be an i1 boolean
3974 // that is then sign-extended back to the legal result type.
3975 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
3976
3977 // Find a legal integer scalar type for constant promotion and
3978 // ensure that its scalar size is at least as large as the source.
3979 EVT LegalSVT = VT.getScalarType();
3980 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
3981 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
3982 if (LegalSVT.bitsLT(VT.getScalarType()))
3983 return SDValue();
3984 }
3985
3986 // Constant fold each scalar lane separately.
3987 SmallVector<SDValue, 4> ScalarResults;
3988 for (unsigned i = 0; i != NumElts; i++) {
3989 SmallVector<SDValue, 4> ScalarOps;
3990 for (SDValue Op : Ops) {
3991 EVT InSVT = Op.getValueType().getScalarType();
3992 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
3993 if (!InBV) {
3994 // We've checked that this is UNDEF or a constant of some kind.
3995 if (Op.isUndef())
3996 ScalarOps.push_back(getUNDEF(InSVT));
3997 else
3998 ScalarOps.push_back(Op);
3999 continue;
4000 }
4001
4002 SDValue ScalarOp = InBV->getOperand(i);
4003 EVT ScalarVT = ScalarOp.getValueType();
4004
4005 // Build vector (integer) scalar operands may need implicit
4006 // truncation - do this before constant folding.
4007 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
4008 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
4009
4010 ScalarOps.push_back(ScalarOp);
4011 }
4012
4013 // Constant fold the scalar operands.
4014 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
4015
4016 // Legalize the (integer) scalar constant if necessary.
4017 if (LegalSVT != SVT)
4018 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4019
4020 // Scalar folding only succeeded if the result is a constant or UNDEF.
4021 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4022 ScalarResult.getOpcode() != ISD::ConstantFP)
4023 return SDValue();
4024 ScalarResults.push_back(ScalarResult);
4025 }
4026
4027 return getBuildVector(VT, DL, ScalarResults);
4028 }
4029
4030 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4031 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
4032 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4033 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
4034 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4035 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4036
4037 // Canonicalize constant to RHS if commutative.
4038 if (TLI->isCommutativeBinOp(Opcode)) {
4039 if (N1C && !N2C) {
4040 std::swap(N1C, N2C);
4041 std::swap(N1, N2);
4042 } else if (N1CFP && !N2CFP) {
4043 std::swap(N1CFP, N2CFP);
4044 std::swap(N1, N2);
4045 }
4046 }
4047
4048 switch (Opcode) {
4049 default: break;
4050 case ISD::TokenFactor:
4051 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
4052 N2.getValueType() == MVT::Other && "Invalid token factor!");
4053 // Fold trivial token factors.
4054 if (N1.getOpcode() == ISD::EntryToken) return N2;
4055 if (N2.getOpcode() == ISD::EntryToken) return N1;
4056 if (N1 == N2) return N1;
4057 break;
4058 case ISD::CONCAT_VECTORS: {
4059 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
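// e.g. a concat of two all-undef operands is folded to one wider UNDEF.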
4060 SDValue Ops[] = {N1, N2};
4061 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
4062 return V;
4063 break;
4064 }
4065 case ISD::AND:
4066 assert(VT.isInteger() && "This operator does not apply to FP types!");
4067 assert(N1.getValueType() == N2.getValueType() &&
4068 N1.getValueType() == VT && "Binary operator types must match!");
4069 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
4070 // worth handling here.
4071 if (N2C && N2C->isNullValue())
4072 return N2;
4073 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
4074 return N1;
4075 break;
4076 case ISD::OR:
4077 case ISD::XOR:
4078 case ISD::ADD:
4079 case ISD::SUB:
4080 assert(VT.isInteger() && "This operator does not apply to FP types!");
4081 assert(N1.getValueType() == N2.getValueType() &&
4082 N1.getValueType() == VT && "Binary operator types must match!");
4083 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
4084 // it's worth handling here.
4085 if (N2C && N2C->isNullValue())
4086 return N1;
4087 break;
4088 case ISD::UDIV:
4089 case ISD::UREM:
4090 case ISD::MULHU:
4091 case ISD::MULHS:
4092 case ISD::MUL:
4093 case ISD::SDIV:
4094 case ISD::SREM:
4095 case ISD::SMIN:
4096 case ISD::SMAX:
4097 case ISD::UMIN:
4098 case ISD::UMAX:
4099 assert(VT.isInteger() && "This operator does not apply to FP types!");
4100 assert(N1.getValueType() == N2.getValueType() &&
4101 N1.getValueType() == VT && "Binary operator types must match!");
4102 break;
4103 case ISD::FADD:
4104 case ISD::FSUB:
4105 case ISD::FMUL:
4106 case ISD::FDIV:
4107 case ISD::FREM:
4108 if (getTarget().Options.UnsafeFPMath) {
4109 if (Opcode == ISD::FADD) {
4110 // x+0 --> x
4111 if (N2CFP && N2CFP->getValueAPF().isZero())
4112 return N1;
4113 } else if (Opcode == ISD::FSUB) {
4114 // x-0 --> x
4115 if (N2CFP && N2CFP->getValueAPF().isZero())
4116 return N1;
4117 } else if (Opcode == ISD::FMUL) {
4118 // x*0 --> 0
4119 if (N2CFP && N2CFP->isZero())
4120 return N2;
4121 // x*1 --> x
4122 if (N2CFP && N2CFP->isExactlyValue(1.0))
4123 return N1;
4124 }
4125 }
4126 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
4127 assert(N1.getValueType() == N2.getValueType() &&
4128 N1.getValueType() == VT && "Binary operator types must match!");
4129 break;
4130 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
4131 assert(N1.getValueType() == VT &&
4132 N1.getValueType().isFloatingPoint() &&
4133 N2.getValueType().isFloatingPoint() &&
4134 "Invalid FCOPYSIGN!");
4135 break;
4136 case ISD::SHL:
4137 case ISD::SRA:
4138 case ISD::SRL:
4139 case ISD::ROTL:
4140 case ISD::ROTR:
4141 assert(VT == N1.getValueType() &&
4142 "Shift operators' return type must be the same as their first arg");
4143 assert(VT.isInteger() && N2.getValueType().isInteger() &&
4144 "Shifts only work on integers");
4145 assert((!VT.isVector() || VT == N2.getValueType()) &&
4146 "Vector shift amounts must have the same type as their first arg");
4147 // Verify that the shift amount VT is big enough to hold valid shift
4148 // amounts. This catches things like trying to shift an i1024 value by an
4149 // i8, which is easy to fall into in generic code that uses
4150 // TLI.getShiftAmountTy().
4151 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
4152 "Invalid use of small shift amount with oversized value!");
4153
4154 // Always fold shifts of i1 values so the code generator doesn't need to
4155 // handle them.
Since we know the size of the shift has to be less than the 4156 // size of the value, the shift/rotate count is guaranteed to be zero. 4157 if (VT == MVT::i1) 4158 return N1; 4159 if (N2C && N2C->isNullValue()) 4160 return N1; 4161 break; 4162 case ISD::FP_ROUND_INREG: { 4163 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4164 assert(VT == N1.getValueType() && "Not an inreg round!"); 4165 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() && 4166 "Cannot FP_ROUND_INREG integer types"); 4167 assert(EVT.isVector() == VT.isVector() && 4168 "FP_ROUND_INREG type should be vector iff the operand " 4169 "type is vector!"); 4170 assert((!EVT.isVector() || 4171 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 4172 "Vector element counts must match in FP_ROUND_INREG"); 4173 assert(EVT.bitsLE(VT) && "Not rounding down!"); 4174 (void)EVT; 4175 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding. 4176 break; 4177 } 4178 case ISD::FP_ROUND: 4179 assert(VT.isFloatingPoint() && 4180 N1.getValueType().isFloatingPoint() && 4181 VT.bitsLE(N1.getValueType()) && 4182 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && 4183 "Invalid FP_ROUND!"); 4184 if (N1.getValueType() == VT) return N1; // noop conversion. 4185 break; 4186 case ISD::AssertSext: 4187 case ISD::AssertZext: { 4188 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4189 assert(VT == N1.getValueType() && "Not an inreg extend!"); 4190 assert(VT.isInteger() && EVT.isInteger() && 4191 "Cannot *_EXTEND_INREG FP types"); 4192 assert(!EVT.isVector() && 4193 "AssertSExt/AssertZExt type should be the vector element type " 4194 "rather than the vector type!"); 4195 assert(EVT.bitsLE(VT) && "Not extending!"); 4196 if (VT == EVT) return N1; // noop assertion. 4197 break; 4198 } 4199 case ISD::SIGN_EXTEND_INREG: { 4200 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4201 assert(VT == N1.getValueType() && "Not an inreg extend!"); 4202 assert(VT.isInteger() && EVT.isInteger() && 4203 "Cannot *_EXTEND_INREG FP types"); 4204 assert(EVT.isVector() == VT.isVector() && 4205 "SIGN_EXTEND_INREG type should be vector iff the operand " 4206 "type is vector!"); 4207 assert((!EVT.isVector() || 4208 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 4209 "Vector element counts must match in SIGN_EXTEND_INREG"); 4210 assert(EVT.bitsLE(VT) && "Not extending!"); 4211 if (EVT == VT) return N1; // Not actually extending 4212 4213 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 4214 unsigned FromBits = EVT.getScalarSizeInBits(); 4215 Val <<= Val.getBitWidth() - FromBits; 4216 Val.ashrInPlace(Val.getBitWidth() - FromBits); 4217 return getConstant(Val, DL, ConstantVT); 4218 }; 4219 4220 if (N1C) { 4221 const APInt &Val = N1C->getAPIntValue(); 4222 return SignExtendInReg(Val, VT); 4223 } 4224 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 4225 SmallVector<SDValue, 8> Ops; 4226 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 4227 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 4228 SDValue Op = N1.getOperand(i); 4229 if (Op.isUndef()) { 4230 Ops.push_back(getUNDEF(OpVT)); 4231 continue; 4232 } 4233 ConstantSDNode *C = cast<ConstantSDNode>(Op); 4234 APInt Val = C->getAPIntValue(); 4235 Ops.push_back(SignExtendInReg(Val, OpVT)); 4236 } 4237 return getBuildVector(VT, DL, Ops); 4238 } 4239 break; 4240 } 4241 case ISD::EXTRACT_VECTOR_ELT: 4242 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 
4243 if (N1.isUndef())
4244 return getUNDEF(VT);
4245
4246 // EXTRACT_VECTOR_ELT of an out-of-bounds element is an UNDEF.
4247 if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements())
4248 return getUNDEF(VT);
4249
4250 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
4251 // expanding copies of large vectors from registers.
4252 if (N2C &&
4253 N1.getOpcode() == ISD::CONCAT_VECTORS &&
4254 N1.getNumOperands() > 0) {
4255 unsigned Factor =
4256 N1.getOperand(0).getValueType().getVectorNumElements();
4257 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
4258 N1.getOperand(N2C->getZExtValue() / Factor),
4259 getConstant(N2C->getZExtValue() % Factor, DL,
4260 N2.getValueType()));
4261 }
4262
4263 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
4264 // expanding large vector constants.
4265 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
4266 SDValue Elt = N1.getOperand(N2C->getZExtValue());
4267
4268 if (VT != Elt.getValueType())
4269 // If the vector element type is not legal, the BUILD_VECTOR operands
4270 // are promoted and implicitly truncated, and the result implicitly
4271 // extended. Make that explicit here.
4272 Elt = getAnyExtOrTrunc(Elt, DL, VT);
4273
4274 return Elt;
4275 }
4276
4277 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
4278 // operations are lowered to scalars.
4279 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
4280 // If the indices are the same, return the inserted element; else,
4281 // if the indices are known to be different, extract the element from
4282 // the original vector.
4283 SDValue N1Op2 = N1.getOperand(2);
4284 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
4285
4286 if (N1Op2C && N2C) {
4287 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
4288 if (VT == N1.getOperand(1).getValueType())
4289 return N1.getOperand(1);
4290 else
4291 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
4292 }
4293
4294 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
4295 }
4296 }
4297 break;
4298 case ISD::EXTRACT_ELEMENT:
4299 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
4300 assert(!N1.getValueType().isVector() && !VT.isVector() &&
4301 (N1.getValueType().isInteger() == VT.isInteger()) &&
4302 N1.getValueType() != VT &&
4303 "Wrong types for EXTRACT_ELEMENT!");
4304
4305 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
4306 // 64-bit integers into 32-bit parts. Instead of building the extract of
4307 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
4308 if (N1.getOpcode() == ISD::BUILD_PAIR)
4309 return N1.getOperand(N2C->getZExtValue());
4310
4311 // EXTRACT_ELEMENT of a constant int is also very common.
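// e.g. (extract_element (i64 0x00000001FFFFFFFF), 1) folds to (i32 1):
// shift right by 1*32 bits, then truncate to 32 bits.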
4312 if (N1C) {
4313 unsigned ElementSize = VT.getSizeInBits();
4314 unsigned Shift = ElementSize * N2C->getZExtValue();
4315 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
4316 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
4317 }
4318 break;
4319 case ISD::EXTRACT_SUBVECTOR:
4320 if (VT.isSimple() && N1.getValueType().isSimple()) {
4321 assert(VT.isVector() && N1.getValueType().isVector() &&
4322 "Extract subvector VTs must be vectors!");
4323 assert(VT.getVectorElementType() ==
4324 N1.getValueType().getVectorElementType() &&
4325 "Extract subvector VTs must have the same element type!");
4326 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
4327 "Extract subvector must be from larger vector to smaller vector!");
4328
4329 if (N2C) {
4330 assert((VT.getVectorNumElements() + N2C->getZExtValue()
4331 <= N1.getValueType().getVectorNumElements())
4332 && "Extract subvector overflow!");
4333 }
4334
4335 // Trivial extraction.
4336 if (VT.getSimpleVT() == N1.getSimpleValueType())
4337 return N1;
4338
4339 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
4340 if (N1.isUndef())
4341 return getUNDEF(VT);
4342
4343 // EXTRACT_SUBVECTOR of CONCAT_VECTORS can be simplified if the pieces of
4344 // the concat have the same type as the extract.
4345 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
4346 N1.getNumOperands() > 0 &&
4347 VT == N1.getOperand(0).getValueType()) {
4348 unsigned Factor = VT.getVectorNumElements();
4349 return N1.getOperand(N2C->getZExtValue() / Factor);
4350 }
4351
4352 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
4353 // during shuffle legalization.
4354 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
4355 VT == N1.getOperand(1).getValueType())
4356 return N1.getOperand(1);
4357 }
4358 break;
4359 }
4360
4361 // Perform trivial constant folding.
4362 if (SDValue SV =
4363 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
4364 return SV;
4365
4366 // Constant fold FP operations.
4367 bool HasFPExceptions = TLI->hasFloatingPointExceptions();
4368 if (N1CFP) {
4369 if (N2CFP) {
4370 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
4371 APFloat::opStatus s;
4372 switch (Opcode) {
4373 case ISD::FADD:
4374 s = V1.add(V2, APFloat::rmNearestTiesToEven);
4375 if (!HasFPExceptions || s != APFloat::opInvalidOp)
4376 return getConstantFP(V1, DL, VT);
4377 break;
4378 case ISD::FSUB:
4379 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
4380 if (!HasFPExceptions || s != APFloat::opInvalidOp)
4381 return getConstantFP(V1, DL, VT);
4382 break;
4383 case ISD::FMUL:
4384 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
4385 if (!HasFPExceptions || s != APFloat::opInvalidOp)
4386 return getConstantFP(V1, DL, VT);
4387 break;
4388 case ISD::FDIV:
4389 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
4390 if (!HasFPExceptions || (s != APFloat::opInvalidOp &&
4391 s != APFloat::opDivByZero)) {
4392 return getConstantFP(V1, DL, VT);
4393 }
4394 break;
4395 case ISD::FREM:
4396 s = V1.mod(V2);
4397 if (!HasFPExceptions || (s != APFloat::opInvalidOp &&
4398 s != APFloat::opDivByZero)) {
4399 return getConstantFP(V1, DL, VT);
4400 }
4401 break;
4402 case ISD::FCOPYSIGN:
4403 V1.copySign(V2);
4404 return getConstantFP(V1, DL, VT);
4405 default: break;
4406 }
4407 }
4408
4409 if (Opcode == ISD::FP_ROUND) {
4410 APFloat V = N1CFP->getValueAPF(); // make copy
4411 bool ignored;
4412 // This can return overflow, underflow, or inexact; we don't care.
4413 // FIXME need to be more flexible about rounding mode.
4414 (void)V.convert(EVTToAPFloatSemantics(VT),
4415 APFloat::rmNearestTiesToEven, &ignored);
4416 return getConstantFP(V, DL, VT);
4417 }
4418 }
4419
4420 // Canonicalize an UNDEF to the RHS, even over a constant.
4421 if (N1.isUndef()) {
4422 if (TLI->isCommutativeBinOp(Opcode)) {
4423 std::swap(N1, N2);
4424 } else {
4425 switch (Opcode) {
4426 case ISD::FP_ROUND_INREG:
4427 case ISD::SIGN_EXTEND_INREG:
4428 case ISD::SUB:
4429 case ISD::FSUB:
4430 case ISD::FDIV:
4431 case ISD::FREM:
4432 case ISD::SRA:
4433 return N1; // fold op(undef, arg2) -> undef
4434 case ISD::UDIV:
4435 case ISD::SDIV:
4436 case ISD::UREM:
4437 case ISD::SREM:
4438 case ISD::SRL:
4439 case ISD::SHL:
4440 if (!VT.isVector())
4441 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
4442 // For vectors, we can't easily build an all-zero vector, just return
4443 // the other operand.
4444 return N2;
4445 }
4446 }
4447 }
4448
4449 // Fold a bunch of operators when the RHS is undef.
4450 if (N2.isUndef()) {
4451 switch (Opcode) {
4452 case ISD::XOR:
4453 if (N1.isUndef())
4454 // Handle undef ^ undef -> 0 special case. This is a common
4455 // idiom (misuse).
4456 return getConstant(0, DL, VT);
4457 LLVM_FALLTHROUGH;
4458 case ISD::ADD:
4459 case ISD::ADDC:
4460 case ISD::ADDE:
4461 case ISD::SUB:
4462 case ISD::UDIV:
4463 case ISD::SDIV:
4464 case ISD::UREM:
4465 case ISD::SREM:
4466 return N2; // fold op(arg1, undef) -> undef
4467 case ISD::FADD:
4468 case ISD::FSUB:
4469 case ISD::FMUL:
4470 case ISD::FDIV:
4471 case ISD::FREM:
4472 if (getTarget().Options.UnsafeFPMath)
4473 return N2;
4474 break;
4475 case ISD::MUL:
4476 case ISD::AND:
4477 case ISD::SRL:
4478 case ISD::SHL:
4479 if (!VT.isVector())
4480 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
4481 // For vectors, we can't easily build an all-zero vector, just return
4482 // the LHS.
4483 return N1;
4484 case ISD::OR:
4485 if (!VT.isVector())
4486 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
4487 // For vectors, we can't easily build an all-ones vector, just return
4488 // the LHS.
4489 return N1;
4490 case ISD::SRA:
4491 return N1;
4492 }
4493 }
4494
4495 // Memoize this node if possible.
4496 SDNode *N;
4497 SDVTList VTs = getVTList(VT);
4498 SDValue Ops[] = {N1, N2};
4499 if (VT != MVT::Glue) {
4500 FoldingSetNodeID ID;
4501 AddNodeIDNode(ID, Opcode, VTs, Ops);
4502 void *IP = nullptr;
4503 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4504 E->intersectFlagsWith(Flags);
4505 return SDValue(E, 0);
4506 }
4507
4508 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4509 N->setFlags(Flags);
4510 createOperands(N, Ops);
4511 CSEMap.InsertNode(N, IP);
4512 } else {
4513 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4514 createOperands(N, Ops);
4515 }
4516
4517 InsertNode(N);
4518 return SDValue(N, 0);
4519 }
4520
4521 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4522 SDValue N1, SDValue N2, SDValue N3) {
4523 // Perform various simplifications.
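// e.g. (fma 2.0, 3.0, 1.0) is folded below to the constant 7.0 when all
// three operands are ConstantFP.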
4524 switch (Opcode) {
4525 case ISD::FMA: {
4526 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4527 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4528 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
4529 if (N1CFP && N2CFP && N3CFP) {
4530 APFloat V1 = N1CFP->getValueAPF();
4531 const APFloat &V2 = N2CFP->getValueAPF();
4532 const APFloat &V3 = N3CFP->getValueAPF();
4533 APFloat::opStatus s =
4534 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
4535 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
4536 return getConstantFP(V1, DL, VT);
4537 }
4538 break;
4539 }
4540 case ISD::CONCAT_VECTORS: {
4541 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
4542 SDValue Ops[] = {N1, N2, N3};
4543 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
4544 return V;
4545 break;
4546 }
4547 case ISD::SETCC: {
4548 // Use FoldSetCC to simplify SETCC's.
4549 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
4550 return V;
4551 // Vector constant folding.
4552 SDValue Ops[] = {N1, N2, N3};
4553 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4554 return V;
4555 break;
4556 }
4557 case ISD::SELECT:
4558 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
4559 if (N1C->getZExtValue())
4560 return N2; // select true, X, Y -> X
4561 return N3; // select false, X, Y -> Y
4562 }
4563
4564 if (N2 == N3) return N2; // select C, X, X -> X
4565 break;
4566 case ISD::VECTOR_SHUFFLE:
4567 llvm_unreachable("should use getVectorShuffle constructor!");
4568 case ISD::INSERT_VECTOR_ELT: {
4569 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
4570 // INSERT_VECTOR_ELT into an out-of-bounds element is an UNDEF.
4571 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
4572 return getUNDEF(VT);
4573 break;
4574 }
4575 case ISD::INSERT_SUBVECTOR: {
4576 SDValue Index = N3;
4577 if (VT.isSimple() && N1.getValueType().isSimple()
4578 && N2.getValueType().isSimple()) {
4579 assert(VT.isVector() && N1.getValueType().isVector() &&
4580 N2.getValueType().isVector() &&
4581 "Insert subvector VTs must be vectors");
4582 assert(VT == N1.getValueType() &&
4583 "Dest and insert subvector source types must match!");
4584 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
4585 "Insert subvector must be from smaller vector to larger vector!");
4586 if (isa<ConstantSDNode>(Index)) {
4587 assert((N2.getValueType().getVectorNumElements() +
4588 cast<ConstantSDNode>(Index)->getZExtValue()
4589 <= VT.getVectorNumElements())
4590 && "Insert subvector overflow!");
4591 }
4592
4593 // Trivial insertion.
4594 if (VT.getSimpleVT() == N2.getSimpleValueType())
4595 return N2;
4596 }
4597 break;
4598 }
4599 case ISD::BITCAST:
4600 // Fold bit_convert nodes from a type to themselves.
4601 if (N1.getValueType() == VT)
4602 return N1;
4603 break;
4604 }
4605
4606 // Memoize node if it doesn't produce a flag.
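// Glue results are intentionally excluded from CSE here, matching the
// unary and binary getNode overloads above.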
4607 SDNode *N; 4608 SDVTList VTs = getVTList(VT); 4609 SDValue Ops[] = {N1, N2, N3}; 4610 if (VT != MVT::Glue) { 4611 FoldingSetNodeID ID; 4612 AddNodeIDNode(ID, Opcode, VTs, Ops); 4613 void *IP = nullptr; 4614 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4615 return SDValue(E, 0); 4616 4617 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4618 createOperands(N, Ops); 4619 CSEMap.InsertNode(N, IP); 4620 } else { 4621 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4622 createOperands(N, Ops); 4623 } 4624 4625 InsertNode(N); 4626 return SDValue(N, 0); 4627 } 4628 4629 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4630 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 4631 SDValue Ops[] = { N1, N2, N3, N4 }; 4632 return getNode(Opcode, DL, VT, Ops); 4633 } 4634 4635 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4636 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 4637 SDValue N5) { 4638 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 4639 return getNode(Opcode, DL, VT, Ops); 4640 } 4641 4642 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 4643 /// the incoming stack arguments to be loaded from the stack. 4644 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 4645 SmallVector<SDValue, 8> ArgChains; 4646 4647 // Include the original chain at the beginning of the list. When this is 4648 // used by target LowerCall hooks, this helps legalize find the 4649 // CALLSEQ_BEGIN node. 4650 ArgChains.push_back(Chain); 4651 4652 // Add a chain value for each stack argument. 4653 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 4654 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 4655 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 4656 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 4657 if (FI->getIndex() < 0) 4658 ArgChains.push_back(SDValue(L, 1)); 4659 4660 // Build a tokenfactor for all the chains. 4661 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 4662 } 4663 4664 /// getMemsetValue - Vectorized representation of the memset value 4665 /// operand. 4666 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 4667 const SDLoc &dl) { 4668 assert(!Value.isUndef()); 4669 4670 unsigned NumBits = VT.getScalarSizeInBits(); 4671 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 4672 assert(C->getAPIntValue().getBitWidth() == 8); 4673 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 4674 if (VT.isInteger()) 4675 return DAG.getConstant(Val, dl, VT); 4676 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 4677 VT); 4678 } 4679 4680 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 4681 EVT IntVT = VT.getScalarType(); 4682 if (!IntVT.isInteger()) 4683 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 4684 4685 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 4686 if (NumBits > 8) { 4687 // Use a multiplication with 0x010101... to extend the input to the 4688 // required length. 
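// e.g. an i8 fill value of 0xAB is extended to i32 as
// 0xAB * 0x01010101 = 0xABABABAB.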
4689 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 4690 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 4691 DAG.getConstant(Magic, dl, IntVT)); 4692 } 4693 4694 if (VT != Value.getValueType() && !VT.isInteger()) 4695 Value = DAG.getBitcast(VT.getScalarType(), Value); 4696 if (VT != Value.getValueType()) 4697 Value = DAG.getSplatBuildVector(VT, dl, Value); 4698 4699 return Value; 4700 } 4701 4702 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 4703 /// used when a memcpy is turned into a memset when the source is a constant 4704 /// string ptr. 4705 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 4706 const TargetLowering &TLI, 4707 const ConstantDataArraySlice &Slice) { 4708 // Handle vector with all elements zero. 4709 if (Slice.Array == nullptr) { 4710 if (VT.isInteger()) 4711 return DAG.getConstant(0, dl, VT); 4712 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 4713 return DAG.getConstantFP(0.0, dl, VT); 4714 else if (VT.isVector()) { 4715 unsigned NumElts = VT.getVectorNumElements(); 4716 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; 4717 return DAG.getNode(ISD::BITCAST, dl, VT, 4718 DAG.getConstant(0, dl, 4719 EVT::getVectorVT(*DAG.getContext(), 4720 EltVT, NumElts))); 4721 } else 4722 llvm_unreachable("Expected type!"); 4723 } 4724 4725 assert(!VT.isVector() && "Can't handle vector type here!"); 4726 unsigned NumVTBits = VT.getSizeInBits(); 4727 unsigned NumVTBytes = NumVTBits / 8; 4728 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 4729 4730 APInt Val(NumVTBits, 0); 4731 if (DAG.getDataLayout().isLittleEndian()) { 4732 for (unsigned i = 0; i != NumBytes; ++i) 4733 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 4734 } else { 4735 for (unsigned i = 0; i != NumBytes; ++i) 4736 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 4737 } 4738 4739 // If the "cost" of materializing the integer immediate is less than the cost 4740 // of a load, then it is cost effective to turn the load into the immediate. 4741 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 4742 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 4743 return DAG.getConstant(Val, dl, VT); 4744 return SDValue(nullptr, 0); 4745 } 4746 4747 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset, 4748 const SDLoc &DL) { 4749 EVT VT = Base.getValueType(); 4750 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT)); 4751 } 4752 4753 /// Returns true if memcpy source is constant data. 4754 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 4755 uint64_t SrcDelta = 0; 4756 GlobalAddressSDNode *G = nullptr; 4757 if (Src.getOpcode() == ISD::GlobalAddress) 4758 G = cast<GlobalAddressSDNode>(Src); 4759 else if (Src.getOpcode() == ISD::ADD && 4760 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 4761 Src.getOperand(1).getOpcode() == ISD::Constant) { 4762 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 4763 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 4764 } 4765 if (!G) 4766 return false; 4767 4768 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 4769 SrcDelta + G->getOffset()); 4770 } 4771 4772 /// Determines the optimal series of memory ops to replace the memset / memcpy. 4773 /// Return true if the number of memory ops is below the threshold (Limit). 4774 /// It returns the types of the sequence of memory ops to perform 4775 /// memset / memcpy by reference. 
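/// For example, a 16-byte copy on a target where i64 is legal is typically
/// lowered to two i64 memory ops.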
SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
                                           const SDLoc &DL) {
  EVT VT = Base.getValueType();
  return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
}

/// Returns true if memcpy source is constant data.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
  uint64_t SrcDelta = 0;
  GlobalAddressSDNode *G = nullptr;
  if (Src.getOpcode() == ISD::GlobalAddress)
    G = cast<GlobalAddressSDNode>(Src);
  else if (Src.getOpcode() == ISD::ADD &&
           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
           Src.getOperand(1).getOpcode() == ISD::Constant) {
    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
  }
  if (!G)
    return false;

  return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
                                  SrcDelta + G->getOffset());
}

/// Determines the optimal series of memory ops to replace the memset / memcpy.
/// Return true if the number of memory ops is below the threshold (Limit).
/// The types of the sequence of memory ops to perform the memset / memcpy are
/// returned by reference in MemOps.
static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
                                     unsigned Limit, uint64_t Size,
                                     unsigned DstAlign, unsigned SrcAlign,
                                     bool IsMemset,
                                     bool ZeroMemset,
                                     bool MemcpyStrSrc,
                                     bool AllowOverlap,
                                     unsigned DstAS, unsigned SrcAS,
                                     SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
         "Expecting memcpy / memset source to meet alignment requirement!");
  // If 'SrcAlign' is zero, that means the memory operation does not need to
  // load the value, i.e. memset or memcpy from constant string. Otherwise,
  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
  // is the specified alignment of the memory operation. If it is zero, that
  // means it's possible to change the alignment of the destination.
  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  // not need to be loaded.
  EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
                                   IsMemset, ZeroMemset, MemcpyStrSrc,
                                   DAG.getMachineFunction());

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    while (DstAlign && DstAlign < VT.getSizeInBits() / 8 &&
           !TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign))
      VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!TLI.isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  while (Size != 0) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            TLI.isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 TLI.isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      // FIXME: Only does this for 64-bit or more since we don't have a proper
      // cost model for unaligned load / store.
      bool Fast;
      if (NumMemOps && AllowOverlap &&
          VTSize >= 8 && NewVTSize < Size &&
          TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}
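// Illustrative walk-through, assuming a hypothetical target whose largest
// legal integer type is i64: for Size == 15 with AllowOverlap == false, the
// loop above produces MemOps == { i64, i32, i16, i8 } (8 + 4 + 2 + 1 == 15
// bytes). If the target instead reports fast misaligned accesses and
// AllowOverlap is set, it produces MemOps == { i64, i64 }, letting the final
// operation overlap the previous one by a byte rather than splitting the
// 7-byte tail into ever smaller pieces.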
static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
  // On Darwin, -Os means optimize for size without hurting performance, so
  // only really optimize for size when -Oz (MinSize) is used.
  if (MF.getTarget().getTargetTriple().isOSDarwin())
    return MF.getFunction()->optForMinSize();
  return MF.getFunction()->optForSize();
}

static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue Chain, SDValue Dst, SDValue Src,
                                       uint64_t Size, unsigned Align,
                                       bool isVol, bool AlwaysInline,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Turn a memcpy of undef to nop.
  if (Src.isUndef())
    return Chain;

  // Expand memcpy to a series of load and store ops if the size operand falls
  // below a certain threshold.
  // TODO: In the AlwaysInline case, if the size is big, generate a loop
  // rather than a potentially huge number of loads and stores.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  ConstantDataArraySlice Slice;
  bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
  bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);

  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align),
                                (isZeroConstant ? 0 : SrcAlign),
                                false, false, CopyFromConstant, true,
                                DstPtrInfo.getAddrSpace(),
                                SrcPtrInfo.getAddrSpace(),
                                DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);

    // Don't promote to an alignment that would require dynamic stack
    // realignment.
    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    if (!TRI->needsStackRealignment(MF))
      while (NewAlign > Align &&
             DL.exceedsNaturalStackAlignment(NewAlign))
        NewAlign /= 2;

    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  uint64_t SrcOff = 0, DstOff = 0;
  for (unsigned i = 0; i != NumMemOps; ++i) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      SrcOff -= VTSize - Size;
      DstOff -= VTSize - Size;
    }

    if (CopyFromConstant &&
        (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
      // It's unlikely a store of a vector immediate can be done in a single
      // instruction. It would require a load from a constant pool first.
      // We only handle zero vectors here.
      // FIXME: Handle other cases where store of vector immediate is done in
      // a single instruction.
      ConstantDataArraySlice SubSlice;
      if (SrcOff < Slice.Length) {
        SubSlice = Slice;
        SubSlice.move(SrcOff);
      } else {
        // This is an out-of-bounds access and hence UB. Pretend we read zero.
        SubSlice.Array = nullptr;
        SubSlice.Offset = 0;
        SubSlice.Length = VTSize;
      }
      Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
      if (Value.getNode())
        Store = DAG.getStore(Chain, dl, Value,
                             DAG.getMemBasePlusOffset(Dst, DstOff, dl),
                             DstPtrInfo.getWithOffset(DstOff), Align,
                             MMOFlags);
    }

    if (!Store.getNode()) {
      // The type might not be legal for the target. This should only happen
      // if the type is smaller than a legal type, as on PPC, so the right
      // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
      // to Load/Store if NVT==VT.
      // FIXME: does the case above also need this?
      EVT NVT = TLI.getTypeToTransformTo(C, VT);
      assert(NVT.bitsGE(VT));

      bool isDereferenceable =
        SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
      MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
      if (isDereferenceable)
        SrcMMOFlags |= MachineMemOperand::MODereferenceable;

      Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
                             DAG.getMemBasePlusOffset(Src, SrcOff, dl),
                             SrcPtrInfo.getWithOffset(SrcOff), VT,
                             MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
      OutChains.push_back(Value.getValue(1));
      Store = DAG.getTruncStore(
          Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
          DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
    }
    OutChains.push_back(Store);
    SrcOff += VTSize;
    DstOff += VTSize;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
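// Worked example of the overlap adjustment above (values illustrative): an
// inline memcpy of Size == 15 lowered with MemOps == { i64, i64 } emits
//
//   load/store of 8 bytes at offset 0   (Size drops to 7)
//   load/store of 8 bytes at offset 7   (VTSize > Size on the last op, so
//                                        SrcOff/DstOff are pulled back)
//
// The two operations overlap by one byte, which is harmless for memcpy:
// both write identical data to the shared byte.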
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                        SDValue Chain, SDValue Dst, SDValue Src,
                                        uint64_t Size, unsigned Align,
                                        bool isVol, bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
  // Turn a memmove of undef to nop.
  if (Src.isUndef())
    return Chain;

  // Expand memmove to a series of load and store ops if the size operand
  // falls below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);

  if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
                                (DstAlignCanChange ? 0 : Align), SrcAlign,
                                false, false, false, false,
                                DstPtrInfo.getAddrSpace(),
                                SrcPtrInfo.getAddrSpace(),
                                DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  uint64_t SrcOff = 0, DstOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value;

    bool isDereferenceable =
      SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
    MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
    if (isDereferenceable)
      SrcMMOFlags |= MachineMemOperand::MODereferenceable;

    Value =
        DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
                    SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
    SrcOff += VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  OutChains.clear();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Store;

    Store = DAG.getStore(Chain, dl, LoadValues[i],
                         DAG.getMemBasePlusOffset(Dst, DstOff, dl),
                         DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
    OutChains.push_back(Store);
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
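// Note: unlike the memcpy expansion above, which chains each load directly
// into its matching store, the memmove expansion issues *all* of the loads
// before *any* of the stores (two separate loops, joined by a TokenFactor
// on the load chains). That ordering is what makes the expansion correct
// when source and destination overlap: every source byte is read into a
// register before the first destination byte is written.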
/// \brief Lower the call to the 'memset' intrinsic function into a series of
/// store operations.
///
/// \param DAG Selection DAG where lowered code is placed.
/// \param dl Link to corresponding IR location.
/// \param Chain Control flow dependency.
/// \param Dst Pointer to destination memory location.
/// \param Src Value of byte to write into the memory.
/// \param Size Number of bytes to write.
/// \param Align Alignment of the destination in bytes.
/// \param isVol True if destination is volatile.
/// \param DstPtrInfo IR information on the memory pointer.
/// \returns New head in the control flow, if lowering was successful, empty
/// SDValue otherwise.
///
/// The function tries to replace the 'llvm.memset' intrinsic with several
/// store operations and value calculation code. This is usually profitable
/// for small memory sizes.
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
                               SDValue Chain, SDValue Dst, SDValue Src,
                               uint64_t Size, unsigned Align, bool isVol,
                               MachinePointerInfo DstPtrInfo) {
  // Turn a memset of undef to nop.
  if (Src.isUndef())
    return Chain;

  // Expand memset to a series of store ops if the size operand
  // falls below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  bool IsZeroVal =
    isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
  if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
                                Size, (DstAlignCanChange ? 0 : Align), 0,
                                true, IsZeroVal, false, true,
                                DstPtrInfo.getAddrSpace(), ~0u,
                                DAG, TLI))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  uint64_t DstOff = 0;
  unsigned NumMemOps = MemOps.size();

  // Find the largest store and generate the bit pattern for it.
  EVT LargestVT = MemOps[0];
  for (unsigned i = 1; i < NumMemOps; i++)
    if (MemOps[i].bitsGT(LargestVT))
      LargestVT = MemOps[i];
  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);

  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      DstOff -= VTSize - Size;
    }

    // If this store is smaller than the largest store see whether we can get
    // the smaller value for free with a truncate.
    SDValue Value = MemSetValue;
    if (VT.bitsLT(LargestVT)) {
      if (!LargestVT.isVector() && !VT.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
      else
        Value = getMemsetValue(Src, VT, DAG, dl);
    }
    assert(Value.getValueType() == VT && "Value with wrong type.");
    SDValue Store = DAG.getStore(
        Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
        DstPtrInfo.getWithOffset(DstOff), Align,
        isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
    OutChains.push_back(Store);
    DstOff += VT.getSizeInBits() / 8;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
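// Worked example (values illustrative): a 7-byte memset with fill byte 0xAB
// and MemOps == { i32, i16, i8 } materializes the largest pattern once,
// 0xABABABAB, and derives the narrower values with ISD::TRUNCATE when the
// target reports the truncate as free:
//
//   store i32 0xABABABAB at offset 0
//   store i16 0xABAB     at offset 4
//   store i8  0xAB       at offset 6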
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
                                            unsigned AS) {
  // Lowering memcpy / memset / memmove intrinsics to calls is only valid if
  // all pointer operands can be losslessly bitcasted to pointers of address
  // space 0.
  if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
    report_fatal_error("cannot lower memory intrinsic in address space " +
                       Twine(AS));
  }
}

SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, unsigned Align,
                                bool isVol, bool AlwaysInline, bool isTailCall,
                                MachinePointerInfo DstPtrInfo,
                                MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memcpy to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memcpy with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                             ConstantSize->getZExtValue(),
                                             Align, isVol, false, DstPtrInfo,
                                             SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memcpy with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemcpy(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
        DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // If we really need inline code and the target declined to provide it,
  // use a (potentially long) sequence of loads and stores.
  if (AlwaysInline) {
    assert(ConstantSize && "AlwaysInline requires a constant size!");
    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                   ConstantSize->getZExtValue(), Align, isVol,
                                   true, DstPtrInfo, SrcPtrInfo);
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());

  // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
  // memcpy is not guaranteed to be safe. libc memcpys aren't required to
  // respect volatile, so they may do things like read or write memory
  // beyond the given memory regions. But fixing this isn't easy, and most
  // people don't care.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}
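// Illustrative sketch of a call site (operand names are placeholders, not
// taken from this file): a caller holding a SelectionDAG &DAG only supplies
// the operands and lets the cascade above choose the strategy:
//
//   SDValue NewChain =
//       DAG.getMemcpy(Chain, dl, DstPtr, SrcPtr,
//                     DAG.getConstant(32, dl, PtrVT), /*Align=*/4,
//                     /*isVol=*/false, /*AlwaysInline=*/false,
//                     /*isTailCall=*/false, DstPtrInfo, SrcPtrInfo);
//
// A small constant-size copy like this is normally inlined by the first
// step; a variable Size skips straight to the target hook or the libcall.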
SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                 SDValue Src, SDValue Size, unsigned Align,
                                 bool isVol, bool isTailCall,
                                 MachinePointerInfo DstPtrInfo,
                                 MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memmove to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memmove with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
      getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
                               ConstantSize->getZExtValue(), Align, isVol,
                               false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memmove with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemmove(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());

  // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
  // not be safe. See memcpy above for more details.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, unsigned Align,
                                bool isVol, bool isTailCall,
                                MachinePointerInfo DstPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memset to stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memset with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
      getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
                      Align, isVol, DstPtrInfo);

    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memset with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemset(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());

  // Emit a library call.
  Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Dst; Entry.Ty = IntPtrTy;
  Args.push_back(Entry);
  Entry.Node = Src;
  Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
  Args.push_back(Entry);
  Entry.Node = Size;
  Entry.Ty = IntPtrTy;
  Args.push_back(Entry);

  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO) {
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  AddNodeIDNode(ID, Opcode, VTList, Ops);
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                    VTList, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getAtomicCmpSwap(
    unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain,
    SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
    unsigned Alignment, AtomicOrdering SuccessOrdering,
    AtomicOrdering FailureOrdering, SyncScope::ID SSID) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();

  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad |
               MachineMemOperand::MOStore;
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
                            AAMDNodes(), nullptr, SSID, SuccessOrdering,
                            FailureOrdering);

  return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO);
}

SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
                                       EVT MemVT, SDVTList VTs, SDValue Chain,
                                       SDValue Ptr, SDValue Cmp, SDValue Swp,
                                       MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDValue Chain, SDValue Ptr, SDValue Val,
                                const Value *PtrVal, unsigned Alignment,
                                AtomicOrdering Ordering,
                                SyncScope::ID SSID) {
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  // An atomic store does not load. An atomic load does not store.
  // (An atomicrmw obviously both loads and stores.)
  // For now, atomics are considered to be volatile always, and they are
  // chained as such.
  // FIXME: Volatile isn't really correct; we should keep track of atomic
  // orderings in the memoperand.
  auto Flags = MachineMemOperand::MOVolatile;
  if (Opcode != ISD::ATOMIC_STORE)
    Flags |= MachineMemOperand::MOLoad;
  if (Opcode != ISD::ATOMIC_LOAD)
    Flags |= MachineMemOperand::MOStore;

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
                            MemVT.getStoreSize(), Alignment, AAMDNodes(),
                            nullptr, SSID, Ordering);

  return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDValue Chain, SDValue Ptr, SDValue Val,
                                MachineMemOperand *MMO) {
  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
          Opcode == ISD::ATOMIC_LOAD_SUB ||
          Opcode == ISD::ATOMIC_LOAD_AND ||
          Opcode == ISD::ATOMIC_LOAD_OR ||
          Opcode == ISD::ATOMIC_LOAD_XOR ||
          Opcode == ISD::ATOMIC_LOAD_NAND ||
          Opcode == ISD::ATOMIC_LOAD_MIN ||
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");

  EVT VT = Val.getValueType();

  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
                                               getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Val};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                EVT VT, SDValue Chain, SDValue Ptr,
                                MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}
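// Illustrative sketch (operand names are placeholders): lowering an IR
// cmpxchg typically goes through the PtrInfo-based getAtomicCmpSwap above,
// which builds the MachineMemOperand and defers to the MMO-based overload:
//
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i1, MVT::Other);
//   SDValue Swap = DAG.getAtomicCmpSwap(
//       ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MVT::i32, VTs, Chain, Ptr,
//       Cmp, New, MachinePointerInfo(PtrVal), /*Alignment=*/4,
//       SuccessOrdering, FailureOrdering, SSID);
//
// The results follow the VT list: value 0 is the loaded value, value 1 the
// success bit, value 2 the output chain.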
/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
  if (Ops.size() == 1)
    return Ops[0];

  SmallVector<EVT, 4> VTs;
  VTs.reserve(Ops.size());
  for (unsigned i = 0; i < Ops.size(); ++i)
    VTs.push_back(Ops[i].getValueType());
  return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
}

SDValue SelectionDAG::getMemIntrinsicNode(
    unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
    EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, bool Vol,
    bool ReadMem, bool WriteMem, unsigned Size) {
  if (Align == 0) // Ensure that codegen never sees alignment 0
    Align = getEVTAlignment(MemVT);

  MachineFunction &MF = getMachineFunction();
  auto Flags = MachineMemOperand::MONone;
  if (WriteMem)
    Flags |= MachineMemOperand::MOStore;
  if (ReadMem)
    Flags |= MachineMemOperand::MOLoad;
  if (Vol)
    Flags |= MachineMemOperand::MOVolatile;
  if (!Size)
    Size = MemVT.getStoreSize();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);

  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}

SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
                                          SDVTList VTList,
                                          ArrayRef<SDValue> Ops, EVT MemVT,
                                          MachineMemOperand *MMO) {
  assert((Opcode == ISD::INTRINSIC_VOID ||
          Opcode == ISD::INTRINSIC_W_CHAIN ||
          Opcode == ISD::PREFETCH ||
          Opcode == ISD::LIFETIME_START ||
          Opcode == ISD::LIFETIME_END ||
          ((int)Opcode <= std::numeric_limits<int>::max() &&
           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
         "Opcode is not a memory-accessing opcode!");

  // Memoize the node unless it returns a flag.
  MemIntrinsicSDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      return SDValue(E, 0);
    }

    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                      VTList, MemVT, MMO);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                      VTList, MemVT, MMO);
    createOperands(N, Ops);
  }
  InsertNode(N);
  return SDValue(N, 0);
}
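// Illustrative sketch (operand names are placeholders): a target-specific
// memory intrinsic is usually built through the PtrInfo-based overload
// above, which synthesizes the MachineMemOperand:
//
//   SDValue Ops[] = { Chain, IntrinsicId, Ptr };
//   SDValue Res = DAG.getMemIntrinsicNode(
//       ISD::INTRINSIC_W_CHAIN, dl, DAG.getVTList(MVT::v4i32, MVT::Other),
//       Ops, /*MemVT=*/MVT::v4i32, MachinePointerInfo(PtrVal), /*Align=*/0,
//       /*Vol=*/false, /*ReadMem=*/true, /*WriteMem=*/false, /*Size=*/0);
//
// Align == 0 and Size == 0 request the natural alignment and store size of
// MemVT, exactly as handled at the top of the PtrInfo-based overload.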
/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr,
                                           int64_t Offset = 0) {
  // If this is FI+Offset, we can model it.
  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
    return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                             FI->getIndex(), Offset);

  // If this is (FI+Offset1)+Offset2, we can model it.
  if (Ptr.getOpcode() != ISD::ADD ||
      !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
      !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
    return MachinePointerInfo();

  int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
  return MachinePointerInfo::getFixedStack(
      DAG.getMachineFunction(), FI,
      Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr,
                                           SDValue OffsetOp) {
  // If the 'Offset' value isn't a constant, we can't handle this.
  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
    return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue());
  if (OffsetOp.isUndef())
    return InferPointerInfo(DAG, Ptr);
  return MachinePointerInfo();
}
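// Example: for a pointer expressed as
//
//   (add (FrameIndex 2), (Constant 16))
//
// the helpers above recover MachinePointerInfo::getFixedStack(MF, 2, 16),
// so alias analysis still sees a precise fixed-stack location even when the
// caller did not pass any pointer info.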
SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset,
                              MachinePointerInfo PtrInfo, EVT MemVT,
                              unsigned Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);
  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
  // clients.
  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(*this, Ptr, Offset);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
  return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}

SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset, EVT MemVT,
                              MachineMemOperand *MMO) {
  if (VT == MemVT) {
    ExtType = ISD::NON_EXTLOAD;
  } else if (ExtType == ISD::NON_EXTLOAD) {
    assert(VT == MemVT && "Non-extending load from different memory type!");
  } else {
    // Extending load.
    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
           "Should only be an extending load, not truncating!");
    assert(VT.isInteger() == MemVT.isInteger() &&
           "Cannot convert from FP to Int or Int -> FP!");
    assert(VT.isVector() == MemVT.isVector() &&
           "Cannot use an ext load to convert to or from a vector!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
           "Cannot use an ext load to change the number of vector elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");

  SDVTList VTs = Indexed ?
    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                  ExtType, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachinePointerInfo PtrInfo,
                              unsigned Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 unsigned Alignment,
                                 MachineMemOperand::Flags MMOFlags,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
                 MemVT, Alignment, MMOFlags, AAInfo);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
                                     SDValue Base, SDValue Offset,
                                     ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
  // Don't propagate the invariant or dereferenceable flags.
  auto MMOFlags =
      LD->getMemOperand()->getFlags() &
      ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
                 LD->getAAInfo());
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachinePointerInfo PtrInfo,
                               unsigned Alignment,
                               MachineMemOperand::Flags MMOFlags,
                               const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(Val.getValueType());

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(*this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
  return getStore(Chain, dl, Val, Ptr, MMO);
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachineMemOperand *MMO) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, false, VT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, MachinePointerInfo PtrInfo,
                                    EVT SVT, unsigned Alignment,
                                    MachineMemOperand::Flags MMOFlags,
                                    const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(SVT);

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(*this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, EVT SVT,
                                    MachineMemOperand *MMO) {
  EVT VT = Val.getValueType();

  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (VT == SVT)
    return getStore(Chain, dl, Val, Ptr, MMO);

  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
"Should only be a truncating store, not extending!"); 5861 assert(VT.isInteger() == SVT.isInteger() && 5862 "Can't do FP-INT conversion!"); 5863 assert(VT.isVector() == SVT.isVector() && 5864 "Cannot use trunc store to convert to or from a vector!"); 5865 assert((!VT.isVector() || 5866 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 5867 "Cannot use trunc store to change the number of vector elements!"); 5868 5869 SDVTList VTs = getVTList(MVT::Other); 5870 SDValue Undef = getUNDEF(Ptr.getValueType()); 5871 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 5872 FoldingSetNodeID ID; 5873 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 5874 ID.AddInteger(SVT.getRawBits()); 5875 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 5876 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 5877 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5878 void *IP = nullptr; 5879 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5880 cast<StoreSDNode>(E)->refineAlignment(MMO); 5881 return SDValue(E, 0); 5882 } 5883 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5884 ISD::UNINDEXED, true, SVT, MMO); 5885 createOperands(N, Ops); 5886 5887 CSEMap.InsertNode(N, IP); 5888 InsertNode(N); 5889 return SDValue(N, 0); 5890 } 5891 5892 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 5893 SDValue Base, SDValue Offset, 5894 ISD::MemIndexedMode AM) { 5895 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 5896 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 5897 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 5898 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 5899 FoldingSetNodeID ID; 5900 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 5901 ID.AddInteger(ST->getMemoryVT().getRawBits()); 5902 ID.AddInteger(ST->getRawSubclassData()); 5903 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 5904 void *IP = nullptr; 5905 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 5906 return SDValue(E, 0); 5907 5908 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 5909 ST->isTruncatingStore(), ST->getMemoryVT(), 5910 ST->getMemOperand()); 5911 createOperands(N, Ops); 5912 5913 CSEMap.InsertNode(N, IP); 5914 InsertNode(N); 5915 return SDValue(N, 0); 5916 } 5917 5918 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 5919 SDValue Ptr, SDValue Mask, SDValue Src0, 5920 EVT MemVT, MachineMemOperand *MMO, 5921 ISD::LoadExtType ExtTy, bool isExpanding) { 5922 SDVTList VTs = getVTList(VT, MVT::Other); 5923 SDValue Ops[] = { Chain, Ptr, Mask, Src0 }; 5924 FoldingSetNodeID ID; 5925 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 5926 ID.AddInteger(VT.getRawBits()); 5927 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 5928 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO)); 5929 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5930 void *IP = nullptr; 5931 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5932 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 5933 return SDValue(E, 0); 5934 } 5935 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5936 ExtTy, isExpanding, MemVT, MMO); 5937 createOperands(N, Ops); 5938 5939 CSEMap.InsertNode(N, IP); 5940 InsertNode(N); 5941 return SDValue(N, 0); 5942 } 5943 5944 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 5945 SDValue Val, SDValue Ptr, SDValue Mask, 5946 EVT MemVT, MachineMemOperand *MMO, 5947 bool IsTruncating, bool IsCompressing) { 5948 assert(Chain.getValueType() 
SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   ST->isTruncatingStore(), ST->getMemoryVT(),
                                   ST->getMemOperand());
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                                    SDValue Ptr, SDValue Mask, SDValue Src0,
                                    EVT MemVT, MachineMemOperand *MMO,
                                    ISD::LoadExtType ExtTy, bool isExpanding) {
  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
      dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                        ExtTy, isExpanding, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
                                     SDValue Val, SDValue Ptr, SDValue Mask,
                                     EVT MemVT, MachineMemOperand *MMO,
                                     bool IsTruncating, bool IsCompressing) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Mask, Val };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
      dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                         IsTruncating, IsCompressing, MemVT,
                                         MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
                                      ArrayRef<SDValue> Ops,
                                      MachineMemOperand *MMO) {
  assert(Ops.size() == 5 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
      dl.getIROrder(), VTs, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                          VTs, VT, MMO);
  createOperands(N, Ops);

  assert(N->getValue().getValueType() == N->getValueType(0) &&
         "Incompatible type of the PassThru value in MaskedGatherSDNode");
  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() ==
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between index and data");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
                                       ArrayRef<SDValue> Ops,
                                       MachineMemOperand *MMO) {
  assert(Ops.size() == 5 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
      dl.getIROrder(), VTs, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, VT, MMO);
  createOperands(N, Ops);

  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() ==
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between index and data");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
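// Note: for both MGATHER and MSCATTER, the asserts above pin down the shape
// of the operation: the mask, the index vector, and the data (or PassThru)
// vector must all agree on the element count. For example, a gather
// producing v8i32 needs an 8-element mask and an 8-element index vector,
// with each lane's address derived from the shared base pointer and that
// lane's index.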
SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
                               SDValue Ptr, SDValue SV, unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  unsigned NumOps = Ops.size();
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  switch (Opcode) {
  default: break;
  case ISD::CONCAT_VECTORS:
    // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
    if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::SELECT_CC:
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  case ISD::BR_CC:
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }

  // Memoize nodes.
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;

    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops);

#if 0
  switch (Opcode) {
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
  }
#endif

  // Memoize the node unless it returns a flag.
  SDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
  }
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList) {
  return getNode(Opcode, DL, VTList, None);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1) {
  SDValue Ops[] = { N1 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2) {
  SDValue Ops[] = { N1, N2 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDValue Ops[] = { N1, N2, N3 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDVTList SelectionDAG::getVTList(EVT VT) {
  return makeVTList(SDNode::getValueTypeList(VT), 1);
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
  FoldingSetNodeID ID;
  ID.AddInteger(2U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(2);
    Array[0] = VT1;
    Array[1] = VT2;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
  FoldingSetNodeID ID;
  ID.AddInteger(3U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(3);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
  FoldingSetNodeID ID;
  ID.AddInteger(4U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());
  ID.AddInteger(VT4.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
6263 EVT *Array = Allocator.Allocate<EVT>(4); 6264 Array[0] = VT1; 6265 Array[1] = VT2; 6266 Array[2] = VT3; 6267 Array[3] = VT4; 6268 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 6269 VTListMap.InsertNode(Result, IP); 6270 } 6271 return Result->getSDVTList(); 6272 } 6273 6274 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 6275 unsigned NumVTs = VTs.size(); 6276 FoldingSetNodeID ID; 6277 ID.AddInteger(NumVTs); 6278 for (unsigned index = 0; index < NumVTs; index++) { 6279 ID.AddInteger(VTs[index].getRawBits()); 6280 } 6281 6282 void *IP = nullptr; 6283 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6284 if (!Result) { 6285 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 6286 std::copy(VTs.begin(), VTs.end(), Array); 6287 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 6288 VTListMap.InsertNode(Result, IP); 6289 } 6290 return Result->getSDVTList(); 6291 } 6292 6293 6294 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 6295 /// specified operands. If the resultant node already exists in the DAG, 6296 /// this does not modify the specified node, instead it returns the node that 6297 /// already exists. If the resultant node does not exist in the DAG, the 6298 /// input node is returned. As a degenerate case, if you specify the same 6299 /// input operands as the node already has, the input node is returned. 6300 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 6301 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 6302 6303 // Check to see if there is no change. 6304 if (Op == N->getOperand(0)) return N; 6305 6306 // See if the modified node already exists. 6307 void *InsertPos = nullptr; 6308 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 6309 return Existing; 6310 6311 // Nope it doesn't. Remove the node from its current place in the maps. 6312 if (InsertPos) 6313 if (!RemoveNodeFromCSEMaps(N)) 6314 InsertPos = nullptr; 6315 6316 // Now we update the operands. 6317 N->OperandList[0].set(Op); 6318 6319 // If this gets put into a CSE map, add it. 6320 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6321 return N; 6322 } 6323 6324 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 6325 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 6326 6327 // Check to see if there is no change. 6328 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 6329 return N; // No operands changed, just return the input node. 6330 6331 // See if the modified node already exists. 6332 void *InsertPos = nullptr; 6333 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 6334 return Existing; 6335 6336 // Nope it doesn't. Remove the node from its current place in the maps. 6337 if (InsertPos) 6338 if (!RemoveNodeFromCSEMaps(N)) 6339 InsertPos = nullptr; 6340 6341 // Now we update the operands. 6342 if (N->OperandList[0] != Op1) 6343 N->OperandList[0].set(Op1); 6344 if (N->OperandList[1] != Op2) 6345 N->OperandList[1].set(Op2); 6346 6347 // If this gets put into a CSE map, add it. 
6348 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6349 return N; 6350 } 6351 6352 SDNode *SelectionDAG:: 6353 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 6354 SDValue Ops[] = { Op1, Op2, Op3 }; 6355 return UpdateNodeOperands(N, Ops); 6356 } 6357 6358 SDNode *SelectionDAG:: 6359 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 6360 SDValue Op3, SDValue Op4) { 6361 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 6362 return UpdateNodeOperands(N, Ops); 6363 } 6364 6365 SDNode *SelectionDAG:: 6366 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 6367 SDValue Op3, SDValue Op4, SDValue Op5) { 6368 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 6369 return UpdateNodeOperands(N, Ops); 6370 } 6371 6372 SDNode *SelectionDAG:: 6373 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 6374 unsigned NumOps = Ops.size(); 6375 assert(N->getNumOperands() == NumOps && 6376 "Update with wrong number of operands"); 6377 6378 // If no operands changed just return the input node. 6379 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 6380 return N; 6381 6382 // See if the modified node already exists. 6383 void *InsertPos = nullptr; 6384 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 6385 return Existing; 6386 6387 // Nope it doesn't. Remove the node from its current place in the maps. 6388 if (InsertPos) 6389 if (!RemoveNodeFromCSEMaps(N)) 6390 InsertPos = nullptr; 6391 6392 // Now we update the operands. 6393 for (unsigned i = 0; i != NumOps; ++i) 6394 if (N->OperandList[i] != Ops[i]) 6395 N->OperandList[i].set(Ops[i]); 6396 6397 // If this gets put into a CSE map, add it. 6398 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6399 return N; 6400 } 6401 6402 /// DropOperands - Release the operands and set this node to have 6403 /// zero operands. 6404 void SDNode::DropOperands() { 6405 // Unlike the code in MorphNodeTo that does this, we don't need to 6406 // watch for dead nodes here. 6407 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 6408 SDUse &Use = *I++; 6409 Use.set(SDValue()); 6410 } 6411 } 6412 6413 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 6414 /// machine opcode. 
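/// For example, during instruction selection a target's Select hook can morph
/// a target-independent node into a machine node in place. A minimal sketch
/// (the machine opcode XYZ::ADDri and the operand ImmOp are hypothetical):
/// \code
///   SDNode *New = CurDAG->SelectNodeTo(N, XYZ::ADDri, MVT::i32,
///                                      N->getOperand(0), ImmOp);
/// \endcode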
6415 /// 6416 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6417 EVT VT) { 6418 SDVTList VTs = getVTList(VT); 6419 return SelectNodeTo(N, MachineOpc, VTs, None); 6420 } 6421 6422 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6423 EVT VT, SDValue Op1) { 6424 SDVTList VTs = getVTList(VT); 6425 SDValue Ops[] = { Op1 }; 6426 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6427 } 6428 6429 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6430 EVT VT, SDValue Op1, 6431 SDValue Op2) { 6432 SDVTList VTs = getVTList(VT); 6433 SDValue Ops[] = { Op1, Op2 }; 6434 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6435 } 6436 6437 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6438 EVT VT, SDValue Op1, 6439 SDValue Op2, SDValue Op3) { 6440 SDVTList VTs = getVTList(VT); 6441 SDValue Ops[] = { Op1, Op2, Op3 }; 6442 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6443 } 6444 6445 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6446 EVT VT, ArrayRef<SDValue> Ops) { 6447 SDVTList VTs = getVTList(VT); 6448 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6449 } 6450 6451 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6452 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 6453 SDVTList VTs = getVTList(VT1, VT2); 6454 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6455 } 6456 6457 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6458 EVT VT1, EVT VT2) { 6459 SDVTList VTs = getVTList(VT1, VT2); 6460 return SelectNodeTo(N, MachineOpc, VTs, None); 6461 } 6462 6463 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6464 EVT VT1, EVT VT2, EVT VT3, 6465 ArrayRef<SDValue> Ops) { 6466 SDVTList VTs = getVTList(VT1, VT2, VT3); 6467 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6468 } 6469 6470 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6471 EVT VT1, EVT VT2, 6472 SDValue Op1, SDValue Op2) { 6473 SDVTList VTs = getVTList(VT1, VT2); 6474 SDValue Ops[] = { Op1, Op2 }; 6475 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6476 } 6477 6478 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6479 SDVTList VTs,ArrayRef<SDValue> Ops) { 6480 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 6481 // Reset the NodeID to -1. 6482 New->setNodeId(-1); 6483 if (New != N) { 6484 ReplaceAllUsesWith(N, New); 6485 RemoveDeadNode(N); 6486 } 6487 return New; 6488 } 6489 6490 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away 6491 /// the line number information on the merged node since it is not possible to 6492 /// preserve the information that operation is associated with multiple lines. 6493 /// This will make the debugger working better at -O0, were there is a higher 6494 /// probability having other instructions associated with that line. 6495 /// 6496 /// For IROrder, we keep the smaller of the two 6497 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { 6498 DebugLoc NLoc = N->getDebugLoc(); 6499 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) { 6500 N->setDebugLoc(DebugLoc()); 6501 } 6502 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); 6503 N->setIROrder(Order); 6504 return N; 6505 } 6506 6507 /// MorphNodeTo - This *mutates* the specified node to have the specified 6508 /// return type, opcode, and operands. 6509 /// 6510 /// Note that MorphNodeTo returns the resultant node. 
If there is already a 6511 /// node of the specified opcode and operands, it returns that node instead of 6512 /// the current one. Note that the SDLoc need not be the same. 6513 /// 6514 /// Using MorphNodeTo is faster than creating a new node and swapping it in 6515 /// with ReplaceAllUsesWith both because it often avoids allocating a new 6516 /// node, and because it doesn't require CSE recalculation for any of 6517 /// the node's users. 6518 /// 6519 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG. 6520 /// As a consequence it isn't appropriate to use from within the DAG combiner or 6521 /// the legalizer which maintain worklists that would need to be updated when 6522 /// deleting things. 6523 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, 6524 SDVTList VTs, ArrayRef<SDValue> Ops) { 6525 // If an identical node already exists, use it. 6526 void *IP = nullptr; 6527 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { 6528 FoldingSetNodeID ID; 6529 AddNodeIDNode(ID, Opc, VTs, Ops); 6530 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) 6531 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); 6532 } 6533 6534 if (!RemoveNodeFromCSEMaps(N)) 6535 IP = nullptr; 6536 6537 // Start the morphing. 6538 N->NodeType = Opc; 6539 N->ValueList = VTs.VTs; 6540 N->NumValues = VTs.NumVTs; 6541 6542 // Clear the operands list, updating used nodes to remove this from their 6543 // use list. Keep track of any operands that become dead as a result. 6544 SmallPtrSet<SDNode*, 16> DeadNodeSet; 6545 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 6546 SDUse &Use = *I++; 6547 SDNode *Used = Use.getNode(); 6548 Use.set(SDValue()); 6549 if (Used->use_empty()) 6550 DeadNodeSet.insert(Used); 6551 } 6552 6553 // For MachineNode, initialize the memory references information. 6554 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) 6555 MN->setMemRefs(nullptr, nullptr); 6556 6557 // Swap for an appropriately sized array from the recycler. 6558 removeOperands(N); 6559 createOperands(N, Ops); 6560 6561 // Delete any nodes that are still dead after adding the uses for the 6562 // new operands. 6563 if (!DeadNodeSet.empty()) { 6564 SmallVector<SDNode *, 16> DeadNodes; 6565 for (SDNode *N : DeadNodeSet) 6566 if (N->use_empty()) 6567 DeadNodes.push_back(N); 6568 RemoveDeadNodes(DeadNodes); 6569 } 6570 6571 if (IP) 6572 CSEMap.InsertNode(N, IP); // Memoize the new node. 
6573 return N; 6574 } 6575 6576 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) { 6577 unsigned OrigOpc = Node->getOpcode(); 6578 unsigned NewOpc; 6579 bool IsUnary = false; 6580 switch (OrigOpc) { 6581 default: 6582 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!"); 6583 case ISD::STRICT_FADD: NewOpc = ISD::FADD; break; 6584 case ISD::STRICT_FSUB: NewOpc = ISD::FSUB; break; 6585 case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break; 6586 case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break; 6587 case ISD::STRICT_FREM: NewOpc = ISD::FREM; break; 6588 case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; IsUnary = true; break; 6589 case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break; 6590 case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break; 6591 case ISD::STRICT_FSIN: NewOpc = ISD::FSIN; IsUnary = true; break; 6592 case ISD::STRICT_FCOS: NewOpc = ISD::FCOS; IsUnary = true; break; 6593 case ISD::STRICT_FEXP: NewOpc = ISD::FEXP; IsUnary = true; break; 6594 case ISD::STRICT_FEXP2: NewOpc = ISD::FEXP2; IsUnary = true; break; 6595 case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; IsUnary = true; break; 6596 case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; IsUnary = true; break; 6597 case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; IsUnary = true; break; 6598 case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; IsUnary = true; break; 6599 case ISD::STRICT_FNEARBYINT: 6600 NewOpc = ISD::FNEARBYINT; 6601 IsUnary = true; 6602 break; 6603 } 6604 6605 // We're taking this node out of the chain, so we need to re-link things. 6606 SDValue InputChain = Node->getOperand(0); 6607 SDValue OutputChain = SDValue(Node, 1); 6608 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 6609 6610 SDVTList VTs = getVTList(Node->getOperand(1).getValueType()); 6611 SDNode *Res = nullptr; 6612 if (IsUnary) 6613 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1) }); 6614 else 6615 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1), 6616 Node->getOperand(2) }); 6617 6618 // MorphNodeTo can operate in two ways: if an existing node with the 6619 // specified operands exists, it can just return it. Otherwise, it 6620 // updates the node in place to have the requested operands. 6621 if (Res == Node) { 6622 // If we updated the node in place, reset the node ID. To the isel, 6623 // this should be just like a newly allocated machine node. 6624 Res->setNodeId(-1); 6625 } else { 6626 ReplaceAllUsesWith(Node, Res); 6627 RemoveDeadNode(Node); 6628 } 6629 6630 return Res; 6631 } 6632 6633 /// getMachineNode - These are used for target selectors to create a new node 6634 /// with specified return type(s), MachineInstr opcode, and operands. 6635 /// 6636 /// Note that getMachineNode returns the resultant node. If there is already a 6637 /// node of the specified opcode and operands, it returns that node instead of 6638 /// the current one. 
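/// For example, a generic EXTRACT_SUBREG machine node can be built directly;
/// a sketch, assuming Reg is an existing SDValue and Idx is a target constant
/// holding the subregister index (cf. getTargetExtractSubreg below):
/// \code
///   MachineSDNode *Sub = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
///                                           DL, MVT::i32, Reg, Idx);
/// \endcode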
6639 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6640 EVT VT) { 6641 SDVTList VTs = getVTList(VT); 6642 return getMachineNode(Opcode, dl, VTs, None); 6643 } 6644 6645 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6646 EVT VT, SDValue Op1) { 6647 SDVTList VTs = getVTList(VT); 6648 SDValue Ops[] = { Op1 }; 6649 return getMachineNode(Opcode, dl, VTs, Ops); 6650 } 6651 6652 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6653 EVT VT, SDValue Op1, SDValue Op2) { 6654 SDVTList VTs = getVTList(VT); 6655 SDValue Ops[] = { Op1, Op2 }; 6656 return getMachineNode(Opcode, dl, VTs, Ops); 6657 } 6658 6659 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6660 EVT VT, SDValue Op1, SDValue Op2, 6661 SDValue Op3) { 6662 SDVTList VTs = getVTList(VT); 6663 SDValue Ops[] = { Op1, Op2, Op3 }; 6664 return getMachineNode(Opcode, dl, VTs, Ops); 6665 } 6666 6667 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6668 EVT VT, ArrayRef<SDValue> Ops) { 6669 SDVTList VTs = getVTList(VT); 6670 return getMachineNode(Opcode, dl, VTs, Ops); 6671 } 6672 6673 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6674 EVT VT1, EVT VT2, SDValue Op1, 6675 SDValue Op2) { 6676 SDVTList VTs = getVTList(VT1, VT2); 6677 SDValue Ops[] = { Op1, Op2 }; 6678 return getMachineNode(Opcode, dl, VTs, Ops); 6679 } 6680 6681 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6682 EVT VT1, EVT VT2, SDValue Op1, 6683 SDValue Op2, SDValue Op3) { 6684 SDVTList VTs = getVTList(VT1, VT2); 6685 SDValue Ops[] = { Op1, Op2, Op3 }; 6686 return getMachineNode(Opcode, dl, VTs, Ops); 6687 } 6688 6689 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6690 EVT VT1, EVT VT2, 6691 ArrayRef<SDValue> Ops) { 6692 SDVTList VTs = getVTList(VT1, VT2); 6693 return getMachineNode(Opcode, dl, VTs, Ops); 6694 } 6695 6696 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6697 EVT VT1, EVT VT2, EVT VT3, 6698 SDValue Op1, SDValue Op2) { 6699 SDVTList VTs = getVTList(VT1, VT2, VT3); 6700 SDValue Ops[] = { Op1, Op2 }; 6701 return getMachineNode(Opcode, dl, VTs, Ops); 6702 } 6703 6704 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6705 EVT VT1, EVT VT2, EVT VT3, 6706 SDValue Op1, SDValue Op2, 6707 SDValue Op3) { 6708 SDVTList VTs = getVTList(VT1, VT2, VT3); 6709 SDValue Ops[] = { Op1, Op2, Op3 }; 6710 return getMachineNode(Opcode, dl, VTs, Ops); 6711 } 6712 6713 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6714 EVT VT1, EVT VT2, EVT VT3, 6715 ArrayRef<SDValue> Ops) { 6716 SDVTList VTs = getVTList(VT1, VT2, VT3); 6717 return getMachineNode(Opcode, dl, VTs, Ops); 6718 } 6719 6720 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6721 ArrayRef<EVT> ResultTys, 6722 ArrayRef<SDValue> Ops) { 6723 SDVTList VTs = getVTList(ResultTys); 6724 return getMachineNode(Opcode, dl, VTs, Ops); 6725 } 6726 6727 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 6728 SDVTList VTs, 6729 ArrayRef<SDValue> Ops) { 6730 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 6731 MachineSDNode *N; 6732 void *IP = nullptr; 6733 6734 if (DoCSE) { 6735 FoldingSetNodeID ID; 6736 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 6737 IP = nullptr; 6738 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 6739 return 
cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 6740 } 6741 } 6742 6743 // Allocate a new MachineSDNode. 6744 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 6745 createOperands(N, Ops); 6746 6747 if (DoCSE) 6748 CSEMap.InsertNode(N, IP); 6749 6750 InsertNode(N); 6751 return N; 6752 } 6753 6754 /// getTargetExtractSubreg - A convenience function for creating 6755 /// TargetOpcode::EXTRACT_SUBREG nodes. 6756 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 6757 SDValue Operand) { 6758 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 6759 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 6760 VT, Operand, SRIdxVal); 6761 return SDValue(Subreg, 0); 6762 } 6763 6764 /// getTargetInsertSubreg - A convenience function for creating 6765 /// TargetOpcode::INSERT_SUBREG nodes. 6766 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 6767 SDValue Operand, SDValue Subreg) { 6768 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 6769 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 6770 VT, Operand, Subreg, SRIdxVal); 6771 return SDValue(Result, 0); 6772 } 6773 6774 /// getNodeIfExists - Get the specified node if it's already available, or 6775 /// else return NULL. 6776 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 6777 ArrayRef<SDValue> Ops, 6778 const SDNodeFlags Flags) { 6779 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 6780 FoldingSetNodeID ID; 6781 AddNodeIDNode(ID, Opcode, VTList, Ops); 6782 void *IP = nullptr; 6783 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 6784 E->intersectFlagsWith(Flags); 6785 return E; 6786 } 6787 } 6788 return nullptr; 6789 } 6790 6791 /// getDbgValue - Creates a SDDbgValue node. 6792 /// 6793 /// SDNode 6794 SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N, 6795 unsigned R, bool IsIndirect, uint64_t Off, 6796 const DebugLoc &DL, unsigned O) { 6797 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6798 "Expected inlined-at fields to agree"); 6799 return new (DbgInfo->getAlloc()) 6800 SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O); 6801 } 6802 6803 /// Constant 6804 SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr, 6805 const Value *C, uint64_t Off, 6806 const DebugLoc &DL, unsigned O) { 6807 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6808 "Expected inlined-at fields to agree"); 6809 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, Off, DL, O); 6810 } 6811 6812 /// FrameIndex 6813 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr, 6814 unsigned FI, uint64_t Off, 6815 const DebugLoc &DL, 6816 unsigned O) { 6817 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6818 "Expected inlined-at fields to agree"); 6819 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, Off, DL, O); 6820 } 6821 6822 namespace { 6823 6824 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 6825 /// pointed to by a use iterator is deleted, increment the use iterator 6826 /// so that it doesn't dangle. 6827 /// 6828 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 6829 SDNode::use_iterator &UI; 6830 SDNode::use_iterator &UE; 6831 6832 void NodeDeleted(SDNode *N, SDNode *E) override { 6833 // Increment the iterator as needed. 
while (UI != UE && N == *UI)
6835       ++UI;
6836   }
6837
6838 public:
6839   RAUWUpdateListener(SelectionDAG &d,
6840                      SDNode::use_iterator &ui,
6841                      SDNode::use_iterator &ue)
6842     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
6843 };
6844
6845 } // end anonymous namespace
6846
6847 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6848 /// This can cause recursive merging of nodes in the DAG.
6849 ///
6850 /// This version assumes From has a single result value.
6851 ///
6852 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
6853   SDNode *From = FromN.getNode();
6854   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
6855          "Cannot replace with this method!");
6856   assert(From != To.getNode() && "Cannot replace a value with itself");
6857
6858   // Preserve Debug Values
6859   TransferDbgValues(FromN, To);
6860
6861   // Iterate over all the existing uses of From. New uses will be added
6862   // to the beginning of the use list, which we avoid visiting.
6863   // This specifically avoids visiting uses of From that arise while the
6864   // replacement is happening, because any such uses would be the result
6865   // of CSE: If an existing node looks like From after one of its operands
6866   // is replaced by To, we don't want to replace all of its uses with To
6867   // too. See PR3018 for more info.
6868   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6869   RAUWUpdateListener Listener(*this, UI, UE);
6870   while (UI != UE) {
6871     SDNode *User = *UI;
6872
6873     // This node is about to morph, remove its old self from the CSE maps.
6874     RemoveNodeFromCSEMaps(User);
6875
6876     // A user can appear in a use list multiple times, and when this
6877     // happens the uses are usually next to each other in the list.
6878     // To help reduce the number of CSE recomputations, process all
6879     // the uses of this user that we can find this way.
6880     do {
6881       SDUse &Use = UI.getUse();
6882       ++UI;
6883       Use.set(To);
6884     } while (UI != UE && *UI == User);
6885
6886     // Now that we have modified User, add it back to the CSE maps. If it
6887     // already exists there, recursively merge the results together.
6888     AddModifiedNodeToCSEMaps(User);
6889   }
6890
6891   // If we just RAUW'd the root, take note.
6892   if (FromN == getRoot())
6893     setRoot(To);
6894 }
6895
6896 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6897 /// This can cause recursive merging of nodes in the DAG.
6898 ///
6899 /// This version assumes that for each value of From, there is a
6900 /// corresponding value in To in the same position with the same type.
6901 ///
6902 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
6903 #ifndef NDEBUG
6904   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6905     assert((!From->hasAnyUseOfValue(i) ||
6906             From->getValueType(i) == To->getValueType(i)) &&
6907            "Cannot use this version of ReplaceAllUsesWith!");
6908 #endif
6909
6910   // Handle the trivial case.
6911   if (From == To)
6912     return;
6913
6914   // Preserve Debug Info. Only do this if there's a use.
6915   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6916     if (From->hasAnyUseOfValue(i)) {
6917       assert((i < To->getNumValues()) && "Invalid To location");
6918       TransferDbgValues(SDValue(From, i), SDValue(To, i));
6919     }
6920
6921   // Iterate over just the existing users of From. See the comments in
6922   // the ReplaceAllUsesWith above.
6923   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6924   RAUWUpdateListener Listener(*this, UI, UE);
6925   while (UI != UE) {
6926     SDNode *User = *UI;
6927
6928     // This node is about to morph, remove its old self from the CSE maps.
6929     RemoveNodeFromCSEMaps(User);
6930
6931     // A user can appear in a use list multiple times, and when this
6932     // happens the uses are usually next to each other in the list.
6933     // To help reduce the number of CSE recomputations, process all
6934     // the uses of this user that we can find this way.
6935     do {
6936       SDUse &Use = UI.getUse();
6937       ++UI;
6938       Use.setNode(To);
6939     } while (UI != UE && *UI == User);
6940
6941     // Now that we have modified User, add it back to the CSE maps. If it
6942     // already exists there, recursively merge the results together.
6943     AddModifiedNodeToCSEMaps(User);
6944   }
6945
6946   // If we just RAUW'd the root, take note.
6947   if (From == getRoot().getNode())
6948     setRoot(SDValue(To, getRoot().getResNo()));
6949 }
6950
6951 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6952 /// This can cause recursive merging of nodes in the DAG.
6953 ///
6954 /// This version can replace From with any result values. To must match the
6955 /// number and types of values returned by From.
6956 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
6957   if (From->getNumValues() == 1)  // Handle the simple case efficiently.
6958     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
6959
6960   // Preserve Debug Info: each result's debug values go to the matching value.
6961   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6962     TransferDbgValues(SDValue(From, i), To[i]);
6963
6964   // Iterate over just the existing users of From. See the comments in
6965   // the ReplaceAllUsesWith above.
6966   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6967   RAUWUpdateListener Listener(*this, UI, UE);
6968   while (UI != UE) {
6969     SDNode *User = *UI;
6970
6971     // This node is about to morph, remove its old self from the CSE maps.
6972     RemoveNodeFromCSEMaps(User);
6973
6974     // A user can appear in a use list multiple times, and when this
6975     // happens the uses are usually next to each other in the list.
6976     // To help reduce the number of CSE recomputations, process all
6977     // the uses of this user that we can find this way.
6978     do {
6979       SDUse &Use = UI.getUse();
6980       const SDValue &ToOp = To[Use.getResNo()];
6981       ++UI;
6982       Use.set(ToOp);
6983     } while (UI != UE && *UI == User);
6984
6985     // Now that we have modified User, add it back to the CSE maps. If it
6986     // already exists there, recursively merge the results together.
6987     AddModifiedNodeToCSEMaps(User);
6988   }
6989
6990   // If we just RAUW'd the root, take note.
6991   if (From == getRoot().getNode())
6992     setRoot(SDValue(To[getRoot().getResNo()]));
6993 }
6994
6995 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
6996 /// uses of other values produced by From.getNode() alone. The Deleted
6997 /// vector is handled the same way as for ReplaceAllUsesWith.
6998 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
6999   // Handle the really simple, really trivial case efficiently.
7000   if (From == To) return;
7001
7002   // Handle the simple, trivial case efficiently.
7003   if (From.getNode()->getNumValues() == 1) {
7004     ReplaceAllUsesWith(From, To);
7005     return;
7006   }
7007
7008   // Preserve Debug Info.
7009   TransferDbgValues(From, To);
7010
7011   // Iterate over just the existing users of From.
See the comments in 7012 // the ReplaceAllUsesWith above. 7013 SDNode::use_iterator UI = From.getNode()->use_begin(), 7014 UE = From.getNode()->use_end(); 7015 RAUWUpdateListener Listener(*this, UI, UE); 7016 while (UI != UE) { 7017 SDNode *User = *UI; 7018 bool UserRemovedFromCSEMaps = false; 7019 7020 // A user can appear in a use list multiple times, and when this 7021 // happens the uses are usually next to each other in the list. 7022 // To help reduce the number of CSE recomputations, process all 7023 // the uses of this user that we can find this way. 7024 do { 7025 SDUse &Use = UI.getUse(); 7026 7027 // Skip uses of different values from the same node. 7028 if (Use.getResNo() != From.getResNo()) { 7029 ++UI; 7030 continue; 7031 } 7032 7033 // If this node hasn't been modified yet, it's still in the CSE maps, 7034 // so remove its old self from the CSE maps. 7035 if (!UserRemovedFromCSEMaps) { 7036 RemoveNodeFromCSEMaps(User); 7037 UserRemovedFromCSEMaps = true; 7038 } 7039 7040 ++UI; 7041 Use.set(To); 7042 } while (UI != UE && *UI == User); 7043 7044 // We are iterating over all uses of the From node, so if a use 7045 // doesn't use the specific value, no changes are made. 7046 if (!UserRemovedFromCSEMaps) 7047 continue; 7048 7049 // Now that we have modified User, add it back to the CSE maps. If it 7050 // already exists there, recursively merge the results together. 7051 AddModifiedNodeToCSEMaps(User); 7052 } 7053 7054 // If we just RAUW'd the root, take note. 7055 if (From == getRoot()) 7056 setRoot(To); 7057 } 7058 7059 namespace { 7060 7061 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 7062 /// to record information about a use. 7063 struct UseMemo { 7064 SDNode *User; 7065 unsigned Index; 7066 SDUse *Use; 7067 }; 7068 7069 /// operator< - Sort Memos by User. 7070 bool operator<(const UseMemo &L, const UseMemo &R) { 7071 return (intptr_t)L.User < (intptr_t)R.User; 7072 } 7073 7074 } // end anonymous namespace 7075 7076 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving 7077 /// uses of other values produced by From.getNode() alone. The same value 7078 /// may appear in both the From and To list. The Deleted vector is 7079 /// handled the same way as for ReplaceAllUsesWith. 7080 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From, 7081 const SDValue *To, 7082 unsigned Num){ 7083 // Handle the simple, trivial case efficiently. 7084 if (Num == 1) 7085 return ReplaceAllUsesOfValueWith(*From, *To); 7086 7087 TransferDbgValues(*From, *To); 7088 7089 // Read up all the uses and make records of them. This helps 7090 // processing new uses that are introduced during the 7091 // replacement process. 7092 SmallVector<UseMemo, 4> Uses; 7093 for (unsigned i = 0; i != Num; ++i) { 7094 unsigned FromResNo = From[i].getResNo(); 7095 SDNode *FromNode = From[i].getNode(); 7096 for (SDNode::use_iterator UI = FromNode->use_begin(), 7097 E = FromNode->use_end(); UI != E; ++UI) { 7098 SDUse &Use = UI.getUse(); 7099 if (Use.getResNo() == FromResNo) { 7100 UseMemo Memo = { *UI, i, &Use }; 7101 Uses.push_back(Memo); 7102 } 7103 } 7104 } 7105 7106 // Sort the uses, so that all the uses from a given User are together. 7107 std::sort(Uses.begin(), Uses.end()); 7108 7109 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size(); 7110 UseIndex != UseIndexEnd; ) { 7111 // We know that this user uses some value of From. If it is the right 7112 // value, update it. 
7113     SDNode *User = Uses[UseIndex].User;
7114
7115     // This node is about to morph, remove its old self from the CSE maps.
7116     RemoveNodeFromCSEMaps(User);
7117
7118     // The Uses array is sorted, so all the uses for a given User
7119     // are next to each other in the list.
7120     // To help reduce the number of CSE recomputations, process all
7121     // the uses of this user that we can find this way.
7122     do {
7123       unsigned i = Uses[UseIndex].Index;
7124       SDUse &Use = *Uses[UseIndex].Use;
7125       ++UseIndex;
7126
7127       Use.set(To[i]);
7128     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
7129
7130     // Now that we have modified User, add it back to the CSE maps. If it
7131     // already exists there, recursively merge the results together.
7132     AddModifiedNodeToCSEMaps(User);
7133   }
7134 }
7135
7136 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
7137 /// based on their topological order. It returns the node count, and leaves
7138 /// the node list and the nodes' NodeId fields in topological order.
7139 unsigned SelectionDAG::AssignTopologicalOrder() {
7140   unsigned DAGSize = 0;
7141
7142   // SortedPos tracks the progress of the algorithm. Nodes before it are
7143   // sorted, nodes after it are unsorted. When the algorithm completes
7144   // it is at the end of the list.
7145   allnodes_iterator SortedPos = allnodes_begin();
7146
7147   // Visit all the nodes. Move nodes with no operands to the front of
7148   // the list immediately. Annotate nodes that do have operands with their
7149   // operand count. Before we do this, the Node Id fields of the nodes
7150   // may contain arbitrary values. After, the Node Id fields for nodes
7151   // before SortedPos will contain the topological sort index, and the
7152   // Node Id fields for nodes at SortedPos and after will contain the
7153   // count of outstanding operands.
7154   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
7155     SDNode *N = &*I++;
7156     checkForCycles(N, this);
7157     unsigned Degree = N->getNumOperands();
7158     if (Degree == 0) {
7159       // A node with no operands; add it to the result array immediately.
7160       N->setNodeId(DAGSize++);
7161       allnodes_iterator Q(N);
7162       if (Q != SortedPos)
7163         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
7164       assert(SortedPos != AllNodes.end() && "Overran node list");
7165       ++SortedPos;
7166     } else {
7167       // Temporarily use the Node Id as scratch space for the degree count.
7168       N->setNodeId(Degree);
7169     }
7170   }
7171
7172   // Visit all the nodes. As we iterate, move nodes into sorted order,
7173   // such that by the time the end is reached all nodes will be sorted.
7174   for (SDNode &Node : allnodes()) {
7175     SDNode *N = &Node;
7176     checkForCycles(N, this);
7177     // N is in sorted position, so all its uses have one fewer operand
7178     // that needs to be sorted.
7179     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
7180          UI != UE; ++UI) {
7181       SDNode *P = *UI;
7182       unsigned Degree = P->getNodeId();
7183       assert(Degree != 0 && "Invalid node degree");
7184       --Degree;
7185       if (Degree == 0) {
7186         // All of P's operands are sorted, so P may be sorted now.
7187         P->setNodeId(DAGSize++);
7188         if (P->getIterator() != SortedPos)
7189           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
7190         assert(SortedPos != AllNodes.end() && "Overran node list");
7191         ++SortedPos;
7192       } else {
7193         // Update P's outstanding operand count.
7194         P->setNodeId(Degree);
7195       }
7196     }
7197     if (Node.getIterator() == SortedPos) {
7198 #ifndef NDEBUG
7199       allnodes_iterator I(N);
7200       SDNode *S = &*++I;
7201       dbgs() << "Overran sorted position:\n";
7202       S->dumprFull(this); dbgs() << "\n";
7203       dbgs() << "Checking if this is due to cycles\n";
7204       checkForCycles(this, true);
7205 #endif
7206       llvm_unreachable(nullptr);
7207     }
7208   }
7209
7210   assert(SortedPos == AllNodes.end() &&
7211          "Topological sort incomplete!");
7212   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
7213          "First node in topological sort is not the entry token!");
7214   assert(AllNodes.front().getNodeId() == 0 &&
7215          "First node in topological sort has non-zero id!");
7216   assert(AllNodes.front().getNumOperands() == 0 &&
7217          "First node in topological sort has operands!");
7218   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
7219          "Last node in topological sort has unexpected id!");
7220   assert(AllNodes.back().use_empty() &&
7221          "Last node in topological sort has users!");
7222   assert(DAGSize == allnodes_size() && "Node count mismatch!");
7223   return DAGSize;
7224 }
7225
7226 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
7227 /// value is produced by SD.
7228 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
7229   if (SD) {
7230     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
7231     SD->setHasDebugValue(true);
7232   }
7233   DbgInfo->add(DB, SD, isParameter);
7234 }
7235
7236 /// TransferDbgValues - Transfer SDDbgValues. Called when nodes are replaced.
7237 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
7238   if (From == To || !From.getNode()->getHasDebugValue())
7239     return;
7240   SDNode *FromNode = From.getNode();
7241   SDNode *ToNode = To.getNode();
7242   ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
7243   SmallVector<SDDbgValue *, 2> ClonedDVs;
7244   for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
7245        I != E; ++I) {
7246     SDDbgValue *Dbg = *I;
7247     // Only add SDDbgValues attached to the same ResNo.
7248     if (Dbg->getKind() == SDDbgValue::SDNODE &&
7249         Dbg->getSDNode() == From.getNode() &&
7250         Dbg->getResNo() == From.getResNo() && !Dbg->isInvalidated()) {
7251       assert(FromNode != ToNode &&
7252              "Should not transfer Debug Values intranode");
7253       SDDbgValue *Clone =
7254           getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
7255                       To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
7256                       Dbg->getDebugLoc(), Dbg->getOrder());
7257       ClonedDVs.push_back(Clone);
7258       Dbg->setIsInvalidated();
7259     }
7260   }
7261   for (SDDbgValue *I : ClonedDVs)
7262     AddDbgValue(I, ToNode, false);
7263 }
7264
7265 void SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
7266                                                 SDValue NewMemOp) {
7267   assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
7268   if (!OldLoad->hasAnyUseOfValue(1))
7269     return;
7270
7271   // The new memory operation must have the same position as the old load in
7272   // terms of memory dependency. Create a TokenFactor for the old load and new
7273   // memory operation and update uses of the old load's output chain to use that
7274   // TokenFactor.
7275 SDValue OldChain = SDValue(OldLoad, 1); 7276 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 7277 SDValue TokenFactor = 7278 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 7279 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 7280 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 7281 } 7282 7283 //===----------------------------------------------------------------------===// 7284 // SDNode Class 7285 //===----------------------------------------------------------------------===// 7286 7287 bool llvm::isNullConstant(SDValue V) { 7288 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7289 return Const != nullptr && Const->isNullValue(); 7290 } 7291 7292 bool llvm::isNullFPConstant(SDValue V) { 7293 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 7294 return Const != nullptr && Const->isZero() && !Const->isNegative(); 7295 } 7296 7297 bool llvm::isAllOnesConstant(SDValue V) { 7298 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7299 return Const != nullptr && Const->isAllOnesValue(); 7300 } 7301 7302 bool llvm::isOneConstant(SDValue V) { 7303 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7304 return Const != nullptr && Const->isOne(); 7305 } 7306 7307 bool llvm::isBitwiseNot(SDValue V) { 7308 return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1)); 7309 } 7310 7311 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) { 7312 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 7313 return CN; 7314 7315 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 7316 BitVector UndefElements; 7317 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 7318 7319 // BuildVectors can truncate their operands. Ignore that case here. 7320 // FIXME: We blindly ignore splats which include undef which is overly 7321 // pessimistic. 
7322 if (CN && UndefElements.none() && 7323 CN->getValueType(0) == N.getValueType().getScalarType()) 7324 return CN; 7325 } 7326 7327 return nullptr; 7328 } 7329 7330 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) { 7331 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 7332 return CN; 7333 7334 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 7335 BitVector UndefElements; 7336 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 7337 7338 if (CN && UndefElements.none()) 7339 return CN; 7340 } 7341 7342 return nullptr; 7343 } 7344 7345 HandleSDNode::~HandleSDNode() { 7346 DropOperands(); 7347 } 7348 7349 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 7350 const DebugLoc &DL, 7351 const GlobalValue *GA, EVT VT, 7352 int64_t o, unsigned char TF) 7353 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 7354 TheGlobal = GA; 7355 } 7356 7357 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 7358 EVT VT, unsigned SrcAS, 7359 unsigned DestAS) 7360 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 7361 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 7362 7363 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 7364 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 7365 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 7366 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 7367 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 7368 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 7369 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 7370 7371 // We check here that the size of the memory operand fits within the size of 7372 // the MMO. This is because the MMO might indicate only a possible address 7373 // range instead of specifying the affected memory addresses precisely. 7374 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!"); 7375 } 7376 7377 /// Profile - Gather unique data for the node. 7378 /// 7379 void SDNode::Profile(FoldingSetNodeID &ID) const { 7380 AddNodeIDNode(ID, this); 7381 } 7382 7383 namespace { 7384 7385 struct EVTArray { 7386 std::vector<EVT> VTs; 7387 7388 EVTArray() { 7389 VTs.reserve(MVT::LAST_VALUETYPE); 7390 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 7391 VTs.push_back(MVT((MVT::SimpleValueType)i)); 7392 } 7393 }; 7394 7395 } // end anonymous namespace 7396 7397 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 7398 static ManagedStatic<EVTArray> SimpleVTArray; 7399 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 7400 7401 /// getValueTypeList - Return a pointer to the specified value type. 7402 /// 7403 const EVT *SDNode::getValueTypeList(EVT VT) { 7404 if (VT.isExtended()) { 7405 sys::SmartScopedLock<true> Lock(*VTMutex); 7406 return &(*EVTs->insert(VT).first); 7407 } else { 7408 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 7409 "Value type out of range!"); 7410 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 7411 } 7412 } 7413 7414 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 7415 /// indicated value. This method ignores uses of other values defined by this 7416 /// operation. 
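/// For example, a combine can check that only the integer result of a load
/// (result 0) is used, ignoring users of the chain (result 1); sketch:
/// \code
///   if (LoadNode->hasNUsesOfValue(1, 0)) {
///     // The loaded value has exactly one user; folding may be profitable.
///   }
/// \endcode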
7417 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
7418   assert(Value < getNumValues() && "Bad value!");
7419
7420   // TODO: Only iterate over uses of a given value of the node
7421   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
7422     if (UI.getUse().getResNo() == Value) {
7423       if (NUses == 0)
7424         return false;
7425       --NUses;
7426     }
7427   }
7428
7429   // Found exactly the right number of uses?
7430   return NUses == 0;
7431 }
7432
7433 /// hasAnyUseOfValue - Return true if there is any use of the indicated
7434 /// value. This method ignores uses of other values defined by this operation.
7435 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
7436   assert(Value < getNumValues() && "Bad value!");
7437
7438   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
7439     if (UI.getUse().getResNo() == Value)
7440       return true;
7441
7442   return false;
7443 }
7444
7445 /// isOnlyUserOf - Return true if this node is the only use of N.
7446 bool SDNode::isOnlyUserOf(const SDNode *N) const {
7447   bool Seen = false;
7448   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
7449     SDNode *User = *I;
7450     if (User == this)
7451       Seen = true;
7452     else
7453       return false;
7454   }
7455
7456   return Seen;
7457 }
7458
7459 /// Return true if the only users of N are contained in Nodes.
7460 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
7461   bool Seen = false;
7462   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
7463     SDNode *User = *I;
7464     if (llvm::any_of(Nodes,
7465                      [&User](const SDNode *Node) { return User == Node; }))
7466       Seen = true;
7467     else
7468       return false;
7469   }
7470
7471   return Seen;
7472 }
7473
7474 /// isOperandOf - Return true if this value is an operand of N.
7475 bool SDValue::isOperandOf(const SDNode *N) const {
7476   for (const SDValue &Op : N->op_values())
7477     if (*this == Op)
7478       return true;
7479   return false;
7480 }
7481
7482 bool SDNode::isOperandOf(const SDNode *N) const {
7483   for (const SDValue &Op : N->op_values())
7484     if (this == Op.getNode())
7485       return true;
7486   return false;
7487 }
7488
7489 /// reachesChainWithoutSideEffects - Return true if this operand (which must
7490 /// be a chain) reaches the specified operand without crossing any
7491 /// side-effecting instructions on any chain path. In practice, this looks
7492 /// through token factors and non-volatile loads. In order to remain efficient,
7493 /// this only looks a couple of nodes in; it does not do an exhaustive search.
7494 ///
7495 /// Note that we only need to examine chains when we're searching for
7496 /// side-effects; SelectionDAG requires that all side-effects are represented
7497 /// by chains, even if another operand would force a specific ordering. This
7498 /// constraint is necessary to allow transformations like splitting loads.
7499 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
7500                                              unsigned Depth) const {
7501   if (*this == Dest) return true;
7502
7503   // Don't search too deeply, we just want to be able to see through
7504   // TokenFactor's etc.
7505   if (Depth == 0) return false;
7506
7507   // If this is a token factor, all inputs to the TF happen in parallel.
7508   if (getOpcode() == ISD::TokenFactor) {
7509     // First, try a shallow search.
7510     if (is_contained((*this)->ops(), Dest)) {
7511       // We found the chain we want as an operand of this TokenFactor.
7512 // Essentially, we reach the chain without side-effects if we could 7513 // serialize the TokenFactor into a simple chain of operations with 7514 // Dest as the last operation. This is automatically true if the 7515 // chain has one use: there are no other ordering constraints. 7516 // If the chain has more than one use, we give up: some other 7517 // use of Dest might force a side-effect between Dest and the current 7518 // node. 7519 if (Dest.hasOneUse()) 7520 return true; 7521 } 7522 // Next, try a deep search: check whether every operand of the TokenFactor 7523 // reaches Dest. 7524 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 7525 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 7526 }); 7527 } 7528 7529 // Loads don't have side effects, look through them. 7530 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 7531 if (!Ld->isVolatile()) 7532 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 7533 } 7534 return false; 7535 } 7536 7537 bool SDNode::hasPredecessor(const SDNode *N) const { 7538 SmallPtrSet<const SDNode *, 32> Visited; 7539 SmallVector<const SDNode *, 16> Worklist; 7540 Worklist.push_back(this); 7541 return hasPredecessorHelper(N, Visited, Worklist); 7542 } 7543 7544 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 7545 this->Flags.intersectWith(Flags); 7546 } 7547 7548 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 7549 assert(N->getNumValues() == 1 && 7550 "Can't unroll a vector with multiple results!"); 7551 7552 EVT VT = N->getValueType(0); 7553 unsigned NE = VT.getVectorNumElements(); 7554 EVT EltVT = VT.getVectorElementType(); 7555 SDLoc dl(N); 7556 7557 SmallVector<SDValue, 8> Scalars; 7558 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 7559 7560 // If ResNE is 0, fully unroll the vector op. 7561 if (ResNE == 0) 7562 ResNE = NE; 7563 else if (NE > ResNE) 7564 NE = ResNE; 7565 7566 unsigned i; 7567 for (i= 0; i != NE; ++i) { 7568 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 7569 SDValue Operand = N->getOperand(j); 7570 EVT OperandVT = Operand.getValueType(); 7571 if (OperandVT.isVector()) { 7572 // A vector operand; extract a single element. 7573 EVT OperandEltVT = OperandVT.getVectorElementType(); 7574 Operands[j] = 7575 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand, 7576 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout()))); 7577 } else { 7578 // A scalar operand; just use it as is. 
7579 Operands[j] = Operand; 7580 } 7581 } 7582 7583 switch (N->getOpcode()) { 7584 default: { 7585 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 7586 N->getFlags())); 7587 break; 7588 } 7589 case ISD::VSELECT: 7590 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 7591 break; 7592 case ISD::SHL: 7593 case ISD::SRA: 7594 case ISD::SRL: 7595 case ISD::ROTL: 7596 case ISD::ROTR: 7597 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 7598 getShiftAmountOperand(Operands[0].getValueType(), 7599 Operands[1]))); 7600 break; 7601 case ISD::SIGN_EXTEND_INREG: 7602 case ISD::FP_ROUND_INREG: { 7603 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 7604 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 7605 Operands[0], 7606 getValueType(ExtVT))); 7607 } 7608 } 7609 } 7610 7611 for (; i < ResNE; ++i) 7612 Scalars.push_back(getUNDEF(EltVT)); 7613 7614 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 7615 return getBuildVector(VecVT, dl, Scalars); 7616 } 7617 7618 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 7619 LoadSDNode *Base, 7620 unsigned Bytes, 7621 int Dist) const { 7622 if (LD->isVolatile() || Base->isVolatile()) 7623 return false; 7624 if (LD->isIndexed() || Base->isIndexed()) 7625 return false; 7626 if (LD->getChain() != Base->getChain()) 7627 return false; 7628 EVT VT = LD->getValueType(0); 7629 if (VT.getSizeInBits() / 8 != Bytes) 7630 return false; 7631 7632 SDValue Loc = LD->getOperand(1); 7633 SDValue BaseLoc = Base->getOperand(1); 7634 7635 auto BaseLocDecomp = BaseIndexOffset::match(BaseLoc, *this); 7636 auto LocDecomp = BaseIndexOffset::match(Loc, *this); 7637 7638 int64_t Offset = 0; 7639 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset)) 7640 return (Dist * Bytes == Offset); 7641 return false; 7642 } 7643 7644 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if 7645 /// it cannot be inferred. 7646 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { 7647 // If this is a GlobalAddress + cst, return the alignment. 7648 const GlobalValue *GV; 7649 int64_t GVOffset = 0; 7650 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { 7651 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); 7652 KnownBits Known(PtrWidth); 7653 llvm::computeKnownBits(GV, Known, getDataLayout()); 7654 unsigned AlignBits = Known.countMinTrailingZeros(); 7655 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; 7656 if (Align) 7657 return MinAlign(Align, GVOffset); 7658 } 7659 7660 // If this is a direct reference to a stack slot, use information about the 7661 // stack slot's alignment. 7662 int FrameIdx = 1 << 31; 7663 int64_t FrameOffset = 0; 7664 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) { 7665 FrameIdx = FI->getIndex(); 7666 } else if (isBaseWithConstantOffset(Ptr) && 7667 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 7668 // Handle FI+Cst 7669 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 7670 FrameOffset = Ptr.getConstantOperandVal(1); 7671 } 7672 7673 if (FrameIdx != (1 << 31)) { 7674 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 7675 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx), 7676 FrameOffset); 7677 return FIInfoAlign; 7678 } 7679 7680 return 0; 7681 } 7682 7683 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type 7684 /// which is split (or expanded) into two not necessarily identical pieces. 
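/// For example, splitting v4i32 yields (v2i32, v2i32). A typical use when
/// splitting a vector node (sketch):
/// \code
///   EVT LoVT, HiVT;
///   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
///   SDValue Lo, Hi;
///   std::tie(Lo, Hi) = DAG.SplitVector(N->getOperand(0), SDLoc(N), LoVT, HiVT);
/// \endcode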
7685 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const { 7686 // Currently all types are split in half. 7687 EVT LoVT, HiVT; 7688 if (!VT.isVector()) 7689 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); 7690 else 7691 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext()); 7692 7693 return std::make_pair(LoVT, HiVT); 7694 } 7695 7696 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the 7697 /// low/high part. 7698 std::pair<SDValue, SDValue> 7699 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, 7700 const EVT &HiVT) { 7701 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <= 7702 N.getValueType().getVectorNumElements() && 7703 "More vector elements requested than available!"); 7704 SDValue Lo, Hi; 7705 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, 7706 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout()))); 7707 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, 7708 getConstant(LoVT.getVectorNumElements(), DL, 7709 TLI->getVectorIdxTy(getDataLayout()))); 7710 return std::make_pair(Lo, Hi); 7711 } 7712 7713 void SelectionDAG::ExtractVectorElements(SDValue Op, 7714 SmallVectorImpl<SDValue> &Args, 7715 unsigned Start, unsigned Count) { 7716 EVT VT = Op.getValueType(); 7717 if (Count == 0) 7718 Count = VT.getVectorNumElements(); 7719 7720 EVT EltVT = VT.getVectorElementType(); 7721 EVT IdxTy = TLI->getVectorIdxTy(getDataLayout()); 7722 SDLoc SL(Op); 7723 for (unsigned i = Start, e = Start + Count; i != e; ++i) { 7724 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 7725 Op, getConstant(i, SL, IdxTy))); 7726 } 7727 } 7728 7729 // getAddressSpace - Return the address space this GlobalAddress belongs to. 7730 unsigned GlobalAddressSDNode::getAddressSpace() const { 7731 return getGlobal()->getType()->getAddressSpace(); 7732 } 7733 7734 Type *ConstantPoolSDNode::getType() const { 7735 if (isMachineConstantPoolEntry()) 7736 return Val.MachineCPVal->getType(); 7737 return Val.ConstVal->getType(); 7738 } 7739 7740 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef, 7741 unsigned &SplatBitSize, 7742 bool &HasAnyUndefs, 7743 unsigned MinSplatBits, 7744 bool IsBigEndian) const { 7745 EVT VT = getValueType(0); 7746 assert(VT.isVector() && "Expected a vector type"); 7747 unsigned VecWidth = VT.getSizeInBits(); 7748 if (MinSplatBits > VecWidth) 7749 return false; 7750 7751 // FIXME: The widths are based on this node's type, but build vectors can 7752 // truncate their operands. 7753 SplatValue = APInt(VecWidth, 0); 7754 SplatUndef = APInt(VecWidth, 0); 7755 7756 // Get the bits. Bits with undefined values (when the corresponding element 7757 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared 7758 // in SplatValue. If any of the values are not constant, give up and return 7759 // false. 7760 unsigned int NumOps = getNumOperands(); 7761 assert(NumOps > 0 && "isConstantSplat has 0-size build vector"); 7762 unsigned EltWidth = VT.getScalarSizeInBits(); 7763 7764 for (unsigned j = 0; j < NumOps; ++j) { 7765 unsigned i = IsBigEndian ? 
NumOps - 1 - j : j; 7766 SDValue OpVal = getOperand(i); 7767 unsigned BitPos = j * EltWidth; 7768 7769 if (OpVal.isUndef()) 7770 SplatUndef.setBits(BitPos, BitPos + EltWidth); 7771 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal)) 7772 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos); 7773 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal)) 7774 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos); 7775 else 7776 return false; 7777 } 7778 7779 // The build_vector is all constants or undefs. Find the smallest element 7780 // size that splats the vector. 7781 HasAnyUndefs = (SplatUndef != 0); 7782 7783 // FIXME: This does not work for vectors with elements less than 8 bits. 7784 while (VecWidth > 8) { 7785 unsigned HalfSize = VecWidth / 2; 7786 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize); 7787 APInt LowValue = SplatValue.trunc(HalfSize); 7788 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize); 7789 APInt LowUndef = SplatUndef.trunc(HalfSize); 7790 7791 // If the two halves do not match (ignoring undef bits), stop here. 7792 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) || 7793 MinSplatBits > HalfSize) 7794 break; 7795 7796 SplatValue = HighValue | LowValue; 7797 SplatUndef = HighUndef & LowUndef; 7798 7799 VecWidth = HalfSize; 7800 } 7801 7802 SplatBitSize = VecWidth; 7803 return true; 7804 } 7805 7806 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { 7807 if (UndefElements) { 7808 UndefElements->clear(); 7809 UndefElements->resize(getNumOperands()); 7810 } 7811 SDValue Splatted; 7812 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 7813 SDValue Op = getOperand(i); 7814 if (Op.isUndef()) { 7815 if (UndefElements) 7816 (*UndefElements)[i] = true; 7817 } else if (!Splatted) { 7818 Splatted = Op; 7819 } else if (Splatted != Op) { 7820 return SDValue(); 7821 } 7822 } 7823 7824 if (!Splatted) { 7825 assert(getOperand(0).isUndef() && 7826 "Can only have a splat without a constant for all undefs."); 7827 return getOperand(0); 7828 } 7829 7830 return Splatted; 7831 } 7832 7833 ConstantSDNode * 7834 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const { 7835 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements)); 7836 } 7837 7838 ConstantFPSDNode * 7839 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const { 7840 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements)); 7841 } 7842 7843 int32_t 7844 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, 7845 uint32_t BitWidth) const { 7846 if (ConstantFPSDNode *CN = 7847 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) { 7848 bool IsExact; 7849 APSInt IntVal(BitWidth); 7850 const APFloat &APF = CN->getValueAPF(); 7851 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) != 7852 APFloat::opOK || 7853 !IsExact) 7854 return -1; 7855 7856 return IntVal.exactLogBase2(); 7857 } 7858 return -1; 7859 } 7860 7861 bool BuildVectorSDNode::isConstant() const { 7862 for (const SDValue &Op : op_values()) { 7863 unsigned Opc = Op.getOpcode(); 7864 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP) 7865 return false; 7866 } 7867 return true; 7868 } 7869 7870 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { 7871 // Find the first non-undef value in the shuffle mask. 
7872 unsigned i, e; 7873 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i) 7874 /* search */; 7875 7876 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!"); 7877 7878 // Make sure all remaining elements are either undef or the same as the first 7879 // non-undef value. 7880 for (int Idx = Mask[i]; i != e; ++i) 7881 if (Mask[i] >= 0 && Mask[i] != Idx) 7882 return false; 7883 return true; 7884 } 7885 7886 // \brief Returns the SDNode if it is a constant integer BuildVector 7887 // or constant integer. 7888 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) { 7889 if (isa<ConstantSDNode>(N)) 7890 return N.getNode(); 7891 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) 7892 return N.getNode(); 7893 // Treat a GlobalAddress supporting constant offset folding as a 7894 // constant integer. 7895 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N)) 7896 if (GA->getOpcode() == ISD::GlobalAddress && 7897 TLI->isOffsetFoldingLegal(GA)) 7898 return GA; 7899 return nullptr; 7900 } 7901 7902 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) { 7903 if (isa<ConstantFPSDNode>(N)) 7904 return N.getNode(); 7905 7906 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode())) 7907 return N.getNode(); 7908 7909 return nullptr; 7910 } 7911 7912 #ifndef NDEBUG 7913 static void checkForCyclesHelper(const SDNode *N, 7914 SmallPtrSetImpl<const SDNode*> &Visited, 7915 SmallPtrSetImpl<const SDNode*> &Checked, 7916 const llvm::SelectionDAG *DAG) { 7917 // If this node has already been checked, don't check it again. 7918 if (Checked.count(N)) 7919 return; 7920 7921 // If a node has already been visited on this depth-first walk, reject it as 7922 // a cycle. 7923 if (!Visited.insert(N).second) { 7924 errs() << "Detected cycle in SelectionDAG\n"; 7925 dbgs() << "Offending node:\n"; 7926 N->dumprFull(DAG); dbgs() << "\n"; 7927 abort(); 7928 } 7929 7930 for (const SDValue &Op : N->op_values()) 7931 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG); 7932 7933 Checked.insert(N); 7934 Visited.erase(N); 7935 } 7936 #endif 7937 7938 void llvm::checkForCycles(const llvm::SDNode *N, 7939 const llvm::SelectionDAG *DAG, 7940 bool force) { 7941 #ifndef NDEBUG 7942 bool check = force; 7943 #ifdef EXPENSIVE_CHECKS 7944 check = true; 7945 #endif // EXPENSIVE_CHECKS 7946 if (check) { 7947 assert(N && "Checking nonexistent SDNode"); 7948 SmallPtrSet<const SDNode*, 32> visited; 7949 SmallPtrSet<const SDNode*, 32> checked; 7950 checkForCyclesHelper(N, visited, checked, DAG); 7951 } 7952 #endif // !NDEBUG 7953 } 7954 7955 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) { 7956 checkForCycles(DAG->getRoot().getNode(), DAG, force); 7957 } 7958