//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
#include <utility>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT, const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  EVT EltVT = N->getValueType(0).getVectorElementType();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs) &&
         EltVT.getSizeInBits() >= SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements.
    // We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (!Op.isUndef())
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if it is an unsigned operation. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF =
        cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leafs with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
        std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                             ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
      PointerType::get(Type::getInt8Ty(*getContext()), 0) :
      VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
      UpdateListeners(nullptr) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE) {
  MF = &NewMF;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  Context = &MF->getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                              EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
      getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue TrueValue;
  switch (TLI->getBooleanContents(VT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    TrueValue = getConstant(1, DL, VT);
    break;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
                            VT);
    break;
  }
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits),
                                     DL, ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
    return getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ?
      ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ?
      ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = MF->getFunction()->optForSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ?
      ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
      ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                                 TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(all_of(Mask, [&](int M) { return M < (NElts * 2); }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  // If shuffling a splat, try to blend the splat instead. We do this here so
  // that even when this arises during lowering we don't have to re-handle it.
  auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
    BitVector UndefElements;
    SDValue Splat = BV->getSplatValue(&UndefElements);
    if (!Splat)
      return;

    for (int i = 0; i < NElts; ++i) {
      if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
        continue;

      // If this input comes from undef, mark it as such.
      if (UndefElements[MaskVec[i] - Offset]) {
        MaskVec[i] = -1;
        continue;
      }

      // If we can blend a non-undef lane, use that instead.
      if (!UndefElements[i])
        MaskVec[i] = i + Offset;
    }
  };
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    BlendSplat(N1BV, 0);
  if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
    BlendSplat(N2BV, NElts);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts.
    // We check that these don't change the number
    // (and size) of elements and just change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
          V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements match or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
1596 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1597 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc); 1598 1599 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1600 dl.getDebugLoc(), MaskAlloc); 1601 createOperands(N, Ops); 1602 1603 CSEMap.InsertNode(N, IP); 1604 InsertNode(N); 1605 return SDValue(N, 0); 1606 } 1607 1608 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1609 MVT VT = SV.getSimpleValueType(0); 1610 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1611 ShuffleVectorSDNode::commuteMask(MaskVec); 1612 1613 SDValue Op0 = SV.getOperand(0); 1614 SDValue Op1 = SV.getOperand(1); 1615 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1616 } 1617 1618 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1619 FoldingSetNodeID ID; 1620 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1621 ID.AddInteger(RegNo); 1622 void *IP = nullptr; 1623 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1624 return SDValue(E, 0); 1625 1626 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1627 CSEMap.InsertNode(N, IP); 1628 InsertNode(N); 1629 return SDValue(N, 0); 1630 } 1631 1632 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1633 FoldingSetNodeID ID; 1634 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1635 ID.AddPointer(RegMask); 1636 void *IP = nullptr; 1637 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1638 return SDValue(E, 0); 1639 1640 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1641 CSEMap.InsertNode(N, IP); 1642 InsertNode(N); 1643 return SDValue(N, 0); 1644 } 1645 1646 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1647 MCSymbol *Label) { 1648 FoldingSetNodeID ID; 1649 SDValue Ops[] = { Root }; 1650 AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops); 1651 ID.AddPointer(Label); 1652 void *IP = nullptr; 1653 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1654 return SDValue(E, 0); 1655 1656 auto *N = newSDNode<EHLabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label); 1657 createOperands(N, Ops); 1658 1659 CSEMap.InsertNode(N, IP); 1660 InsertNode(N); 1661 return SDValue(N, 0); 1662 } 1663 1664 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1665 int64_t Offset, 1666 bool isTarget, 1667 unsigned char TargetFlags) { 1668 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1669 1670 FoldingSetNodeID ID; 1671 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1672 ID.AddPointer(BA); 1673 ID.AddInteger(Offset); 1674 ID.AddInteger(TargetFlags); 1675 void *IP = nullptr; 1676 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1677 return SDValue(E, 0); 1678 1679 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1680 CSEMap.InsertNode(N, IP); 1681 InsertNode(N); 1682 return SDValue(N, 0); 1683 } 1684 1685 SDValue SelectionDAG::getSrcValue(const Value *V) { 1686 assert((!V || V->getType()->isPointerTy()) && 1687 "SrcValue is not a pointer?"); 1688 1689 FoldingSetNodeID ID; 1690 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1691 ID.AddPointer(V); 1692 1693 void *IP = nullptr; 1694 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1695 return SDValue(E, 0); 1696 1697 auto *N = newSDNode<SrcValueSDNode>(V); 1698 CSEMap.InsertNode(N, IP); 1699 InsertNode(N); 1700 return SDValue(N, 0); 1701 } 1702 1703 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1704 FoldingSetNodeID ID; 1705 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1706 ID.AddPointer(MD); 1707 1708 void *IP = nullptr; 1709 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1710 return SDValue(E, 0); 1711 1712 auto *N = newSDNode<MDNodeSDNode>(MD); 1713 CSEMap.InsertNode(N, IP); 1714 InsertNode(N); 1715 return SDValue(N, 0); 1716 } 1717 1718 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1719 if (VT == V.getValueType()) 1720 return V; 1721 1722 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1723 } 1724 1725 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1726 unsigned SrcAS, unsigned DestAS) { 1727 SDValue Ops[] = {Ptr}; 1728 FoldingSetNodeID ID; 1729 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1730 ID.AddInteger(SrcAS); 1731 ID.AddInteger(DestAS); 1732 1733 void *IP = nullptr; 1734 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1735 return SDValue(E, 0); 1736 1737 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1738 VT, SrcAS, DestAS); 1739 createOperands(N, Ops); 1740 1741 CSEMap.InsertNode(N, IP); 1742 InsertNode(N); 1743 return SDValue(N, 0); 1744 } 1745 1746 /// getShiftAmountOperand - Return the specified value casted to 1747 /// the target's desired shift amount type. 
1748 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1749 EVT OpTy = Op.getValueType(); 1750 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1751 if (OpTy == ShTy || OpTy.isVector()) return Op; 1752 1753 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1754 } 1755 1756 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1757 SDLoc dl(Node); 1758 const TargetLowering &TLI = getTargetLoweringInfo(); 1759 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1760 EVT VT = Node->getValueType(0); 1761 SDValue Tmp1 = Node->getOperand(0); 1762 SDValue Tmp2 = Node->getOperand(1); 1763 unsigned Align = Node->getConstantOperandVal(3); 1764 1765 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1766 Tmp2, MachinePointerInfo(V)); 1767 SDValue VAList = VAListLoad; 1768 1769 if (Align > TLI.getMinStackArgumentAlignment()) { 1770 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 1771 1772 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1773 getConstant(Align - 1, dl, VAList.getValueType())); 1774 1775 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1776 getConstant(-(int64_t)Align, dl, VAList.getValueType())); 1777 } 1778 1779 // Increment the pointer, VAList, to the next vaarg 1780 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1781 getConstant(getDataLayout().getTypeAllocSize( 1782 VT.getTypeForEVT(*getContext())), 1783 dl, VAList.getValueType())); 1784 // Store the incremented VAList to the legalized pointer 1785 Tmp1 = 1786 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1787 // Load the actual argument out of the pointer VAList 1788 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1789 } 1790 1791 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1792 SDLoc dl(Node); 1793 const TargetLowering &TLI = getTargetLoweringInfo(); 1794 // This defaults to loading a pointer from the input and storing it to the 1795 // output, returning the chain. 
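  // For example (illustrative only, assuming a target whose va_list is a
  // plain pointer): va_copy(dst, src) becomes roughly
  //   %p = load i8*, i8** %src
  //   store i8* %p, i8** %dst
  // with both operations threaded onto the incoming chain.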
1796 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1797 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1798 SDValue Tmp1 = 1799 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1800 Node->getOperand(2), MachinePointerInfo(VS)); 1801 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1802 MachinePointerInfo(VD)); 1803 } 1804 1805 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1806 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1807 unsigned ByteSize = VT.getStoreSize(); 1808 Type *Ty = VT.getTypeForEVT(*getContext()); 1809 unsigned StackAlign = 1810 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1811 1812 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1813 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1814 } 1815 1816 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1817 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 1818 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1819 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1820 const DataLayout &DL = getDataLayout(); 1821 unsigned Align = 1822 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1823 1824 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1825 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 1826 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1827 } 1828 1829 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 1830 ISD::CondCode Cond, const SDLoc &dl) { 1831 // These setcc operations always fold. 1832 switch (Cond) { 1833 default: break; 1834 case ISD::SETFALSE: 1835 case ISD::SETFALSE2: return getConstant(0, dl, VT); 1836 case ISD::SETTRUE: 1837 case ISD::SETTRUE2: { 1838 TargetLowering::BooleanContent Cnt = 1839 TLI->getBooleanContents(N1->getValueType(0)); 1840 return getConstant( 1841 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? 
-1ULL : 1, dl, 1842 VT); 1843 } 1844 1845 case ISD::SETOEQ: 1846 case ISD::SETOGT: 1847 case ISD::SETOGE: 1848 case ISD::SETOLT: 1849 case ISD::SETOLE: 1850 case ISD::SETONE: 1851 case ISD::SETO: 1852 case ISD::SETUO: 1853 case ISD::SETUEQ: 1854 case ISD::SETUNE: 1855 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!"); 1856 break; 1857 } 1858 1859 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 1860 const APInt &C2 = N2C->getAPIntValue(); 1861 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 1862 const APInt &C1 = N1C->getAPIntValue(); 1863 1864 switch (Cond) { 1865 default: llvm_unreachable("Unknown integer setcc!"); 1866 case ISD::SETEQ: return getConstant(C1 == C2, dl, VT); 1867 case ISD::SETNE: return getConstant(C1 != C2, dl, VT); 1868 case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT); 1869 case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT); 1870 case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT); 1871 case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT); 1872 case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT); 1873 case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT); 1874 case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT); 1875 case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT); 1876 } 1877 } 1878 } 1879 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) { 1880 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) { 1881 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); 1882 switch (Cond) { 1883 default: break; 1884 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 1885 return getUNDEF(VT); 1886 LLVM_FALLTHROUGH; 1887 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT); 1888 case ISD::SETNE: if (R==APFloat::cmpUnordered) 1889 return getUNDEF(VT); 1890 LLVM_FALLTHROUGH; 1891 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan || 1892 R==APFloat::cmpLessThan, dl, VT); 1893 case ISD::SETLT: if (R==APFloat::cmpUnordered) 1894 return getUNDEF(VT); 1895 LLVM_FALLTHROUGH; 1896 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT); 1897 case ISD::SETGT: if (R==APFloat::cmpUnordered) 1898 return getUNDEF(VT); 1899 LLVM_FALLTHROUGH; 1900 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT); 1901 case ISD::SETLE: if (R==APFloat::cmpUnordered) 1902 return getUNDEF(VT); 1903 LLVM_FALLTHROUGH; 1904 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan || 1905 R==APFloat::cmpEqual, dl, VT); 1906 case ISD::SETGE: if (R==APFloat::cmpUnordered) 1907 return getUNDEF(VT); 1908 LLVM_FALLTHROUGH; 1909 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan || 1910 R==APFloat::cmpEqual, dl, VT); 1911 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT); 1912 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT); 1913 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered || 1914 R==APFloat::cmpEqual, dl, VT); 1915 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT); 1916 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered || 1917 R==APFloat::cmpLessThan, dl, VT); 1918 case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan || 1919 R==APFloat::cmpUnordered, dl, VT); 1920 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT); 1921 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT); 1922 } 1923 } else { 1924 // Ensure that the constant occurs on the RHS. 
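      // For example (illustrative): (setcc olt 2.0, %x) is rewritten as
      // (setcc ogt %x, 2.0) using the swapped condition code, but only if
      // the swapped code is legal for the comparison type.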
1925 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 1926 MVT CompVT = N1.getValueType().getSimpleVT(); 1927 if (!TLI->isCondCodeLegal(SwappedCond, CompVT)) 1928 return SDValue(); 1929 1930 return getSetCC(dl, VT, N2, N1, SwappedCond); 1931 } 1932 } 1933 1934 // Could not fold it. 1935 return SDValue(); 1936 } 1937 1938 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 1939 /// use this predicate to simplify operations downstream. 1940 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 1941 unsigned BitWidth = Op.getScalarValueSizeInBits(); 1942 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 1943 } 1944 1945 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 1946 /// this predicate to simplify operations downstream. Mask is known to be zero 1947 /// for bits that V cannot have. 1948 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 1949 unsigned Depth) const { 1950 KnownBits Known; 1951 computeKnownBits(Op, Known, Depth); 1952 return Mask.isSubsetOf(Known.Zero); 1953 } 1954 1955 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 1956 /// is less than the element bit-width of the shift node, return it. 1957 static const APInt *getValidShiftAmountConstant(SDValue V) { 1958 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 1959 // Shifting more than the bitwidth is not valid. 1960 const APInt &ShAmt = SA->getAPIntValue(); 1961 if (ShAmt.ult(V.getScalarValueSizeInBits())) 1962 return &ShAmt; 1963 } 1964 return nullptr; 1965 } 1966 1967 /// Determine which bits of Op are known to be either zero or one and return 1968 /// them in Known. For vectors, the known bits are those that are shared by 1969 /// every vector element. 1970 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 1971 unsigned Depth) const { 1972 EVT VT = Op.getValueType(); 1973 APInt DemandedElts = VT.isVector() 1974 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 1975 : APInt(1, 1); 1976 computeKnownBits(Op, Known, DemandedElts, Depth); 1977 } 1978 1979 /// Determine which bits of Op are known to be either zero or one and return 1980 /// them in Known. The DemandedElts argument allows us to only collect the known 1981 /// bits that are shared by the requested vector elements. 1982 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 1983 const APInt &DemandedElts, 1984 unsigned Depth) const { 1985 unsigned BitWidth = Op.getScalarValueSizeInBits(); 1986 1987 Known = KnownBits(BitWidth); // Don't know anything. 1988 if (Depth == 6) 1989 return; // Limit search depth. 1990 1991 KnownBits Known2; 1992 unsigned NumElts = DemandedElts.getBitWidth(); 1993 1994 if (!DemandedElts) 1995 return; // No demanded elts, better to assume we don't know anything. 1996 1997 unsigned Opcode = Op.getOpcode(); 1998 switch (Opcode) { 1999 case ISD::Constant: 2000 // We know all of the bits for a constant! 2001 Known.One = cast<ConstantSDNode>(Op)->getAPIntValue(); 2002 Known.Zero = ~Known.One; 2003 break; 2004 case ISD::BUILD_VECTOR: 2005 // Collect the known bits that are shared by every demanded vector element. 
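    // Small worked example (illustrative): for a v2i8 build_vector of the
    // constants 0x0F and 0x03 with both lanes demanded, lane 0 contributes
    // One=0x0F/Zero=0xF0 and lane 1 contributes One=0x03/Zero=0xFC, so the
    // intersection below yields One=0x03 and Zero=0xF0 for the whole node.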
2006 assert(NumElts == Op.getValueType().getVectorNumElements() && 2007 "Unexpected vector size"); 2008 Known.Zero.setAllBits(); Known.One.setAllBits(); 2009 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2010 if (!DemandedElts[i]) 2011 continue; 2012 2013 SDValue SrcOp = Op.getOperand(i); 2014 computeKnownBits(SrcOp, Known2, Depth + 1); 2015 2016 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2017 if (SrcOp.getValueSizeInBits() != BitWidth) { 2018 assert(SrcOp.getValueSizeInBits() > BitWidth && 2019 "Expected BUILD_VECTOR implicit truncation"); 2020 Known2 = Known2.trunc(BitWidth); 2021 } 2022 2023 // Known bits are the values that are shared by every demanded element. 2024 Known.One &= Known2.One; 2025 Known.Zero &= Known2.Zero; 2026 2027 // If we don't know any bits, early out. 2028 if (!Known.One && !Known.Zero) 2029 break; 2030 } 2031 break; 2032 case ISD::VECTOR_SHUFFLE: { 2033 // Collect the known bits that are shared by every vector element referenced 2034 // by the shuffle. 2035 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2036 Known.Zero.setAllBits(); Known.One.setAllBits(); 2037 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2038 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2039 for (unsigned i = 0; i != NumElts; ++i) { 2040 if (!DemandedElts[i]) 2041 continue; 2042 2043 int M = SVN->getMaskElt(i); 2044 if (M < 0) { 2045 // For UNDEF elements, we don't know anything about the common state of 2046 // the shuffle result. 2047 Known.resetAll(); 2048 DemandedLHS.clearAllBits(); 2049 DemandedRHS.clearAllBits(); 2050 break; 2051 } 2052 2053 if ((unsigned)M < NumElts) 2054 DemandedLHS.setBit((unsigned)M % NumElts); 2055 else 2056 DemandedRHS.setBit((unsigned)M % NumElts); 2057 } 2058 // Known bits are the values that are shared by every demanded element. 2059 if (!!DemandedLHS) { 2060 SDValue LHS = Op.getOperand(0); 2061 computeKnownBits(LHS, Known2, DemandedLHS, Depth + 1); 2062 Known.One &= Known2.One; 2063 Known.Zero &= Known2.Zero; 2064 } 2065 // If we don't know any bits, early out. 2066 if (!Known.One && !Known.Zero) 2067 break; 2068 if (!!DemandedRHS) { 2069 SDValue RHS = Op.getOperand(1); 2070 computeKnownBits(RHS, Known2, DemandedRHS, Depth + 1); 2071 Known.One &= Known2.One; 2072 Known.Zero &= Known2.Zero; 2073 } 2074 break; 2075 } 2076 case ISD::CONCAT_VECTORS: { 2077 // Split DemandedElts and test each of the demanded subvectors. 2078 Known.Zero.setAllBits(); Known.One.setAllBits(); 2079 EVT SubVectorVT = Op.getOperand(0).getValueType(); 2080 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 2081 unsigned NumSubVectors = Op.getNumOperands(); 2082 for (unsigned i = 0; i != NumSubVectors; ++i) { 2083 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 2084 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 2085 if (!!DemandedSub) { 2086 SDValue Sub = Op.getOperand(i); 2087 computeKnownBits(Sub, Known2, DemandedSub, Depth + 1); 2088 Known.One &= Known2.One; 2089 Known.Zero &= Known2.Zero; 2090 } 2091 // If we don't know any bits, early out. 2092 if (!Known.One && !Known.Zero) 2093 break; 2094 } 2095 break; 2096 } 2097 case ISD::EXTRACT_SUBVECTOR: { 2098 // If we know the element index, just demand that subvector elements, 2099 // otherwise demand them all. 
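    // For example (illustrative): extracting a v2i32 subvector at constant
    // index 2 from a v8i32 source with DemandedElts = 0b11 demands source
    // lanes 2 and 3, i.e. DemandedSrc = 0b00001100 after the zext and shl
    // below.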
2100 SDValue Src = Op.getOperand(0); 2101 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2102 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2103 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2104 // Offset the demanded elts by the subvector index. 2105 uint64_t Idx = SubIdx->getZExtValue(); 2106 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx); 2107 computeKnownBits(Src, Known, DemandedSrc, Depth + 1); 2108 } else { 2109 computeKnownBits(Src, Known, Depth + 1); 2110 } 2111 break; 2112 } 2113 case ISD::BITCAST: { 2114 SDValue N0 = Op.getOperand(0); 2115 unsigned SubBitWidth = N0.getScalarValueSizeInBits(); 2116 2117 // Ignore bitcasts from floating point. 2118 if (!N0.getValueType().isInteger()) 2119 break; 2120 2121 // Fast handling of 'identity' bitcasts. 2122 if (BitWidth == SubBitWidth) { 2123 computeKnownBits(N0, Known, DemandedElts, Depth + 1); 2124 break; 2125 } 2126 2127 // Support big-endian targets when it becomes useful. 2128 bool IsLE = getDataLayout().isLittleEndian(); 2129 if (!IsLE) 2130 break; 2131 2132 // Bitcast 'small element' vector to 'large element' scalar/vector. 2133 if ((BitWidth % SubBitWidth) == 0) { 2134 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2135 2136 // Collect known bits for the (larger) output by collecting the known 2137 // bits from each set of sub elements and shift these into place. 2138 // We need to separately call computeKnownBits for each set of 2139 // sub elements as the knownbits for each is likely to be different. 2140 unsigned SubScale = BitWidth / SubBitWidth; 2141 APInt SubDemandedElts(NumElts * SubScale, 0); 2142 for (unsigned i = 0; i != NumElts; ++i) 2143 if (DemandedElts[i]) 2144 SubDemandedElts.setBit(i * SubScale); 2145 2146 for (unsigned i = 0; i != SubScale; ++i) { 2147 computeKnownBits(N0, Known2, SubDemandedElts.shl(i), 2148 Depth + 1); 2149 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * i); 2150 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * i); 2151 } 2152 } 2153 2154 // Bitcast 'large element' scalar/vector to 'small element' vector. 2155 if ((SubBitWidth % BitWidth) == 0) { 2156 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 2157 2158 // Collect known bits for the (smaller) output by collecting the known 2159 // bits from the overlapping larger input elements and extracting the 2160 // sub sections we actually care about. 2161 unsigned SubScale = SubBitWidth / BitWidth; 2162 APInt SubDemandedElts(NumElts / SubScale, 0); 2163 for (unsigned i = 0; i != NumElts; ++i) 2164 if (DemandedElts[i]) 2165 SubDemandedElts.setBit(i / SubScale); 2166 2167 computeKnownBits(N0, Known2, SubDemandedElts, Depth + 1); 2168 2169 Known.Zero.setAllBits(); Known.One.setAllBits(); 2170 for (unsigned i = 0; i != NumElts; ++i) 2171 if (DemandedElts[i]) { 2172 unsigned Offset = (i % SubScale) * BitWidth; 2173 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth); 2174 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth); 2175 // If we don't know any bits, early out. 2176 if (!Known.One && !Known.Zero) 2177 break; 2178 } 2179 } 2180 break; 2181 } 2182 case ISD::AND: 2183 // If either the LHS or the RHS are Zero, the result is zero. 2184 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2185 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2186 2187 // Output known-1 bits are only known if set in both the LHS & RHS. 
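    // For example (illustrative): if the LHS has One = 0b0110 and the RHS
    // has One = 0b0011, only bit 1 is known one in the result, while every
    // bit known zero on either side is known zero in the result.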
2188 Known.One &= Known2.One; 2189 // Output known-0 are known to be clear if zero in either the LHS | RHS. 2190 Known.Zero |= Known2.Zero; 2191 break; 2192 case ISD::OR: 2193 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2194 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2195 2196 // Output known-0 bits are only known if clear in both the LHS & RHS. 2197 Known.Zero &= Known2.Zero; 2198 // Output known-1 are known to be set if set in either the LHS | RHS. 2199 Known.One |= Known2.One; 2200 break; 2201 case ISD::XOR: { 2202 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2203 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2204 2205 // Output known-0 bits are known if clear or set in both the LHS & RHS. 2206 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One); 2207 // Output known-1 are known to be set if set in only one of the LHS, RHS. 2208 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero); 2209 Known.Zero = KnownZeroOut; 2210 break; 2211 } 2212 case ISD::MUL: { 2213 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2214 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2215 2216 // If low bits are zero in either operand, output low known-0 bits. 2217 // Also compute a conservative estimate for high known-0 bits. 2218 // More trickiness is possible, but this is sufficient for the 2219 // interesting case of alignment computation. 2220 unsigned TrailZ = Known.countMinTrailingZeros() + 2221 Known2.countMinTrailingZeros(); 2222 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 2223 Known2.countMinLeadingZeros(), 2224 BitWidth) - BitWidth; 2225 2226 Known.resetAll(); 2227 Known.Zero.setLowBits(std::min(TrailZ, BitWidth)); 2228 Known.Zero.setHighBits(std::min(LeadZ, BitWidth)); 2229 break; 2230 } 2231 case ISD::UDIV: { 2232 // For the purposes of computing leading zeros we can conservatively 2233 // treat a udiv as a logical right shift by the power of 2 known to 2234 // be less than the denominator. 2235 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2236 unsigned LeadZ = Known2.countMinLeadingZeros(); 2237 2238 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2239 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2240 if (RHSMaxLeadingZeros != BitWidth) 2241 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2242 2243 Known.Zero.setHighBits(LeadZ); 2244 break; 2245 } 2246 case ISD::SELECT: 2247 computeKnownBits(Op.getOperand(2), Known, Depth+1); 2248 // If we don't know any bits, early out. 2249 if (!Known.One && !Known.Zero) 2250 break; 2251 computeKnownBits(Op.getOperand(1), Known2, Depth+1); 2252 2253 // Only known if known in both the LHS and RHS. 2254 Known.One &= Known2.One; 2255 Known.Zero &= Known2.Zero; 2256 break; 2257 case ISD::SELECT_CC: 2258 computeKnownBits(Op.getOperand(3), Known, Depth+1); 2259 // If we don't know any bits, early out. 2260 if (!Known.One && !Known.Zero) 2261 break; 2262 computeKnownBits(Op.getOperand(2), Known2, Depth+1); 2263 2264 // Only known if known in both the LHS and RHS. 2265 Known.One &= Known2.One; 2266 Known.Zero &= Known2.Zero; 2267 break; 2268 case ISD::SMULO: 2269 case ISD::UMULO: 2270 if (Op.getResNo() != 1) 2271 break; 2272 // The boolean result conforms to getBooleanContents. 2273 // If we know the result of a setcc has the top bits zero, use this info. 
2274 // We know that we have an integer-based boolean since these operations 2275 // are only available for integer. 2276 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2277 TargetLowering::ZeroOrOneBooleanContent && 2278 BitWidth > 1) 2279 Known.Zero.setBitsFrom(1); 2280 break; 2281 case ISD::SETCC: 2282 // If we know the result of a setcc has the top bits zero, use this info. 2283 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2284 TargetLowering::ZeroOrOneBooleanContent && 2285 BitWidth > 1) 2286 Known.Zero.setBitsFrom(1); 2287 break; 2288 case ISD::SHL: 2289 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2290 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2291 Known.Zero <<= *ShAmt; 2292 Known.One <<= *ShAmt; 2293 // Low bits are known zero. 2294 Known.Zero.setLowBits(ShAmt->getZExtValue()); 2295 } 2296 break; 2297 case ISD::SRL: 2298 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2299 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2300 Known.Zero.lshrInPlace(*ShAmt); 2301 Known.One.lshrInPlace(*ShAmt); 2302 // High bits are known zero. 2303 Known.Zero.setHighBits(ShAmt->getZExtValue()); 2304 } 2305 break; 2306 case ISD::SRA: 2307 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2308 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2309 Known.Zero.lshrInPlace(*ShAmt); 2310 Known.One.lshrInPlace(*ShAmt); 2311 // If we know the value of the sign bit, then we know it is copied across 2312 // the high bits by the shift amount. 2313 APInt SignMask = APInt::getSignMask(BitWidth); 2314 SignMask.lshrInPlace(*ShAmt); // Adjust to where it is now in the mask. 2315 if (Known.Zero.intersects(SignMask)) { 2316 Known.Zero.setHighBits(ShAmt->getZExtValue());// New bits are known zero. 2317 } else if (Known.One.intersects(SignMask)) { 2318 Known.One.setHighBits(ShAmt->getZExtValue()); // New bits are known one. 2319 } 2320 } 2321 break; 2322 case ISD::SIGN_EXTEND_INREG: { 2323 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2324 unsigned EBits = EVT.getScalarSizeInBits(); 2325 2326 // Sign extension. Compute the demanded bits in the result that are not 2327 // present in the input. 2328 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2329 2330 APInt InSignMask = APInt::getSignMask(EBits); 2331 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2332 2333 // If the sign extended bits are demanded, we know that the sign 2334 // bit is demanded. 2335 InSignMask = InSignMask.zext(BitWidth); 2336 if (NewBits.getBoolValue()) 2337 InputDemandedBits |= InSignMask; 2338 2339 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2340 Known.One &= InputDemandedBits; 2341 Known.Zero &= InputDemandedBits; 2342 2343 // If the sign bit of the input is known set or clear, then we know the 2344 // top bits of the result. 2345 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear 2346 Known.Zero |= NewBits; 2347 Known.One &= ~NewBits; 2348 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set 2349 Known.One |= NewBits; 2350 Known.Zero &= ~NewBits; 2351 } else { // Input sign bit unknown 2352 Known.Zero &= ~NewBits; 2353 Known.One &= ~NewBits; 2354 } 2355 break; 2356 } 2357 case ISD::CTTZ: 2358 case ISD::CTTZ_ZERO_UNDEF: { 2359 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2360 // If we have a known 1, its position is our upper bound. 
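    // For example (illustrative): if bit 7 of an i32 operand is known one
    // (and nothing is known below it), countMaxTrailingZeros() is 7, so the
    // cttz result fits in Log2_32(7) + 1 = 3 bits and bits [3, 31] are
    // known zero.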
2361 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 2362 unsigned LowBits = Log2_32(PossibleTZ) + 1; 2363 Known.Zero.setBitsFrom(LowBits); 2364 break; 2365 } 2366 case ISD::CTLZ: 2367 case ISD::CTLZ_ZERO_UNDEF: { 2368 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2369 // If we have a known 1, its position is our upper bound. 2370 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 2371 unsigned LowBits = Log2_32(PossibleLZ) + 1; 2372 Known.Zero.setBitsFrom(LowBits); 2373 break; 2374 } 2375 case ISD::CTPOP: { 2376 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2377 // If we know some of the bits are zero, they can't be one. 2378 unsigned PossibleOnes = Known2.countMaxPopulation(); 2379 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 2380 break; 2381 } 2382 case ISD::LOAD: { 2383 LoadSDNode *LD = cast<LoadSDNode>(Op); 2384 // If this is a ZEXTLoad and we are looking at the loaded value. 2385 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 2386 EVT VT = LD->getMemoryVT(); 2387 unsigned MemBits = VT.getScalarSizeInBits(); 2388 Known.Zero.setBitsFrom(MemBits); 2389 } else if (const MDNode *Ranges = LD->getRanges()) { 2390 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 2391 computeKnownBitsFromRangeMetadata(*Ranges, Known); 2392 } 2393 break; 2394 } 2395 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2396 EVT InVT = Op.getOperand(0).getValueType(); 2397 unsigned InBits = InVT.getScalarSizeInBits(); 2398 Known = Known.trunc(InBits); 2399 computeKnownBits(Op.getOperand(0), Known, 2400 DemandedElts.zext(InVT.getVectorNumElements()), 2401 Depth + 1); 2402 Known = Known.zext(BitWidth); 2403 Known.Zero.setBitsFrom(InBits); 2404 break; 2405 } 2406 case ISD::ZERO_EXTEND: { 2407 EVT InVT = Op.getOperand(0).getValueType(); 2408 unsigned InBits = InVT.getScalarSizeInBits(); 2409 Known = Known.trunc(InBits); 2410 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2411 Known = Known.zext(BitWidth); 2412 Known.Zero.setBitsFrom(InBits); 2413 break; 2414 } 2415 // TODO ISD::SIGN_EXTEND_VECTOR_INREG 2416 case ISD::SIGN_EXTEND: { 2417 EVT InVT = Op.getOperand(0).getValueType(); 2418 unsigned InBits = InVT.getScalarSizeInBits(); 2419 2420 Known = Known.trunc(InBits); 2421 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2422 2423 // If the sign bit is known to be zero or one, then sext will extend 2424 // it to the top bits, else it will just zext. 2425 Known = Known.sext(BitWidth); 2426 break; 2427 } 2428 case ISD::ANY_EXTEND: { 2429 EVT InVT = Op.getOperand(0).getValueType(); 2430 unsigned InBits = InVT.getScalarSizeInBits(); 2431 Known = Known.trunc(InBits); 2432 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2433 Known = Known.zext(BitWidth); 2434 break; 2435 } 2436 case ISD::TRUNCATE: { 2437 EVT InVT = Op.getOperand(0).getValueType(); 2438 unsigned InBits = InVT.getScalarSizeInBits(); 2439 Known = Known.zext(InBits); 2440 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2441 Known = Known.trunc(BitWidth); 2442 break; 2443 } 2444 case ISD::AssertZext: { 2445 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2446 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 2447 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2448 Known.Zero |= (~InMask); 2449 Known.One &= (~Known.Zero); 2450 break; 2451 } 2452 case ISD::FGETSIGN: 2453 // All bits are zero except the low bit. 
2454 Known.Zero.setBitsFrom(1); 2455 break; 2456 case ISD::USUBO: 2457 case ISD::SSUBO: 2458 if (Op.getResNo() == 1) { 2459 // If we know the result of a setcc has the top bits zero, use this info. 2460 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2461 TargetLowering::ZeroOrOneBooleanContent && 2462 BitWidth > 1) 2463 Known.Zero.setBitsFrom(1); 2464 break; 2465 } 2466 LLVM_FALLTHROUGH; 2467 case ISD::SUB: 2468 case ISD::SUBC: { 2469 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) { 2470 // We know that the top bits of C-X are clear if X contains less bits 2471 // than C (i.e. no wrap-around can happen). For example, 20-X is 2472 // positive if we can prove that X is >= 0 and < 16. 2473 if (CLHS->getAPIntValue().isNonNegative()) { 2474 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros(); 2475 // NLZ can't be BitWidth with no sign bit 2476 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1); 2477 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, 2478 Depth + 1); 2479 2480 // If all of the MaskV bits are known to be zero, then we know the 2481 // output top bits are zero, because we now know that the output is 2482 // from [0-C]. 2483 if ((Known2.Zero & MaskV) == MaskV) { 2484 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros(); 2485 // Top bits known zero. 2486 Known.Zero.setHighBits(NLZ2); 2487 } 2488 } 2489 } 2490 2491 // If low bits are know to be zero in both operands, then we know they are 2492 // going to be 0 in the result. Both addition and complement operations 2493 // preserve the low zero bits. 2494 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2495 unsigned KnownZeroLow = Known2.countMinTrailingZeros(); 2496 if (KnownZeroLow == 0) 2497 break; 2498 2499 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2500 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros()); 2501 Known.Zero.setLowBits(KnownZeroLow); 2502 break; 2503 } 2504 case ISD::UADDO: 2505 case ISD::SADDO: 2506 case ISD::ADDCARRY: 2507 if (Op.getResNo() == 1) { 2508 // If we know the result of a setcc has the top bits zero, use this info. 2509 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2510 TargetLowering::ZeroOrOneBooleanContent && 2511 BitWidth > 1) 2512 Known.Zero.setBitsFrom(1); 2513 break; 2514 } 2515 LLVM_FALLTHROUGH; 2516 case ISD::ADD: 2517 case ISD::ADDC: 2518 case ISD::ADDE: { 2519 // Output known-0 bits are known if clear or set in both the low clear bits 2520 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the 2521 // low 3 bits clear. 2522 // Output known-0 bits are also known if the top bits of each input are 2523 // known to be clear. For example, if one input has the top 10 bits clear 2524 // and the other has the top 8 bits clear, we know the top 7 bits of the 2525 // output must be clear. 2526 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2527 unsigned KnownZeroHigh = Known2.countMinLeadingZeros(); 2528 unsigned KnownZeroLow = Known2.countMinTrailingZeros(); 2529 2530 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, 2531 Depth + 1); 2532 KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros()); 2533 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros()); 2534 2535 if (Opcode == ISD::ADDE || Opcode == ISD::ADDCARRY) { 2536 // With ADDE and ADDCARRY, a carry bit may be added in, so we can only 2537 // use this information if we know (at least) that the low two bits are 2538 // clear. 
We then return to the caller that the low bit is unknown but 2539 // that other bits are known zero. 2540 if (KnownZeroLow >= 2) 2541 Known.Zero.setBits(1, KnownZeroLow); 2542 break; 2543 } 2544 2545 Known.Zero.setLowBits(KnownZeroLow); 2546 if (KnownZeroHigh > 1) 2547 Known.Zero.setHighBits(KnownZeroHigh - 1); 2548 break; 2549 } 2550 case ISD::SREM: 2551 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 2552 const APInt &RA = Rem->getAPIntValue().abs(); 2553 if (RA.isPowerOf2()) { 2554 APInt LowBits = RA - 1; 2555 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2556 2557 // The low bits of the first operand are unchanged by the srem. 2558 Known.Zero = Known2.Zero & LowBits; 2559 Known.One = Known2.One & LowBits; 2560 2561 // If the first operand is non-negative or has all low bits zero, then 2562 // the upper bits are all zero. 2563 if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits)) 2564 Known.Zero |= ~LowBits; 2565 2566 // If the first operand is negative and not all low bits are zero, then 2567 // the upper bits are all one. 2568 if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0)) 2569 Known.One |= ~LowBits; 2570 assert((Known.Zero & Known.One) == 0&&"Bits known to be one AND zero?"); 2571 } 2572 } 2573 break; 2574 case ISD::UREM: { 2575 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 2576 const APInt &RA = Rem->getAPIntValue(); 2577 if (RA.isPowerOf2()) { 2578 APInt LowBits = (RA - 1); 2579 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2580 2581 // The upper bits are all zero, the lower ones are unchanged. 2582 Known.Zero = Known2.Zero | ~LowBits; 2583 Known.One = Known2.One & LowBits; 2584 break; 2585 } 2586 } 2587 2588 // Since the result is less than or equal to either operand, any leading 2589 // zero bits in either operand must also exist in the result. 2590 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2591 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2592 2593 uint32_t Leaders = 2594 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 2595 Known.resetAll(); 2596 Known.Zero.setHighBits(Leaders); 2597 break; 2598 } 2599 case ISD::EXTRACT_ELEMENT: { 2600 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2601 const unsigned Index = Op.getConstantOperandVal(1); 2602 const unsigned BitWidth = Op.getValueSizeInBits(); 2603 2604 // Remove low part of known bits mask 2605 Known.Zero = Known.Zero.getHiBits(Known.Zero.getBitWidth() - Index * BitWidth); 2606 Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth); 2607 2608 // Remove high part of known bit mask 2609 Known = Known.trunc(BitWidth); 2610 break; 2611 } 2612 case ISD::EXTRACT_VECTOR_ELT: { 2613 SDValue InVec = Op.getOperand(0); 2614 SDValue EltNo = Op.getOperand(1); 2615 EVT VecVT = InVec.getValueType(); 2616 const unsigned BitWidth = Op.getValueSizeInBits(); 2617 const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); 2618 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 2619 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 2620 // anything about the extended bits. 2621 if (BitWidth > EltBitWidth) 2622 Known = Known.trunc(EltBitWidth); 2623 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 2624 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) { 2625 // If we know the element index, just demand that vector element. 
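      // For example (illustrative): extracting lane 2 of a v4i32 source uses
      // DemandedElt = 0b0100, so known bits from the other three lanes do
      // not dilute the result.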
2626 unsigned Idx = ConstEltNo->getZExtValue(); 2627 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); 2628 computeKnownBits(InVec, Known, DemandedElt, Depth + 1); 2629 } else { 2630 // Unknown element index, so ignore DemandedElts and demand them all. 2631 computeKnownBits(InVec, Known, Depth + 1); 2632 } 2633 if (BitWidth > EltBitWidth) 2634 Known = Known.zext(BitWidth); 2635 break; 2636 } 2637 case ISD::INSERT_VECTOR_ELT: { 2638 SDValue InVec = Op.getOperand(0); 2639 SDValue InVal = Op.getOperand(1); 2640 SDValue EltNo = Op.getOperand(2); 2641 2642 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 2643 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 2644 // If we know the element index, split the demand between the 2645 // source vector and the inserted element. 2646 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth); 2647 unsigned EltIdx = CEltNo->getZExtValue(); 2648 2649 // If we demand the inserted element then add its common known bits. 2650 if (DemandedElts[EltIdx]) { 2651 computeKnownBits(InVal, Known2, Depth + 1); 2652 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 2653 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());; 2654 } 2655 2656 // If we demand the source vector then add its common known bits, ensuring 2657 // that we don't demand the inserted element. 2658 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx)); 2659 if (!!VectorElts) { 2660 computeKnownBits(InVec, Known2, VectorElts, Depth + 1); 2661 Known.One &= Known2.One; 2662 Known.Zero &= Known2.Zero; 2663 } 2664 } else { 2665 // Unknown element index, so ignore DemandedElts and demand them all. 2666 computeKnownBits(InVec, Known, Depth + 1); 2667 computeKnownBits(InVal, Known2, Depth + 1); 2668 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 2669 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());; 2670 } 2671 break; 2672 } 2673 case ISD::BITREVERSE: { 2674 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2675 Known.Zero = Known2.Zero.reverseBits(); 2676 Known.One = Known2.One.reverseBits(); 2677 break; 2678 } 2679 case ISD::BSWAP: { 2680 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2681 Known.Zero = Known2.Zero.byteSwap(); 2682 Known.One = Known2.One.byteSwap(); 2683 break; 2684 } 2685 case ISD::ABS: { 2686 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2687 2688 // If the source's MSB is zero then we know the rest of the bits already. 2689 if (Known2.isNonNegative()) { 2690 Known.Zero = Known2.Zero; 2691 Known.One = Known2.One; 2692 break; 2693 } 2694 2695 // We only know that the absolute values's MSB will be zero iff there is 2696 // a set bit that isn't the sign bit (otherwise it could be INT_MIN). 2697 Known2.One.clearSignBit(); 2698 if (Known2.One.getBoolValue()) { 2699 Known.Zero = APInt::getSignMask(BitWidth); 2700 break; 2701 } 2702 break; 2703 } 2704 case ISD::UMIN: { 2705 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2706 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2707 2708 // UMIN - we know that the result will have the maximum of the 2709 // known zero leading bits of the inputs. 
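    // For example (illustrative): if one i16 operand has at least 8 leading
    // zero bits and the other only 3, the unsigned minimum cannot exceed the
    // first operand, so the result is known to have at least 8 leading
    // zeros.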
2710 unsigned LeadZero = Known.countMinLeadingZeros(); 2711 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros()); 2712 2713 Known.Zero &= Known2.Zero; 2714 Known.One &= Known2.One; 2715 Known.Zero.setHighBits(LeadZero); 2716 break; 2717 } 2718 case ISD::UMAX: { 2719 computeKnownBits(Op.getOperand(0), Known, DemandedElts, 2720 Depth + 1); 2721 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2722 2723 // UMAX - we know that the result will have the maximum of the 2724 // known one leading bits of the inputs. 2725 unsigned LeadOne = Known.countMinLeadingOnes(); 2726 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes()); 2727 2728 Known.Zero &= Known2.Zero; 2729 Known.One &= Known2.One; 2730 Known.One.setHighBits(LeadOne); 2731 break; 2732 } 2733 case ISD::SMIN: 2734 case ISD::SMAX: { 2735 computeKnownBits(Op.getOperand(0), Known, DemandedElts, 2736 Depth + 1); 2737 // If we don't know any bits, early out. 2738 if (!Known.One && !Known.Zero) 2739 break; 2740 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2741 Known.Zero &= Known2.Zero; 2742 Known.One &= Known2.One; 2743 break; 2744 } 2745 case ISD::FrameIndex: 2746 case ISD::TargetFrameIndex: 2747 if (unsigned Align = InferPtrAlignment(Op)) { 2748 // The low bits are known zero if the pointer is aligned. 2749 Known.Zero.setLowBits(Log2_32(Align)); 2750 break; 2751 } 2752 break; 2753 2754 default: 2755 if (Opcode < ISD::BUILTIN_OP_END) 2756 break; 2757 LLVM_FALLTHROUGH; 2758 case ISD::INTRINSIC_WO_CHAIN: 2759 case ISD::INTRINSIC_W_CHAIN: 2760 case ISD::INTRINSIC_VOID: 2761 // Allow the target to implement this method for its nodes. 2762 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); 2763 break; 2764 } 2765 2766 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); 2767 } 2768 2769 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0, 2770 SDValue N1) const { 2771 // X + 0 never overflow 2772 if (isNullConstant(N1)) 2773 return OFK_Never; 2774 2775 KnownBits N1Known; 2776 computeKnownBits(N1, N1Known); 2777 if (N1Known.Zero.getBoolValue()) { 2778 KnownBits N0Known; 2779 computeKnownBits(N0, N0Known); 2780 2781 bool overflow; 2782 (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow); 2783 if (!overflow) 2784 return OFK_Never; 2785 } 2786 2787 // mulhi + 1 never overflow 2788 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 && 2789 (~N1Known.Zero & 0x01) == ~N1Known.Zero) 2790 return OFK_Never; 2791 2792 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) { 2793 KnownBits N0Known; 2794 computeKnownBits(N0, N0Known); 2795 2796 if ((~N0Known.Zero & 0x01) == ~N0Known.Zero) 2797 return OFK_Never; 2798 } 2799 2800 return OFK_Sometime; 2801 } 2802 2803 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const { 2804 EVT OpVT = Val.getValueType(); 2805 unsigned BitWidth = OpVT.getScalarSizeInBits(); 2806 2807 // Is the constant a known power of 2? 2808 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val)) 2809 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 2810 2811 // A left-shift of a constant one will have exactly one bit set because 2812 // shifting the bit off the end is undefined. 2813 if (Val.getOpcode() == ISD::SHL) { 2814 auto *C = isConstOrConstSplat(Val.getOperand(0)); 2815 if (C && C->getAPIntValue() == 1) 2816 return true; 2817 } 2818 2819 // Similarly, a logical right-shift of a constant sign-bit will have exactly 2820 // one bit set. 
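  // For example (illustrative): for i8, (srl 0x80, %x) has exactly one bit
  // set for every defined shift amount, since shifting the sign bit past
  // bit 0 would require an out-of-range (undefined) shift.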
2821 if (Val.getOpcode() == ISD::SRL) { 2822 auto *C = isConstOrConstSplat(Val.getOperand(0)); 2823 if (C && C->getAPIntValue().isSignMask()) 2824 return true; 2825 } 2826 2827 // Are all operands of a build vector constant powers of two? 2828 if (Val.getOpcode() == ISD::BUILD_VECTOR) 2829 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 2830 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 2831 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 2832 return false; 2833 })) 2834 return true; 2835 2836 // More could be done here, though the above checks are enough 2837 // to handle some common cases. 2838 2839 // Fall back to computeKnownBits to catch other known cases. 2840 KnownBits Known; 2841 computeKnownBits(Val, Known); 2842 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 2843 } 2844 2845 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 2846 EVT VT = Op.getValueType(); 2847 APInt DemandedElts = VT.isVector() 2848 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2849 : APInt(1, 1); 2850 return ComputeNumSignBits(Op, DemandedElts, Depth); 2851 } 2852 2853 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 2854 unsigned Depth) const { 2855 EVT VT = Op.getValueType(); 2856 assert(VT.isInteger() && "Invalid VT!"); 2857 unsigned VTBits = VT.getScalarSizeInBits(); 2858 unsigned NumElts = DemandedElts.getBitWidth(); 2859 unsigned Tmp, Tmp2; 2860 unsigned FirstAnswer = 1; 2861 2862 if (Depth == 6) 2863 return 1; // Limit search depth. 2864 2865 if (!DemandedElts) 2866 return 1; // No demanded elts, better to assume we don't know anything. 2867 2868 switch (Op.getOpcode()) { 2869 default: break; 2870 case ISD::AssertSext: 2871 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2872 return VTBits-Tmp+1; 2873 case ISD::AssertZext: 2874 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2875 return VTBits-Tmp; 2876 2877 case ISD::Constant: { 2878 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue(); 2879 return Val.getNumSignBits(); 2880 } 2881 2882 case ISD::BUILD_VECTOR: 2883 Tmp = VTBits; 2884 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 2885 if (!DemandedElts[i]) 2886 continue; 2887 2888 SDValue SrcOp = Op.getOperand(i); 2889 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 2890 2891 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2892 if (SrcOp.getValueSizeInBits() != VTBits) { 2893 assert(SrcOp.getValueSizeInBits() > VTBits && 2894 "Expected BUILD_VECTOR implicit truncation"); 2895 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 2896 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 2897 } 2898 Tmp = std::min(Tmp, Tmp2); 2899 } 2900 return Tmp; 2901 2902 case ISD::VECTOR_SHUFFLE: { 2903 // Collect the minimum number of sign bits that are shared by every vector 2904 // element referenced by the shuffle. 2905 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2906 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2907 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2908 for (unsigned i = 0; i != NumElts; ++i) { 2909 int M = SVN->getMaskElt(i); 2910 if (!DemandedElts[i]) 2911 continue; 2912 // For UNDEF elements, we don't know anything about the common state of 2913 // the shuffle result. 
2914 if (M < 0) 2915 return 1; 2916 if ((unsigned)M < NumElts) 2917 DemandedLHS.setBit((unsigned)M % NumElts); 2918 else 2919 DemandedRHS.setBit((unsigned)M % NumElts); 2920 } 2921 Tmp = UINT_MAX; 2922 if (!!DemandedLHS) 2923 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 2924 if (!!DemandedRHS) { 2925 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 2926 Tmp = std::min(Tmp, Tmp2); 2927 } 2928 // If we don't know anything, early out and try computeKnownBits fall-back. 2929 if (Tmp == 1) 2930 break; 2931 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 2932 return Tmp; 2933 } 2934 2935 case ISD::SIGN_EXTEND: 2936 case ISD::SIGN_EXTEND_VECTOR_INREG: 2937 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 2938 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp; 2939 2940 case ISD::SIGN_EXTEND_INREG: 2941 // Max of the input and what this extends. 2942 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 2943 Tmp = VTBits-Tmp+1; 2944 2945 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2946 return std::max(Tmp, Tmp2); 2947 2948 case ISD::SRA: 2949 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 2950 // SRA X, C -> adds C sign bits. 2951 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) { 2952 APInt ShiftVal = C->getAPIntValue(); 2953 ShiftVal += Tmp; 2954 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); 2955 } 2956 return Tmp; 2957 case ISD::SHL: 2958 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) { 2959 // shl destroys sign bits. 2960 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2961 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 2962 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 2963 return Tmp - C->getZExtValue(); 2964 } 2965 break; 2966 case ISD::AND: 2967 case ISD::OR: 2968 case ISD::XOR: // NOT is handled here. 2969 // Logical binary ops preserve the number of sign bits at the worst. 2970 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2971 if (Tmp != 1) { 2972 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2973 FirstAnswer = std::min(Tmp, Tmp2); 2974 // We computed what we know about the sign bits as our first 2975 // answer. Now proceed to the generic code that uses 2976 // computeKnownBits, and pick whichever answer is better. 2977 } 2978 break; 2979 2980 case ISD::SELECT: 2981 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2982 if (Tmp == 1) return 1; // Early out. 2983 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1); 2984 return std::min(Tmp, Tmp2); 2985 case ISD::SELECT_CC: 2986 Tmp = ComputeNumSignBits(Op.getOperand(2), Depth+1); 2987 if (Tmp == 1) return 1; // Early out. 2988 Tmp2 = ComputeNumSignBits(Op.getOperand(3), Depth+1); 2989 return std::min(Tmp, Tmp2); 2990 case ISD::SMIN: 2991 case ISD::SMAX: 2992 case ISD::UMIN: 2993 case ISD::UMAX: 2994 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 2995 if (Tmp == 1) 2996 return 1; // Early out. 2997 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 2998 return std::min(Tmp, Tmp2); 2999 case ISD::SADDO: 3000 case ISD::UADDO: 3001 case ISD::SSUBO: 3002 case ISD::USUBO: 3003 case ISD::SMULO: 3004 case ISD::UMULO: 3005 if (Op.getResNo() != 1) 3006 break; 3007 // The boolean result conforms to getBooleanContents. Fall through. 3008 // If setcc returns 0/-1, all bits are sign bits. 3009 // We know that we have an integer-based boolean since these operations 3010 // are only available for integer. 
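    // For example (illustrative): a target reporting
    // ZeroOrNegativeOneBooleanContent materializes the overflow flag as
    // all-zeros or all-ones, so every bit of the result is a sign bit.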
3011 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 3012 TargetLowering::ZeroOrNegativeOneBooleanContent) 3013 return VTBits; 3014 break; 3015 case ISD::SETCC: 3016 // If setcc returns 0/-1, all bits are sign bits. 3017 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3018 TargetLowering::ZeroOrNegativeOneBooleanContent) 3019 return VTBits; 3020 break; 3021 case ISD::ROTL: 3022 case ISD::ROTR: 3023 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 3024 unsigned RotAmt = C->getZExtValue() & (VTBits-1); 3025 3026 // Handle rotate right by N like a rotate left by 32-N. 3027 if (Op.getOpcode() == ISD::ROTR) 3028 RotAmt = (VTBits-RotAmt) & (VTBits-1); 3029 3030 // If we aren't rotating out all of the known-in sign bits, return the 3031 // number that are left. This handles rotl(sext(x), 1) for example. 3032 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3033 if (Tmp > RotAmt+1) return Tmp-RotAmt; 3034 } 3035 break; 3036 case ISD::ADD: 3037 case ISD::ADDC: 3038 // Add can have at most one carry bit. Thus we know that the output 3039 // is, at worst, one more bit than the inputs. 3040 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3041 if (Tmp == 1) return 1; // Early out. 3042 3043 // Special case decrementing a value (ADD X, -1): 3044 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 3045 if (CRHS->isAllOnesValue()) { 3046 KnownBits Known; 3047 computeKnownBits(Op.getOperand(0), Known, Depth+1); 3048 3049 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3050 // sign bits set. 3051 if ((Known.Zero | 1).isAllOnesValue()) 3052 return VTBits; 3053 3054 // If we are subtracting one from a positive number, there is no carry 3055 // out of the result. 3056 if (Known.isNonNegative()) 3057 return Tmp; 3058 } 3059 3060 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3061 if (Tmp2 == 1) return 1; 3062 return std::min(Tmp, Tmp2)-1; 3063 3064 case ISD::SUB: 3065 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3066 if (Tmp2 == 1) return 1; 3067 3068 // Handle NEG. 3069 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) 3070 if (CLHS->isNullValue()) { 3071 KnownBits Known; 3072 computeKnownBits(Op.getOperand(1), Known, Depth+1); 3073 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3074 // sign bits set. 3075 if ((Known.Zero | 1).isAllOnesValue()) 3076 return VTBits; 3077 3078 // If the input is known to be positive (the sign bit is known clear), 3079 // the output of the NEG has the same number of sign bits as the input. 3080 if (Known.isNonNegative()) 3081 return Tmp2; 3082 3083 // Otherwise, we treat this like a SUB. 3084 } 3085 3086 // Sub can have at most one carry bit. Thus we know that the output 3087 // is, at worst, one more bit than the inputs. 3088 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3089 if (Tmp == 1) return 1; // Early out. 3090 return std::min(Tmp, Tmp2)-1; 3091 case ISD::TRUNCATE: { 3092 // Check if the sign bits of source go down as far as the truncated value. 
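    // For example (illustrative): truncating an i64 with 40 sign bits to
    // i32 leaves 40 - (64 - 32) = 8 sign bits; with only 30 sign bits in the
    // source nothing survives and we fall back to the generic handling
    // below.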
3093 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); 3094 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3095 if (NumSrcSignBits > (NumSrcBits - VTBits)) 3096 return NumSrcSignBits - (NumSrcBits - VTBits); 3097 break; 3098 } 3099 case ISD::EXTRACT_ELEMENT: { 3100 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3101 const int BitWidth = Op.getValueSizeInBits(); 3102 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; 3103 3104 // Get reverse index (starting from 1), Op1 value indexes elements from 3105 // little end. Sign starts at big end. 3106 const int rIndex = Items - 1 - Op.getConstantOperandVal(1); 3107 3108 // If the sign portion ends in our element the subtraction gives correct 3109 // result. Otherwise it gives either negative or > bitwidth result 3110 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); 3111 } 3112 case ISD::INSERT_VECTOR_ELT: { 3113 SDValue InVec = Op.getOperand(0); 3114 SDValue InVal = Op.getOperand(1); 3115 SDValue EltNo = Op.getOperand(2); 3116 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 3117 3118 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3119 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3120 // If we know the element index, split the demand between the 3121 // source vector and the inserted element. 3122 unsigned EltIdx = CEltNo->getZExtValue(); 3123 3124 // If we demand the inserted element then get its sign bits. 3125 Tmp = UINT_MAX; 3126 if (DemandedElts[EltIdx]) { 3127 // TODO - handle implicit truncation of inserted elements. 3128 if (InVal.getScalarValueSizeInBits() != VTBits) 3129 break; 3130 Tmp = ComputeNumSignBits(InVal, Depth + 1); 3131 } 3132 3133 // If we demand the source vector then get its sign bits, and determine 3134 // the minimum. 3135 APInt VectorElts = DemandedElts; 3136 VectorElts.clearBit(EltIdx); 3137 if (!!VectorElts) { 3138 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1); 3139 Tmp = std::min(Tmp, Tmp2); 3140 } 3141 } else { 3142 // Unknown element index, so ignore DemandedElts and demand them all. 3143 Tmp = ComputeNumSignBits(InVec, Depth + 1); 3144 Tmp2 = ComputeNumSignBits(InVal, Depth + 1); 3145 Tmp = std::min(Tmp, Tmp2); 3146 } 3147 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3148 return Tmp; 3149 } 3150 case ISD::EXTRACT_VECTOR_ELT: { 3151 SDValue InVec = Op.getOperand(0); 3152 SDValue EltNo = Op.getOperand(1); 3153 EVT VecVT = InVec.getValueType(); 3154 const unsigned BitWidth = Op.getValueSizeInBits(); 3155 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); 3156 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3157 3158 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know 3159 // anything about sign bits. But if the sizes match we can derive knowledge 3160 // about sign bits from the vector operand. 3161 if (BitWidth != EltBitWidth) 3162 break; 3163 3164 // If we know the element index, just demand that vector element, else for 3165 // an unknown element index, ignore DemandedElts and demand them all. 
3166 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 3167 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3168 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) 3169 DemandedSrcElts = 3170 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); 3171 3172 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1); 3173 } 3174 case ISD::EXTRACT_SUBVECTOR: { 3175 // If we know the element index, just demand that subvector elements, 3176 // otherwise demand them all. 3177 SDValue Src = Op.getOperand(0); 3178 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 3179 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3180 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 3181 // Offset the demanded elts by the subvector index. 3182 uint64_t Idx = SubIdx->getZExtValue(); 3183 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx); 3184 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1); 3185 } 3186 return ComputeNumSignBits(Src, Depth + 1); 3187 } 3188 case ISD::CONCAT_VECTORS: 3189 // Determine the minimum number of sign bits across all demanded 3190 // elts of the input vectors. Early out if the result is already 1. 3191 Tmp = UINT_MAX; 3192 EVT SubVectorVT = Op.getOperand(0).getValueType(); 3193 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 3194 unsigned NumSubVectors = Op.getNumOperands(); 3195 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) { 3196 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 3197 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 3198 if (!DemandedSub) 3199 continue; 3200 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1); 3201 Tmp = std::min(Tmp, Tmp2); 3202 } 3203 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3204 return Tmp; 3205 } 3206 3207 // If we are looking at the loaded value of the SDNode. 3208 if (Op.getResNo() == 0) { 3209 // Handle LOADX separately here. EXTLOAD case will fallthrough. 3210 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 3211 unsigned ExtType = LD->getExtensionType(); 3212 switch (ExtType) { 3213 default: break; 3214 case ISD::SEXTLOAD: // '17' bits known 3215 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3216 return VTBits-Tmp+1; 3217 case ISD::ZEXTLOAD: // '16' bits known 3218 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3219 return VTBits-Tmp; 3220 } 3221 } 3222 } 3223 3224 // Allow the target to implement this method for its nodes. 3225 if (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3226 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3227 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3228 Op.getOpcode() == ISD::INTRINSIC_VOID) { 3229 unsigned NumBits = 3230 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 3231 if (NumBits > 1) 3232 FirstAnswer = std::max(FirstAnswer, NumBits); 3233 } 3234 3235 // Finally, if we can prove that the top bits of the result are 0's or 1's, 3236 // use this information. 3237 KnownBits Known; 3238 computeKnownBits(Op, Known, DemandedElts, Depth); 3239 3240 APInt Mask; 3241 if (Known.isNonNegative()) { // sign bit is 0 3242 Mask = Known.Zero; 3243 } else if (Known.isNegative()) { // sign bit is 1; 3244 Mask = Known.One; 3245 } else { 3246 // Nothing known. 3247 return FirstAnswer; 3248 } 3249 3250 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 3251 // the number of identical bits in the top of the input value. 
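// Worked example (illustrative, not from the original source): for an i8
// value whose top five bits are known to be ones, Mask = Known.One =
// 0b11111000, so ~Mask = 0b00000111 has five leading zeros and five sign
// bits are reported (unless FirstAnswer was already larger).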
3252 Mask = ~Mask;
3253 Mask <<= Mask.getBitWidth()-VTBits;
3254 // Return # leading zeros. We use 'min' here in case Val was zero before
3255 // shifting. We don't want to return '64' as for an i32 "0".
3256 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3257 }
3258
3259 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3260 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3261 !isa<ConstantSDNode>(Op.getOperand(1)))
3262 return false;
3263
3264 if (Op.getOpcode() == ISD::OR &&
3265 !MaskedValueIsZero(Op.getOperand(0),
3266 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3267 return false;
3268
3269 return true;
3270 }
3271
3272 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
3273 // If we're told that NaNs won't happen, assume they won't.
3274 if (getTarget().Options.NoNaNsFPMath)
3275 return true;
3276
3277 if (Op->getFlags().hasNoNaNs())
3278 return true;
3279
3280 // If the value is a constant, we can obviously see if it is a NaN or not.
3281 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3282 return !C->getValueAPF().isNaN();
3283
3284 // TODO: Recognize more cases here.
3285
3286 return false;
3287 }
3288
3289 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3290 // If the value is a constant, we can obviously see if it is a zero or not.
3291 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3292 return !C->isZero();
3293
3294 // TODO: Recognize more cases here.
3295 switch (Op.getOpcode()) {
3296 default: break;
3297 case ISD::OR:
3298 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3299 return !C->isNullValue();
3300 break;
3301 }
3302
3303 return false;
3304 }
3305
3306 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3307 // Check the obvious case.
3308 if (A == B) return true;
3309
3310 // Check for negative and positive zero.
3311 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3312 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3313 if (CA->isZero() && CB->isZero()) return true;
3314
3315 // Otherwise they may not be equal.
3316 return false;
3317 }
3318
3319 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
3320 assert(A.getValueType() == B.getValueType() &&
3321 "Values must have the same type");
3322 KnownBits AKnown, BKnown;
3323 computeKnownBits(A, AKnown);
3324 computeKnownBits(B, BKnown);
3325 return (AKnown.Zero | BKnown.Zero).isAllOnesValue();
3326 }
3327
3328 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
3329 ArrayRef<SDValue> Ops,
3330 llvm::SelectionDAG &DAG) {
3331 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
3332 assert(llvm::all_of(Ops,
3333 [Ops](SDValue Op) {
3334 return Ops[0].getValueType() == Op.getValueType();
3335 }) &&
3336 "Concatenation of vectors with inconsistent value types!");
3337 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
3338 VT.getVectorNumElements() &&
3339 "Incorrect element count in vector concatenation!");
3340
3341 if (Ops.size() == 1)
3342 return Ops[0];
3343
3344 // Concat of UNDEFs is UNDEF.
3345 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3346 return DAG.getUNDEF(VT);
3347
3348 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
3349 // simplified to one big BUILD_VECTOR.
3350 // FIXME: Add support for SCALAR_TO_VECTOR as well.
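// Illustrative example (not in the original): concatenating two v2i32
// BUILD_VECTORs (a, b) and (c, d) into a v4i32 yields the single
// BUILD_VECTOR (a, b, c, d); an UNDEF operand contributes UNDEF scalars.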
3351 EVT SVT = VT.getScalarType(); 3352 SmallVector<SDValue, 16> Elts; 3353 for (SDValue Op : Ops) { 3354 EVT OpVT = Op.getValueType(); 3355 if (Op.isUndef()) 3356 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); 3357 else if (Op.getOpcode() == ISD::BUILD_VECTOR) 3358 Elts.append(Op->op_begin(), Op->op_end()); 3359 else 3360 return SDValue(); 3361 } 3362 3363 // BUILD_VECTOR requires all inputs to be of the same type, find the 3364 // maximum type and extend them all. 3365 for (SDValue Op : Elts) 3366 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 3367 3368 if (SVT.bitsGT(VT.getScalarType())) 3369 for (SDValue &Op : Elts) 3370 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) 3371 ? DAG.getZExtOrTrunc(Op, DL, SVT) 3372 : DAG.getSExtOrTrunc(Op, DL, SVT); 3373 3374 return DAG.getBuildVector(VT, DL, Elts); 3375 } 3376 3377 /// Gets or creates the specified node. 3378 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { 3379 FoldingSetNodeID ID; 3380 AddNodeIDNode(ID, Opcode, getVTList(VT), None); 3381 void *IP = nullptr; 3382 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 3383 return SDValue(E, 0); 3384 3385 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), 3386 getVTList(VT)); 3387 CSEMap.InsertNode(N, IP); 3388 3389 InsertNode(N); 3390 return SDValue(N, 0); 3391 } 3392 3393 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 3394 SDValue Operand, const SDNodeFlags Flags) { 3395 // Constant fold unary operations with an integer constant operand. Even 3396 // opaque constant will be folded, because the folding of unary operations 3397 // doesn't create new constants with different values. Nevertheless, the 3398 // opaque flag is preserved during folding to prevent future folding with 3399 // other constants. 
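// Illustrative example (not in the original): getNode(ISD::TRUNCATE, DL,
// MVT::i8, <i16 constant 0x1234>) folds straight to the i8 constant 0x34,
// and getNode(ISD::CTPOP, DL, MVT::i16, <i16 constant 0x00FF>) folds to 8.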
3400 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 3401 const APInt &Val = C->getAPIntValue(); 3402 switch (Opcode) { 3403 default: break; 3404 case ISD::SIGN_EXTEND: 3405 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 3406 C->isTargetOpcode(), C->isOpaque()); 3407 case ISD::ANY_EXTEND: 3408 case ISD::ZERO_EXTEND: 3409 case ISD::TRUNCATE: 3410 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 3411 C->isTargetOpcode(), C->isOpaque()); 3412 case ISD::UINT_TO_FP: 3413 case ISD::SINT_TO_FP: { 3414 APFloat apf(EVTToAPFloatSemantics(VT), 3415 APInt::getNullValue(VT.getSizeInBits())); 3416 (void)apf.convertFromAPInt(Val, 3417 Opcode==ISD::SINT_TO_FP, 3418 APFloat::rmNearestTiesToEven); 3419 return getConstantFP(apf, DL, VT); 3420 } 3421 case ISD::BITCAST: 3422 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 3423 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 3424 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 3425 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 3426 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 3427 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 3428 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 3429 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 3430 break; 3431 case ISD::ABS: 3432 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 3433 C->isOpaque()); 3434 case ISD::BITREVERSE: 3435 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 3436 C->isOpaque()); 3437 case ISD::BSWAP: 3438 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 3439 C->isOpaque()); 3440 case ISD::CTPOP: 3441 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 3442 C->isOpaque()); 3443 case ISD::CTLZ: 3444 case ISD::CTLZ_ZERO_UNDEF: 3445 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 3446 C->isOpaque()); 3447 case ISD::CTTZ: 3448 case ISD::CTTZ_ZERO_UNDEF: 3449 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 3450 C->isOpaque()); 3451 case ISD::FP16_TO_FP: { 3452 bool Ignored; 3453 APFloat FPV(APFloat::IEEEhalf(), 3454 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 3455 3456 // This can return overflow, underflow, or inexact; we don't care. 3457 // FIXME need to be more flexible about rounding mode. 3458 (void)FPV.convert(EVTToAPFloatSemantics(VT), 3459 APFloat::rmNearestTiesToEven, &Ignored); 3460 return getConstantFP(FPV, DL, VT); 3461 } 3462 } 3463 } 3464 3465 // Constant fold unary operations with a floating point constant operand. 
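// Illustrative example (not in the original): FNEG of the constant +2.5
// folds to -2.5, and FFLOOR of 2.75 folds to 2.0 via roundToIntegral.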
3466 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 3467 APFloat V = C->getValueAPF(); // make copy 3468 switch (Opcode) { 3469 case ISD::FNEG: 3470 V.changeSign(); 3471 return getConstantFP(V, DL, VT); 3472 case ISD::FABS: 3473 V.clearSign(); 3474 return getConstantFP(V, DL, VT); 3475 case ISD::FCEIL: { 3476 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 3477 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3478 return getConstantFP(V, DL, VT); 3479 break; 3480 } 3481 case ISD::FTRUNC: { 3482 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 3483 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3484 return getConstantFP(V, DL, VT); 3485 break; 3486 } 3487 case ISD::FFLOOR: { 3488 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 3489 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3490 return getConstantFP(V, DL, VT); 3491 break; 3492 } 3493 case ISD::FP_EXTEND: { 3494 bool ignored; 3495 // This can return overflow, underflow, or inexact; we don't care. 3496 // FIXME need to be more flexible about rounding mode. 3497 (void)V.convert(EVTToAPFloatSemantics(VT), 3498 APFloat::rmNearestTiesToEven, &ignored); 3499 return getConstantFP(V, DL, VT); 3500 } 3501 case ISD::FP_TO_SINT: 3502 case ISD::FP_TO_UINT: { 3503 bool ignored; 3504 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 3505 // FIXME need to be more flexible about rounding mode. 3506 APFloat::opStatus s = 3507 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 3508 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 3509 break; 3510 return getConstant(IntVal, DL, VT); 3511 } 3512 case ISD::BITCAST: 3513 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 3514 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3515 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 3516 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3517 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 3518 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 3519 break; 3520 case ISD::FP_TO_FP16: { 3521 bool Ignored; 3522 // This can return overflow, underflow, or inexact; we don't care. 3523 // FIXME need to be more flexible about rounding mode. 3524 (void)V.convert(APFloat::IEEEhalf(), 3525 APFloat::rmNearestTiesToEven, &Ignored); 3526 return getConstant(V.bitcastToAPInt(), DL, VT); 3527 } 3528 } 3529 } 3530 3531 // Constant fold unary operations with a vector integer or float operand. 3532 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 3533 if (BV->isConstant()) { 3534 switch (Opcode) { 3535 default: 3536 // FIXME: Entirely reasonable to perform folding of other unary 3537 // operations here as the need arises. 
3538 break; 3539 case ISD::FNEG: 3540 case ISD::FABS: 3541 case ISD::FCEIL: 3542 case ISD::FTRUNC: 3543 case ISD::FFLOOR: 3544 case ISD::FP_EXTEND: 3545 case ISD::FP_TO_SINT: 3546 case ISD::FP_TO_UINT: 3547 case ISD::TRUNCATE: 3548 case ISD::UINT_TO_FP: 3549 case ISD::SINT_TO_FP: 3550 case ISD::ABS: 3551 case ISD::BITREVERSE: 3552 case ISD::BSWAP: 3553 case ISD::CTLZ: 3554 case ISD::CTLZ_ZERO_UNDEF: 3555 case ISD::CTTZ: 3556 case ISD::CTTZ_ZERO_UNDEF: 3557 case ISD::CTPOP: { 3558 SDValue Ops = { Operand }; 3559 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 3560 return Fold; 3561 } 3562 } 3563 } 3564 } 3565 3566 unsigned OpOpcode = Operand.getNode()->getOpcode(); 3567 switch (Opcode) { 3568 case ISD::TokenFactor: 3569 case ISD::MERGE_VALUES: 3570 case ISD::CONCAT_VECTORS: 3571 return Operand; // Factor, merge or concat of one node? No need. 3572 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 3573 case ISD::FP_EXTEND: 3574 assert(VT.isFloatingPoint() && 3575 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 3576 if (Operand.getValueType() == VT) return Operand; // noop conversion. 3577 assert((!VT.isVector() || 3578 VT.getVectorNumElements() == 3579 Operand.getValueType().getVectorNumElements()) && 3580 "Vector element count mismatch!"); 3581 assert(Operand.getValueType().bitsLT(VT) && 3582 "Invalid fpext node, dst < src!"); 3583 if (Operand.isUndef()) 3584 return getUNDEF(VT); 3585 break; 3586 case ISD::SIGN_EXTEND: 3587 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3588 "Invalid SIGN_EXTEND!"); 3589 if (Operand.getValueType() == VT) return Operand; // noop extension 3590 assert((!VT.isVector() || 3591 VT.getVectorNumElements() == 3592 Operand.getValueType().getVectorNumElements()) && 3593 "Vector element count mismatch!"); 3594 assert(Operand.getValueType().bitsLT(VT) && 3595 "Invalid sext node, dst < src!"); 3596 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 3597 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 3598 else if (OpOpcode == ISD::UNDEF) 3599 // sext(undef) = 0, because the top bits will all be the same. 3600 return getConstant(0, DL, VT); 3601 break; 3602 case ISD::ZERO_EXTEND: 3603 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3604 "Invalid ZERO_EXTEND!"); 3605 if (Operand.getValueType() == VT) return Operand; // noop extension 3606 assert((!VT.isVector() || 3607 VT.getVectorNumElements() == 3608 Operand.getValueType().getVectorNumElements()) && 3609 "Vector element count mismatch!"); 3610 assert(Operand.getValueType().bitsLT(VT) && 3611 "Invalid zext node, dst < src!"); 3612 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 3613 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0)); 3614 else if (OpOpcode == ISD::UNDEF) 3615 // zext(undef) = 0, because the top bits will be zero. 
3616 return getConstant(0, DL, VT);
3617 break;
3618 case ISD::ANY_EXTEND:
3619 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3620 "Invalid ANY_EXTEND!");
3621 if (Operand.getValueType() == VT) return Operand; // noop extension
3622 assert((!VT.isVector() ||
3623 VT.getVectorNumElements() ==
3624 Operand.getValueType().getVectorNumElements()) &&
3625 "Vector element count mismatch!");
3626 assert(Operand.getValueType().bitsLT(VT) &&
3627 "Invalid anyext node, dst < src!");
3628
3629 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3630 OpOpcode == ISD::ANY_EXTEND)
3631 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
3632 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3633 else if (OpOpcode == ISD::UNDEF)
3634 return getUNDEF(VT);
3635
3636 // (ext (trunc x)) -> x
3637 if (OpOpcode == ISD::TRUNCATE) {
3638 SDValue OpOp = Operand.getOperand(0);
3639 if (OpOp.getValueType() == VT)
3640 return OpOp;
3641 }
3642 break;
3643 case ISD::TRUNCATE:
3644 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3645 "Invalid TRUNCATE!");
3646 if (Operand.getValueType() == VT) return Operand; // noop truncate
3647 assert((!VT.isVector() ||
3648 VT.getVectorNumElements() ==
3649 Operand.getValueType().getVectorNumElements()) &&
3650 "Vector element count mismatch!");
3651 assert(Operand.getValueType().bitsGT(VT) &&
3652 "Invalid truncate node, src < dst!");
3653 if (OpOpcode == ISD::TRUNCATE)
3654 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3655 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3656 OpOpcode == ISD::ANY_EXTEND) {
3657 // If the source is smaller than the dest, we still need an extend.
3658 if (Operand.getOperand(0).getValueType().getScalarType()
3659 .bitsLT(VT.getScalarType()))
3660 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3661 if (Operand.getOperand(0).getValueType().bitsGT(VT))
3662 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3663 return Operand.getOperand(0);
3664 }
3665 if (OpOpcode == ISD::UNDEF)
3666 return getUNDEF(VT);
3667 break;
3668 case ISD::ABS:
3669 assert(VT.isInteger() && VT == Operand.getValueType() &&
3670 "Invalid ABS!");
3671 if (OpOpcode == ISD::UNDEF)
3672 return getUNDEF(VT);
3673 break;
3674 case ISD::BSWAP:
3675 assert(VT.isInteger() && VT == Operand.getValueType() &&
3676 "Invalid BSWAP!");
3677 assert((VT.getScalarSizeInBits() % 16 == 0) &&
3678 "BSWAP types must be a multiple of 16 bits!");
3679 if (OpOpcode == ISD::UNDEF)
3680 return getUNDEF(VT);
3681 break;
3682 case ISD::BITREVERSE:
3683 assert(VT.isInteger() && VT == Operand.getValueType() &&
3684 "Invalid BITREVERSE!");
3685 if (OpOpcode == ISD::UNDEF)
3686 return getUNDEF(VT);
3687 break;
3688 case ISD::BITCAST:
3689 // Basic sanity checking.
3690 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
3691 "Cannot BITCAST between types of different sizes!");
3692 if (VT == Operand.getValueType()) return Operand; // noop conversion.
3693 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 3694 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 3695 if (OpOpcode == ISD::UNDEF) 3696 return getUNDEF(VT); 3697 break; 3698 case ISD::SCALAR_TO_VECTOR: 3699 assert(VT.isVector() && !Operand.getValueType().isVector() && 3700 (VT.getVectorElementType() == Operand.getValueType() || 3701 (VT.getVectorElementType().isInteger() && 3702 Operand.getValueType().isInteger() && 3703 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 3704 "Illegal SCALAR_TO_VECTOR node!"); 3705 if (OpOpcode == ISD::UNDEF) 3706 return getUNDEF(VT); 3707 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 3708 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 3709 isa<ConstantSDNode>(Operand.getOperand(1)) && 3710 Operand.getConstantOperandVal(1) == 0 && 3711 Operand.getOperand(0).getValueType() == VT) 3712 return Operand.getOperand(0); 3713 break; 3714 case ISD::FNEG: 3715 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 3716 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) 3717 // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags? 3718 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1), 3719 Operand.getOperand(0), Operand.getNode()->getFlags()); 3720 if (OpOpcode == ISD::FNEG) // --X -> X 3721 return Operand.getOperand(0); 3722 break; 3723 case ISD::FABS: 3724 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 3725 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 3726 break; 3727 } 3728 3729 SDNode *N; 3730 SDVTList VTs = getVTList(VT); 3731 SDValue Ops[] = {Operand}; 3732 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 3733 FoldingSetNodeID ID; 3734 AddNodeIDNode(ID, Opcode, VTs, Ops); 3735 void *IP = nullptr; 3736 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 3737 E->intersectFlagsWith(Flags); 3738 return SDValue(E, 0); 3739 } 3740 3741 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3742 N->setFlags(Flags); 3743 createOperands(N, Ops); 3744 CSEMap.InsertNode(N, IP); 3745 } else { 3746 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3747 createOperands(N, Ops); 3748 } 3749 3750 InsertNode(N); 3751 return SDValue(N, 0); 3752 } 3753 3754 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 3755 const APInt &C2) { 3756 switch (Opcode) { 3757 case ISD::ADD: return std::make_pair(C1 + C2, true); 3758 case ISD::SUB: return std::make_pair(C1 - C2, true); 3759 case ISD::MUL: return std::make_pair(C1 * C2, true); 3760 case ISD::AND: return std::make_pair(C1 & C2, true); 3761 case ISD::OR: return std::make_pair(C1 | C2, true); 3762 case ISD::XOR: return std::make_pair(C1 ^ C2, true); 3763 case ISD::SHL: return std::make_pair(C1 << C2, true); 3764 case ISD::SRL: return std::make_pair(C1.lshr(C2), true); 3765 case ISD::SRA: return std::make_pair(C1.ashr(C2), true); 3766 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true); 3767 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true); 3768 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true); 3769 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true); 3770 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true); 3771 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? 
C1 : C2, true); 3772 case ISD::UDIV: 3773 if (!C2.getBoolValue()) 3774 break; 3775 return std::make_pair(C1.udiv(C2), true); 3776 case ISD::UREM: 3777 if (!C2.getBoolValue()) 3778 break; 3779 return std::make_pair(C1.urem(C2), true); 3780 case ISD::SDIV: 3781 if (!C2.getBoolValue()) 3782 break; 3783 return std::make_pair(C1.sdiv(C2), true); 3784 case ISD::SREM: 3785 if (!C2.getBoolValue()) 3786 break; 3787 return std::make_pair(C1.srem(C2), true); 3788 } 3789 return std::make_pair(APInt(1, 0), false); 3790 } 3791 3792 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 3793 EVT VT, const ConstantSDNode *Cst1, 3794 const ConstantSDNode *Cst2) { 3795 if (Cst1->isOpaque() || Cst2->isOpaque()) 3796 return SDValue(); 3797 3798 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(), 3799 Cst2->getAPIntValue()); 3800 if (!Folded.second) 3801 return SDValue(); 3802 return getConstant(Folded.first, DL, VT); 3803 } 3804 3805 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 3806 const GlobalAddressSDNode *GA, 3807 const SDNode *N2) { 3808 if (GA->getOpcode() != ISD::GlobalAddress) 3809 return SDValue(); 3810 if (!TLI->isOffsetFoldingLegal(GA)) 3811 return SDValue(); 3812 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2); 3813 if (!Cst2) 3814 return SDValue(); 3815 int64_t Offset = Cst2->getSExtValue(); 3816 switch (Opcode) { 3817 case ISD::ADD: break; 3818 case ISD::SUB: Offset = -uint64_t(Offset); break; 3819 default: return SDValue(); 3820 } 3821 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT, 3822 GA->getOffset() + uint64_t(Offset)); 3823 } 3824 3825 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 3826 switch (Opcode) { 3827 case ISD::SDIV: 3828 case ISD::UDIV: 3829 case ISD::SREM: 3830 case ISD::UREM: { 3831 // If a divisor is zero/undef or any element of a divisor vector is 3832 // zero/undef, the whole op is undef. 3833 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 3834 SDValue Divisor = Ops[1]; 3835 if (Divisor.isUndef() || isNullConstant(Divisor)) 3836 return true; 3837 3838 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 3839 any_of(Divisor->op_values(), 3840 [](SDValue V) { return V.isUndef() || isNullConstant(V); }); 3841 // TODO: Handle signed overflow. 3842 } 3843 // TODO: Handle oversized shifts. 3844 default: 3845 return false; 3846 } 3847 } 3848 3849 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 3850 EVT VT, SDNode *Cst1, 3851 SDNode *Cst2) { 3852 // If the opcode is a target-specific ISD node, there's nothing we can 3853 // do here and the operand rules may not line up with the below, so 3854 // bail early. 3855 if (Opcode >= ISD::BUILTIN_OP_END) 3856 return SDValue(); 3857 3858 if (isUndef(Opcode, {SDValue(Cst1, 0), SDValue(Cst2, 0)})) 3859 return getUNDEF(VT); 3860 3861 // Handle the case of two scalars. 
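// Illustrative example (not in the original): folding ISD::ADD over the two
// i32 ConstantSDNodes 2 and 3 goes through FoldValue above and produces the
// i32 constant 5; a zero divisor for ISD::UDIV makes FoldValue report
// failure instead, so no node is folded.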
3862 if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) { 3863 if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) { 3864 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2); 3865 assert((!Folded || !VT.isVector()) && 3866 "Can't fold vectors ops with scalar operands"); 3867 return Folded; 3868 } 3869 } 3870 3871 // fold (add Sym, c) -> Sym+c 3872 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1)) 3873 return FoldSymbolOffset(Opcode, VT, GA, Cst2); 3874 if (isCommutativeBinOp(Opcode)) 3875 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2)) 3876 return FoldSymbolOffset(Opcode, VT, GA, Cst1); 3877 3878 // For vectors extract each constant element into Inputs so we can constant 3879 // fold them individually. 3880 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1); 3881 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2); 3882 if (!BV1 || !BV2) 3883 return SDValue(); 3884 3885 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!"); 3886 3887 EVT SVT = VT.getScalarType(); 3888 SmallVector<SDValue, 4> Outputs; 3889 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) { 3890 SDValue V1 = BV1->getOperand(I); 3891 SDValue V2 = BV2->getOperand(I); 3892 3893 // Avoid BUILD_VECTOR nodes that perform implicit truncation. 3894 // FIXME: This is valid and could be handled by truncation. 3895 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT) 3896 return SDValue(); 3897 3898 // Fold one vector element. 3899 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2); 3900 3901 // Scalar folding only succeeded if the result is a constant or UNDEF. 3902 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 3903 ScalarResult.getOpcode() != ISD::ConstantFP) 3904 return SDValue(); 3905 Outputs.push_back(ScalarResult); 3906 } 3907 3908 assert(VT.getVectorNumElements() == Outputs.size() && 3909 "Vector size mismatch!"); 3910 3911 // We may have a vector type but a scalar result. Create a splat. 3912 Outputs.resize(VT.getVectorNumElements(), Outputs.back()); 3913 3914 // Build a big vector out of the scalar elements we generated. 3915 return getBuildVector(VT, SDLoc(), Outputs); 3916 } 3917 3918 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, 3919 const SDLoc &DL, EVT VT, 3920 ArrayRef<SDValue> Ops, 3921 const SDNodeFlags Flags) { 3922 // If the opcode is a target-specific ISD node, there's nothing we can 3923 // do here and the operand rules may not line up with the below, so 3924 // bail early. 3925 if (Opcode >= ISD::BUILTIN_OP_END) 3926 return SDValue(); 3927 3928 if (isUndef(Opcode, Ops)) 3929 return getUNDEF(VT); 3930 3931 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday? 3932 if (!VT.isVector()) 3933 return SDValue(); 3934 3935 unsigned NumElts = VT.getVectorNumElements(); 3936 3937 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) { 3938 return !Op.getValueType().isVector() || 3939 Op.getValueType().getVectorNumElements() == NumElts; 3940 }; 3941 3942 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { 3943 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); 3944 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || 3945 (BV && BV->isConstant()); 3946 }; 3947 3948 // All operands must be vector types with the same number of elements as 3949 // the result type and must be either UNDEF or a build vector of constant 3950 // or UNDEF scalars. 
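// (A scalar ISD::CONDCODE operand is also let through by the predicate
// above so that vector SETCC nodes can be folded lane by lane.)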
3951 if (!all_of(Ops, IsConstantBuildVectorOrUndef) || 3952 !all_of(Ops, IsScalarOrSameVectorSize)) 3953 return SDValue(); 3954 3955 // If we are comparing vectors, then the result needs to be a i1 boolean 3956 // that is then sign-extended back to the legal result type. 3957 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); 3958 3959 // Find legal integer scalar type for constant promotion and 3960 // ensure that its scalar size is at least as large as source. 3961 EVT LegalSVT = VT.getScalarType(); 3962 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 3963 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 3964 if (LegalSVT.bitsLT(VT.getScalarType())) 3965 return SDValue(); 3966 } 3967 3968 // Constant fold each scalar lane separately. 3969 SmallVector<SDValue, 4> ScalarResults; 3970 for (unsigned i = 0; i != NumElts; i++) { 3971 SmallVector<SDValue, 4> ScalarOps; 3972 for (SDValue Op : Ops) { 3973 EVT InSVT = Op.getValueType().getScalarType(); 3974 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op); 3975 if (!InBV) { 3976 // We've checked that this is UNDEF or a constant of some kind. 3977 if (Op.isUndef()) 3978 ScalarOps.push_back(getUNDEF(InSVT)); 3979 else 3980 ScalarOps.push_back(Op); 3981 continue; 3982 } 3983 3984 SDValue ScalarOp = InBV->getOperand(i); 3985 EVT ScalarVT = ScalarOp.getValueType(); 3986 3987 // Build vector (integer) scalar operands may need implicit 3988 // truncation - do this before constant folding. 3989 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 3990 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 3991 3992 ScalarOps.push_back(ScalarOp); 3993 } 3994 3995 // Constant fold the scalar operands. 3996 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 3997 3998 // Legalize the (integer) scalar constant if necessary. 3999 if (LegalSVT != SVT) 4000 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4001 4002 // Scalar folding only succeeded if the result is a constant or UNDEF. 4003 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4004 ScalarResult.getOpcode() != ISD::ConstantFP) 4005 return SDValue(); 4006 ScalarResults.push_back(ScalarResult); 4007 } 4008 4009 return getBuildVector(VT, DL, ScalarResults); 4010 } 4011 4012 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4013 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 4014 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 4015 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 4016 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 4017 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 4018 4019 // Canonicalize constant to RHS if commutative. 4020 if (isCommutativeBinOp(Opcode)) { 4021 if (N1C && !N2C) { 4022 std::swap(N1C, N2C); 4023 std::swap(N1, N2); 4024 } else if (N1CFP && !N2CFP) { 4025 std::swap(N1CFP, N2CFP); 4026 std::swap(N1, N2); 4027 } 4028 } 4029 4030 switch (Opcode) { 4031 default: break; 4032 case ISD::TokenFactor: 4033 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 4034 N2.getValueType() == MVT::Other && "Invalid token factor!"); 4035 // Fold trivial token factors. 4036 if (N1.getOpcode() == ISD::EntryToken) return N2; 4037 if (N2.getOpcode() == ISD::EntryToken) return N1; 4038 if (N1 == N2) return N1; 4039 break; 4040 case ISD::CONCAT_VECTORS: { 4041 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 
4042 SDValue Ops[] = {N1, N2};
4043 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
4044 return V;
4045 break;
4046 }
4047 case ISD::AND:
4048 assert(VT.isInteger() && "This operator does not apply to FP types!");
4049 assert(N1.getValueType() == N2.getValueType() &&
4050 N1.getValueType() == VT && "Binary operator types must match!");
4051 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
4052 // worth handling here.
4053 if (N2C && N2C->isNullValue())
4054 return N2;
4055 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
4056 return N1;
4057 break;
4058 case ISD::OR:
4059 case ISD::XOR:
4060 case ISD::ADD:
4061 case ISD::SUB:
4062 assert(VT.isInteger() && "This operator does not apply to FP types!");
4063 assert(N1.getValueType() == N2.getValueType() &&
4064 N1.getValueType() == VT && "Binary operator types must match!");
4065 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
4066 // it's worth handling here.
4067 if (N2C && N2C->isNullValue())
4068 return N1;
4069 break;
4070 case ISD::UDIV:
4071 case ISD::UREM:
4072 case ISD::MULHU:
4073 case ISD::MULHS:
4074 case ISD::MUL:
4075 case ISD::SDIV:
4076 case ISD::SREM:
4077 case ISD::SMIN:
4078 case ISD::SMAX:
4079 case ISD::UMIN:
4080 case ISD::UMAX:
4081 assert(VT.isInteger() && "This operator does not apply to FP types!");
4082 assert(N1.getValueType() == N2.getValueType() &&
4083 N1.getValueType() == VT && "Binary operator types must match!");
4084 break;
4085 case ISD::FADD:
4086 case ISD::FSUB:
4087 case ISD::FMUL:
4088 case ISD::FDIV:
4089 case ISD::FREM:
4090 if (getTarget().Options.UnsafeFPMath) {
4091 if (Opcode == ISD::FADD) {
4092 // x+0 --> x
4093 if (N2CFP && N2CFP->getValueAPF().isZero())
4094 return N1;
4095 } else if (Opcode == ISD::FSUB) {
4096 // x-0 --> x
4097 if (N2CFP && N2CFP->getValueAPF().isZero())
4098 return N1;
4099 } else if (Opcode == ISD::FMUL) {
4100 // x*0 --> 0
4101 if (N2CFP && N2CFP->isZero())
4102 return N2;
4103 // x*1 --> x
4104 if (N2CFP && N2CFP->isExactlyValue(1.0))
4105 return N1;
4106 }
4107 }
4108 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
4109 assert(N1.getValueType() == N2.getValueType() &&
4110 N1.getValueType() == VT && "Binary operator types must match!");
4111 break;
4112 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
4113 assert(N1.getValueType() == VT &&
4114 N1.getValueType().isFloatingPoint() &&
4115 N2.getValueType().isFloatingPoint() &&
4116 "Invalid FCOPYSIGN!");
4117 break;
4118 case ISD::SHL:
4119 case ISD::SRA:
4120 case ISD::SRL:
4121 case ISD::ROTL:
4122 case ISD::ROTR:
4123 assert(VT == N1.getValueType() &&
4124 "Shift operators return type must be the same as their first arg");
4125 assert(VT.isInteger() && N2.getValueType().isInteger() &&
4126 "Shifts only work on integers");
4127 assert((!VT.isVector() || VT == N2.getValueType()) &&
4128 "Vector shift amounts must have the same type as their first arg");
4129 // Verify that the shift amount VT is big enough to hold valid shift
4130 // amounts. This catches things like trying to shift an i1024 value by an
4131 // i8, which is easy to fall into in generic code that uses
4132 // TLI.getShiftAmountTy().
4133 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
4134 "Invalid use of small shift amount with oversized value!");
4135
4136 // Always fold shifts of i1 values so the code generator doesn't need to
4137 // handle them.
Since we know the size of the shift has to be less than the 4138 // size of the value, the shift/rotate count is guaranteed to be zero. 4139 if (VT == MVT::i1) 4140 return N1; 4141 if (N2C && N2C->isNullValue()) 4142 return N1; 4143 break; 4144 case ISD::FP_ROUND_INREG: { 4145 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4146 assert(VT == N1.getValueType() && "Not an inreg round!"); 4147 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() && 4148 "Cannot FP_ROUND_INREG integer types"); 4149 assert(EVT.isVector() == VT.isVector() && 4150 "FP_ROUND_INREG type should be vector iff the operand " 4151 "type is vector!"); 4152 assert((!EVT.isVector() || 4153 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 4154 "Vector element counts must match in FP_ROUND_INREG"); 4155 assert(EVT.bitsLE(VT) && "Not rounding down!"); 4156 (void)EVT; 4157 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding. 4158 break; 4159 } 4160 case ISD::FP_ROUND: 4161 assert(VT.isFloatingPoint() && 4162 N1.getValueType().isFloatingPoint() && 4163 VT.bitsLE(N1.getValueType()) && 4164 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && 4165 "Invalid FP_ROUND!"); 4166 if (N1.getValueType() == VT) return N1; // noop conversion. 4167 break; 4168 case ISD::AssertSext: 4169 case ISD::AssertZext: { 4170 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4171 assert(VT == N1.getValueType() && "Not an inreg extend!"); 4172 assert(VT.isInteger() && EVT.isInteger() && 4173 "Cannot *_EXTEND_INREG FP types"); 4174 assert(!EVT.isVector() && 4175 "AssertSExt/AssertZExt type should be the vector element type " 4176 "rather than the vector type!"); 4177 assert(EVT.bitsLE(VT) && "Not extending!"); 4178 if (VT == EVT) return N1; // noop assertion. 4179 break; 4180 } 4181 case ISD::SIGN_EXTEND_INREG: { 4182 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4183 assert(VT == N1.getValueType() && "Not an inreg extend!"); 4184 assert(VT.isInteger() && EVT.isInteger() && 4185 "Cannot *_EXTEND_INREG FP types"); 4186 assert(EVT.isVector() == VT.isVector() && 4187 "SIGN_EXTEND_INREG type should be vector iff the operand " 4188 "type is vector!"); 4189 assert((!EVT.isVector() || 4190 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 4191 "Vector element counts must match in SIGN_EXTEND_INREG"); 4192 assert(EVT.bitsLE(VT) && "Not extending!"); 4193 if (EVT == VT) return N1; // Not actually extending 4194 4195 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 4196 unsigned FromBits = EVT.getScalarSizeInBits(); 4197 Val <<= Val.getBitWidth() - FromBits; 4198 Val.ashrInPlace(Val.getBitWidth() - FromBits); 4199 return getConstant(Val, DL, ConstantVT); 4200 }; 4201 4202 if (N1C) { 4203 const APInt &Val = N1C->getAPIntValue(); 4204 return SignExtendInReg(Val, VT); 4205 } 4206 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 4207 SmallVector<SDValue, 8> Ops; 4208 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 4209 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 4210 SDValue Op = N1.getOperand(i); 4211 if (Op.isUndef()) { 4212 Ops.push_back(getUNDEF(OpVT)); 4213 continue; 4214 } 4215 ConstantSDNode *C = cast<ConstantSDNode>(Op); 4216 APInt Val = C->getAPIntValue(); 4217 Ops.push_back(SignExtendInReg(Val, OpVT)); 4218 } 4219 return getBuildVector(VT, DL, Ops); 4220 } 4221 break; 4222 } 4223 case ISD::EXTRACT_VECTOR_ELT: 4224 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 
4225 if (N1.isUndef()) 4226 return getUNDEF(VT); 4227 4228 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 4229 if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 4230 return getUNDEF(VT); 4231 4232 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 4233 // expanding copies of large vectors from registers. 4234 if (N2C && 4235 N1.getOpcode() == ISD::CONCAT_VECTORS && 4236 N1.getNumOperands() > 0) { 4237 unsigned Factor = 4238 N1.getOperand(0).getValueType().getVectorNumElements(); 4239 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 4240 N1.getOperand(N2C->getZExtValue() / Factor), 4241 getConstant(N2C->getZExtValue() % Factor, DL, 4242 N2.getValueType())); 4243 } 4244 4245 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 4246 // expanding large vector constants. 4247 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { 4248 SDValue Elt = N1.getOperand(N2C->getZExtValue()); 4249 4250 if (VT != Elt.getValueType()) 4251 // If the vector element type is not legal, the BUILD_VECTOR operands 4252 // are promoted and implicitly truncated, and the result implicitly 4253 // extended. Make that explicit here. 4254 Elt = getAnyExtOrTrunc(Elt, DL, VT); 4255 4256 return Elt; 4257 } 4258 4259 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 4260 // operations are lowered to scalars. 4261 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 4262 // If the indices are the same, return the inserted element else 4263 // if the indices are known different, extract the element from 4264 // the original vector. 4265 SDValue N1Op2 = N1.getOperand(2); 4266 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); 4267 4268 if (N1Op2C && N2C) { 4269 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 4270 if (VT == N1.getOperand(1).getValueType()) 4271 return N1.getOperand(1); 4272 else 4273 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 4274 } 4275 4276 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 4277 } 4278 } 4279 break; 4280 case ISD::EXTRACT_ELEMENT: 4281 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 4282 assert(!N1.getValueType().isVector() && !VT.isVector() && 4283 (N1.getValueType().isInteger() == VT.isInteger()) && 4284 N1.getValueType() != VT && 4285 "Wrong types for EXTRACT_ELEMENT!"); 4286 4287 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 4288 // 64-bit integers into 32-bit parts. Instead of building the extract of 4289 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 4290 if (N1.getOpcode() == ISD::BUILD_PAIR) 4291 return N1.getOperand(N2C->getZExtValue()); 4292 4293 // EXTRACT_ELEMENT of a constant int is also very common. 
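// Illustrative example (not in the original): EXTRACT_ELEMENT(i64 constant
// 0x1122334455667788, 1) returns the i32 constant 0x11223344 (element 1 is
// the high half), computed below by shifting right 32 bits and truncating.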
4294 if (N1C) { 4295 unsigned ElementSize = VT.getSizeInBits(); 4296 unsigned Shift = ElementSize * N2C->getZExtValue(); 4297 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift); 4298 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT); 4299 } 4300 break; 4301 case ISD::EXTRACT_SUBVECTOR: 4302 if (VT.isSimple() && N1.getValueType().isSimple()) { 4303 assert(VT.isVector() && N1.getValueType().isVector() && 4304 "Extract subvector VTs must be a vectors!"); 4305 assert(VT.getVectorElementType() == 4306 N1.getValueType().getVectorElementType() && 4307 "Extract subvector VTs must have the same element type!"); 4308 assert(VT.getSimpleVT() <= N1.getSimpleValueType() && 4309 "Extract subvector must be from larger vector to smaller vector!"); 4310 4311 if (N2C) { 4312 assert((VT.getVectorNumElements() + N2C->getZExtValue() 4313 <= N1.getValueType().getVectorNumElements()) 4314 && "Extract subvector overflow!"); 4315 } 4316 4317 // Trivial extraction. 4318 if (VT.getSimpleVT() == N1.getSimpleValueType()) 4319 return N1; 4320 4321 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. 4322 if (N1.isUndef()) 4323 return getUNDEF(VT); 4324 4325 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of 4326 // the concat have the same type as the extract. 4327 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && 4328 N1.getNumOperands() > 0 && 4329 VT == N1.getOperand(0).getValueType()) { 4330 unsigned Factor = VT.getVectorNumElements(); 4331 return N1.getOperand(N2C->getZExtValue() / Factor); 4332 } 4333 4334 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created 4335 // during shuffle legalization. 4336 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 4337 VT == N1.getOperand(1).getValueType()) 4338 return N1.getOperand(1); 4339 } 4340 break; 4341 } 4342 4343 // Perform trivial constant folding. 4344 if (SDValue SV = 4345 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode())) 4346 return SV; 4347 4348 // Constant fold FP operations. 4349 bool HasFPExceptions = TLI->hasFloatingPointExceptions(); 4350 if (N1CFP) { 4351 if (N2CFP) { 4352 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF(); 4353 APFloat::opStatus s; 4354 switch (Opcode) { 4355 case ISD::FADD: 4356 s = V1.add(V2, APFloat::rmNearestTiesToEven); 4357 if (!HasFPExceptions || s != APFloat::opInvalidOp) 4358 return getConstantFP(V1, DL, VT); 4359 break; 4360 case ISD::FSUB: 4361 s = V1.subtract(V2, APFloat::rmNearestTiesToEven); 4362 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 4363 return getConstantFP(V1, DL, VT); 4364 break; 4365 case ISD::FMUL: 4366 s = V1.multiply(V2, APFloat::rmNearestTiesToEven); 4367 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 4368 return getConstantFP(V1, DL, VT); 4369 break; 4370 case ISD::FDIV: 4371 s = V1.divide(V2, APFloat::rmNearestTiesToEven); 4372 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 4373 s!=APFloat::opDivByZero)) { 4374 return getConstantFP(V1, DL, VT); 4375 } 4376 break; 4377 case ISD::FREM : 4378 s = V1.mod(V2); 4379 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 4380 s!=APFloat::opDivByZero)) { 4381 return getConstantFP(V1, DL, VT); 4382 } 4383 break; 4384 case ISD::FCOPYSIGN: 4385 V1.copySign(V2); 4386 return getConstantFP(V1, DL, VT); 4387 default: break; 4388 } 4389 } 4390 4391 if (Opcode == ISD::FP_ROUND) { 4392 APFloat V = N1CFP->getValueAPF(); // make copy 4393 bool ignored; 4394 // This can return overflow, underflow, or inexact; we don't care. 4395 // FIXME need to be more flexible about rounding mode. 
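// (Illustrative: FP_ROUND of an f64 constant 1.5 to f32 folds to the f32
// constant 1.5 here; the rounding mode only matters for inexact values.)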
4396 (void)V.convert(EVTToAPFloatSemantics(VT), 4397 APFloat::rmNearestTiesToEven, &ignored); 4398 return getConstantFP(V, DL, VT); 4399 } 4400 } 4401 4402 // Canonicalize an UNDEF to the RHS, even over a constant. 4403 if (N1.isUndef()) { 4404 if (isCommutativeBinOp(Opcode)) { 4405 std::swap(N1, N2); 4406 } else { 4407 switch (Opcode) { 4408 case ISD::FP_ROUND_INREG: 4409 case ISD::SIGN_EXTEND_INREG: 4410 case ISD::SUB: 4411 case ISD::FSUB: 4412 case ISD::FDIV: 4413 case ISD::FREM: 4414 case ISD::SRA: 4415 return N1; // fold op(undef, arg2) -> undef 4416 case ISD::UDIV: 4417 case ISD::SDIV: 4418 case ISD::UREM: 4419 case ISD::SREM: 4420 case ISD::SRL: 4421 case ISD::SHL: 4422 if (!VT.isVector()) 4423 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 4424 // For vectors, we can't easily build an all zero vector, just return 4425 // the LHS. 4426 return N2; 4427 } 4428 } 4429 } 4430 4431 // Fold a bunch of operators when the RHS is undef. 4432 if (N2.isUndef()) { 4433 switch (Opcode) { 4434 case ISD::XOR: 4435 if (N1.isUndef()) 4436 // Handle undef ^ undef -> 0 special case. This is a common 4437 // idiom (misuse). 4438 return getConstant(0, DL, VT); 4439 LLVM_FALLTHROUGH; 4440 case ISD::ADD: 4441 case ISD::ADDC: 4442 case ISD::ADDE: 4443 case ISD::SUB: 4444 case ISD::UDIV: 4445 case ISD::SDIV: 4446 case ISD::UREM: 4447 case ISD::SREM: 4448 return N2; // fold op(arg1, undef) -> undef 4449 case ISD::FADD: 4450 case ISD::FSUB: 4451 case ISD::FMUL: 4452 case ISD::FDIV: 4453 case ISD::FREM: 4454 if (getTarget().Options.UnsafeFPMath) 4455 return N2; 4456 break; 4457 case ISD::MUL: 4458 case ISD::AND: 4459 case ISD::SRL: 4460 case ISD::SHL: 4461 if (!VT.isVector()) 4462 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 4463 // For vectors, we can't easily build an all zero vector, just return 4464 // the LHS. 4465 return N1; 4466 case ISD::OR: 4467 if (!VT.isVector()) 4468 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT); 4469 // For vectors, we can't easily build an all one vector, just return 4470 // the LHS. 4471 return N1; 4472 case ISD::SRA: 4473 return N1; 4474 } 4475 } 4476 4477 // Memoize this node if possible. 4478 SDNode *N; 4479 SDVTList VTs = getVTList(VT); 4480 SDValue Ops[] = {N1, N2}; 4481 if (VT != MVT::Glue) { 4482 FoldingSetNodeID ID; 4483 AddNodeIDNode(ID, Opcode, VTs, Ops); 4484 void *IP = nullptr; 4485 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4486 E->intersectFlagsWith(Flags); 4487 return SDValue(E, 0); 4488 } 4489 4490 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4491 N->setFlags(Flags); 4492 createOperands(N, Ops); 4493 CSEMap.InsertNode(N, IP); 4494 } else { 4495 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4496 createOperands(N, Ops); 4497 } 4498 4499 InsertNode(N); 4500 return SDValue(N, 0); 4501 } 4502 4503 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4504 SDValue N1, SDValue N2, SDValue N3) { 4505 // Perform various simplifications. 
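// (Illustrative: a SELECT whose condition is a constant i1 folds directly to
// its true or false operand below, and FMA of three FP constants is folded
// with APFloat::fusedMultiplyAdd.)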
4506 switch (Opcode) { 4507 case ISD::FMA: { 4508 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 4509 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 4510 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 4511 if (N1CFP && N2CFP && N3CFP) { 4512 APFloat V1 = N1CFP->getValueAPF(); 4513 const APFloat &V2 = N2CFP->getValueAPF(); 4514 const APFloat &V3 = N3CFP->getValueAPF(); 4515 APFloat::opStatus s = 4516 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 4517 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp) 4518 return getConstantFP(V1, DL, VT); 4519 } 4520 break; 4521 } 4522 case ISD::CONCAT_VECTORS: { 4523 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 4524 SDValue Ops[] = {N1, N2, N3}; 4525 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 4526 return V; 4527 break; 4528 } 4529 case ISD::SETCC: { 4530 // Use FoldSetCC to simplify SETCC's. 4531 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 4532 return V; 4533 // Vector constant folding. 4534 SDValue Ops[] = {N1, N2, N3}; 4535 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 4536 return V; 4537 break; 4538 } 4539 case ISD::SELECT: 4540 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 4541 if (N1C->getZExtValue()) 4542 return N2; // select true, X, Y -> X 4543 return N3; // select false, X, Y -> Y 4544 } 4545 4546 if (N2 == N3) return N2; // select C, X, X -> X 4547 break; 4548 case ISD::VECTOR_SHUFFLE: 4549 llvm_unreachable("should use getVectorShuffle constructor!"); 4550 case ISD::INSERT_VECTOR_ELT: { 4551 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 4552 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF 4553 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 4554 return getUNDEF(VT); 4555 break; 4556 } 4557 case ISD::INSERT_SUBVECTOR: { 4558 SDValue Index = N3; 4559 if (VT.isSimple() && N1.getValueType().isSimple() 4560 && N2.getValueType().isSimple()) { 4561 assert(VT.isVector() && N1.getValueType().isVector() && 4562 N2.getValueType().isVector() && 4563 "Insert subvector VTs must be a vectors"); 4564 assert(VT == N1.getValueType() && 4565 "Dest and insert subvector source types must match!"); 4566 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() && 4567 "Insert subvector must be from smaller vector to larger vector!"); 4568 if (isa<ConstantSDNode>(Index)) { 4569 assert((N2.getValueType().getVectorNumElements() + 4570 cast<ConstantSDNode>(Index)->getZExtValue() 4571 <= VT.getVectorNumElements()) 4572 && "Insert subvector overflow!"); 4573 } 4574 4575 // Trivial insertion. 4576 if (VT.getSimpleVT() == N2.getSimpleValueType()) 4577 return N2; 4578 } 4579 break; 4580 } 4581 case ISD::BITCAST: 4582 // Fold bit_convert nodes from a type to themselves. 4583 if (N1.getValueType() == VT) 4584 return N1; 4585 break; 4586 } 4587 4588 // Memoize node if it doesn't produce a flag. 
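// (As in the unary and binary getNode overloads above, nodes producing
// MVT::Glue skip the CSE map and are always created fresh.)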
4589 SDNode *N; 4590 SDVTList VTs = getVTList(VT); 4591 SDValue Ops[] = {N1, N2, N3}; 4592 if (VT != MVT::Glue) { 4593 FoldingSetNodeID ID; 4594 AddNodeIDNode(ID, Opcode, VTs, Ops); 4595 void *IP = nullptr; 4596 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4597 return SDValue(E, 0); 4598 4599 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4600 createOperands(N, Ops); 4601 CSEMap.InsertNode(N, IP); 4602 } else { 4603 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4604 createOperands(N, Ops); 4605 } 4606 4607 InsertNode(N); 4608 return SDValue(N, 0); 4609 } 4610 4611 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4612 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 4613 SDValue Ops[] = { N1, N2, N3, N4 }; 4614 return getNode(Opcode, DL, VT, Ops); 4615 } 4616 4617 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4618 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 4619 SDValue N5) { 4620 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 4621 return getNode(Opcode, DL, VT, Ops); 4622 } 4623 4624 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 4625 /// the incoming stack arguments to be loaded from the stack. 4626 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 4627 SmallVector<SDValue, 8> ArgChains; 4628 4629 // Include the original chain at the beginning of the list. When this is 4630 // used by target LowerCall hooks, this helps legalize find the 4631 // CALLSEQ_BEGIN node. 4632 ArgChains.push_back(Chain); 4633 4634 // Add a chain value for each stack argument. 4635 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 4636 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 4637 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 4638 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 4639 if (FI->getIndex() < 0) 4640 ArgChains.push_back(SDValue(L, 1)); 4641 4642 // Build a tokenfactor for all the chains. 4643 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 4644 } 4645 4646 /// getMemsetValue - Vectorized representation of the memset value 4647 /// operand. 4648 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 4649 const SDLoc &dl) { 4650 assert(!Value.isUndef()); 4651 4652 unsigned NumBits = VT.getScalarSizeInBits(); 4653 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 4654 assert(C->getAPIntValue().getBitWidth() == 8); 4655 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 4656 if (VT.isInteger()) 4657 return DAG.getConstant(Val, dl, VT); 4658 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 4659 VT); 4660 } 4661 4662 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 4663 EVT IntVT = VT.getScalarType(); 4664 if (!IntVT.isInteger()) 4665 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 4666 4667 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 4668 if (NumBits > 8) { 4669 // Use a multiplication with 0x010101... to extend the input to the 4670 // required length. 
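// Illustrative example (not in the original): an i8 fill value of 0xAB
// zero-extended to i32 and multiplied by the splat 0x01010101 gives
// 0xABABABAB, the 4-byte memset pattern.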
4671 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 4672 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 4673 DAG.getConstant(Magic, dl, IntVT)); 4674 } 4675 4676 if (VT != Value.getValueType() && !VT.isInteger()) 4677 Value = DAG.getBitcast(VT.getScalarType(), Value); 4678 if (VT != Value.getValueType()) 4679 Value = DAG.getSplatBuildVector(VT, dl, Value); 4680 4681 return Value; 4682 } 4683 4684 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 4685 /// used when a memcpy is turned into a memset when the source is a constant 4686 /// string ptr. 4687 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 4688 const TargetLowering &TLI, 4689 const ConstantDataArraySlice &Slice) { 4690 // Handle vector with all elements zero. 4691 if (Slice.Array == nullptr) { 4692 if (VT.isInteger()) 4693 return DAG.getConstant(0, dl, VT); 4694 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 4695 return DAG.getConstantFP(0.0, dl, VT); 4696 else if (VT.isVector()) { 4697 unsigned NumElts = VT.getVectorNumElements(); 4698 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; 4699 return DAG.getNode(ISD::BITCAST, dl, VT, 4700 DAG.getConstant(0, dl, 4701 EVT::getVectorVT(*DAG.getContext(), 4702 EltVT, NumElts))); 4703 } else 4704 llvm_unreachable("Expected type!"); 4705 } 4706 4707 assert(!VT.isVector() && "Can't handle vector type here!"); 4708 unsigned NumVTBits = VT.getSizeInBits(); 4709 unsigned NumVTBytes = NumVTBits / 8; 4710 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 4711 4712 APInt Val(NumVTBits, 0); 4713 if (DAG.getDataLayout().isLittleEndian()) { 4714 for (unsigned i = 0; i != NumBytes; ++i) 4715 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 4716 } else { 4717 for (unsigned i = 0; i != NumBytes; ++i) 4718 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 4719 } 4720 4721 // If the "cost" of materializing the integer immediate is less than the cost 4722 // of a load, then it is cost effective to turn the load into the immediate. 4723 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 4724 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 4725 return DAG.getConstant(Val, dl, VT); 4726 return SDValue(nullptr, 0); 4727 } 4728 4729 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset, 4730 const SDLoc &DL) { 4731 EVT VT = Base.getValueType(); 4732 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT)); 4733 } 4734 4735 /// Returns true if memcpy source is constant data. 4736 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 4737 uint64_t SrcDelta = 0; 4738 GlobalAddressSDNode *G = nullptr; 4739 if (Src.getOpcode() == ISD::GlobalAddress) 4740 G = cast<GlobalAddressSDNode>(Src); 4741 else if (Src.getOpcode() == ISD::ADD && 4742 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 4743 Src.getOperand(1).getOpcode() == ISD::Constant) { 4744 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 4745 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 4746 } 4747 if (!G) 4748 return false; 4749 4750 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 4751 SrcDelta + G->getOffset()); 4752 } 4753 4754 /// Determines the optimal series of memory ops to replace the memset / memcpy. 4755 /// Return true if the number of memory ops is below the threshold (Limit). 4756 /// It returns the types of the sequence of memory ops to perform 4757 /// memset / memcpy by reference. 
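/// For example (illustrative, not from the original source): a 15-byte copy
/// with a legal i64 type is typically covered as i64 + i32 + i16 + i8, or as
/// i64 plus one overlapping i64 when misaligned accesses are fast and
/// AllowOverlap is set.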
4758 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps, 4759 unsigned Limit, uint64_t Size, 4760 unsigned DstAlign, unsigned SrcAlign, 4761 bool IsMemset, 4762 bool ZeroMemset, 4763 bool MemcpyStrSrc, 4764 bool AllowOverlap, 4765 unsigned DstAS, unsigned SrcAS, 4766 SelectionDAG &DAG, 4767 const TargetLowering &TLI) { 4768 assert((SrcAlign == 0 || SrcAlign >= DstAlign) && 4769 "Expecting memcpy / memset source to meet alignment requirement!"); 4770 // If 'SrcAlign' is zero, that means the memory operation does not need to 4771 // load the value, i.e. memset or memcpy from constant string. Otherwise, 4772 // it's the inferred alignment of the source. 'DstAlign', on the other hand, 4773 // is the specified alignment of the memory operation. If it is zero, that 4774 // means it's possible to change the alignment of the destination. 4775 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does 4776 // not need to be loaded. 4777 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign, 4778 IsMemset, ZeroMemset, MemcpyStrSrc, 4779 DAG.getMachineFunction()); 4780 4781 if (VT == MVT::Other) { 4782 if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(DstAS) || 4783 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign)) { 4784 VT = TLI.getPointerTy(DAG.getDataLayout(), DstAS); 4785 } else { 4786 switch (DstAlign & 7) { 4787 case 0: VT = MVT::i64; break; 4788 case 4: VT = MVT::i32; break; 4789 case 2: VT = MVT::i16; break; 4790 default: VT = MVT::i8; break; 4791 } 4792 } 4793 4794 MVT LVT = MVT::i64; 4795 while (!TLI.isTypeLegal(LVT)) 4796 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1); 4797 assert(LVT.isInteger()); 4798 4799 if (VT.bitsGT(LVT)) 4800 VT = LVT; 4801 } 4802 4803 unsigned NumMemOps = 0; 4804 while (Size != 0) { 4805 unsigned VTSize = VT.getSizeInBits() / 8; 4806 while (VTSize > Size) { 4807 // For now, only use non-vector load / store's for the left-over pieces. 4808 EVT NewVT = VT; 4809 unsigned NewVTSize; 4810 4811 bool Found = false; 4812 if (VT.isVector() || VT.isFloatingPoint()) { 4813 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32; 4814 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) && 4815 TLI.isSafeMemOpType(NewVT.getSimpleVT())) 4816 Found = true; 4817 else if (NewVT == MVT::i64 && 4818 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) && 4819 TLI.isSafeMemOpType(MVT::f64)) { 4820 // i64 is usually not legal on 32-bit targets, but f64 may be. 4821 NewVT = MVT::f64; 4822 Found = true; 4823 } 4824 } 4825 4826 if (!Found) { 4827 do { 4828 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1); 4829 if (NewVT == MVT::i8) 4830 break; 4831 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT())); 4832 } 4833 NewVTSize = NewVT.getSizeInBits() / 8; 4834 4835 // If the new VT cannot cover all of the remaining bits, then consider 4836 // issuing a (or a pair of) unaligned and overlapping load / store. 4837 // FIXME: Only does this for 64-bit or more since we don't have proper 4838 // cost model for unaligned load / store. 
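// For example, with 7 bytes left after an 8-byte op, a single unaligned
// 8-byte op that overlaps the previous one by one byte can cover the tail
// instead of a 4-, 2- and 1-byte sequence.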
4839 bool Fast; 4840 if (NumMemOps && AllowOverlap && 4841 VTSize >= 8 && NewVTSize < Size && 4842 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast) 4843 VTSize = Size; 4844 else { 4845 VT = NewVT; 4846 VTSize = NewVTSize; 4847 } 4848 } 4849 4850 if (++NumMemOps > Limit) 4851 return false; 4852 4853 MemOps.push_back(VT); 4854 Size -= VTSize; 4855 } 4856 4857 return true; 4858 } 4859 4860 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { 4861 // On Darwin, -Os means optimize for size without hurting performance, so 4862 // only really optimize for size when -Oz (MinSize) is used. 4863 if (MF.getTarget().getTargetTriple().isOSDarwin()) 4864 return MF.getFunction()->optForMinSize(); 4865 return MF.getFunction()->optForSize(); 4866 } 4867 4868 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 4869 SDValue Chain, SDValue Dst, SDValue Src, 4870 uint64_t Size, unsigned Align, 4871 bool isVol, bool AlwaysInline, 4872 MachinePointerInfo DstPtrInfo, 4873 MachinePointerInfo SrcPtrInfo) { 4874 // Turn a memcpy of undef to nop. 4875 if (Src.isUndef()) 4876 return Chain; 4877 4878 // Expand memcpy to a series of load and store ops if the size operand falls 4879 // below a certain threshold. 4880 // TODO: In the AlwaysInline case, if the size is big then generate a loop 4881 // rather than maybe a humongous number of loads and stores. 4882 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4883 std::vector<EVT> MemOps; 4884 bool DstAlignCanChange = false; 4885 MachineFunction &MF = DAG.getMachineFunction(); 4886 MachineFrameInfo &MFI = MF.getFrameInfo(); 4887 bool OptSize = shouldLowerMemFuncForSize(MF); 4888 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 4889 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 4890 DstAlignCanChange = true; 4891 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 4892 if (Align > SrcAlign) 4893 SrcAlign = Align; 4894 ConstantDataArraySlice Slice; 4895 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); 4896 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 4897 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 4898 4899 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 4900 (DstAlignCanChange ? 0 : Align), 4901 (isZeroConstant ? 0 : SrcAlign), 4902 false, false, CopyFromConstant, true, 4903 DstPtrInfo.getAddrSpace(), 4904 SrcPtrInfo.getAddrSpace(), 4905 DAG, TLI)) 4906 return SDValue(); 4907 4908 if (DstAlignCanChange) { 4909 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 4910 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 4911 4912 // Don't promote to an alignment that would require dynamic stack 4913 // realignment. 4914 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 4915 if (!TRI->needsStackRealignment(MF)) 4916 while (NewAlign > Align && 4917 DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign)) 4918 NewAlign /= 2; 4919 4920 if (NewAlign > Align) { 4921 // Give the stack frame object a larger alignment if needed. 4922 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 4923 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 4924 Align = NewAlign; 4925 } 4926 } 4927 4928 MachineMemOperand::Flags MMOFlags = 4929 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 4930 SmallVector<SDValue, 8> OutChains; 4931 unsigned NumMemOps = MemOps.size(); 4932 uint64_t SrcOff = 0, DstOff = 0; 4933 for (unsigned i = 0; i != NumMemOps; ++i) { 4934 EVT VT = MemOps[i]; 4935 unsigned VTSize = VT.getSizeInBits() / 8; 4936 SDValue Value, Store; 4937 4938 if (VTSize > Size) { 4939 // Issuing an unaligned load / store pair that overlaps with the previous 4940 // pair. Adjust the offset accordingly. 4941 assert(i == NumMemOps-1 && i != 0); 4942 SrcOff -= VTSize - Size; 4943 DstOff -= VTSize - Size; 4944 } 4945 4946 if (CopyFromConstant && 4947 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 4948 // It's unlikely a store of a vector immediate can be done in a single 4949 // instruction. It would require a load from a constantpool first. 4950 // We only handle zero vectors here. 4951 // FIXME: Handle other cases where store of vector immediate is done in 4952 // a single instruction. 4953 ConstantDataArraySlice SubSlice; 4954 if (SrcOff < Slice.Length) { 4955 SubSlice = Slice; 4956 SubSlice.move(SrcOff); 4957 } else { 4958 // This is an out-of-bounds access and hence UB. Pretend we read zero. 4959 SubSlice.Array = nullptr; 4960 SubSlice.Offset = 0; 4961 SubSlice.Length = VTSize; 4962 } 4963 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); 4964 if (Value.getNode()) 4965 Store = DAG.getStore(Chain, dl, Value, 4966 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 4967 DstPtrInfo.getWithOffset(DstOff), Align, 4968 MMOFlags); 4969 } 4970 4971 if (!Store.getNode()) { 4972 // The type might not be legal for the target. This should only happen 4973 // if the type is smaller than a legal type, as on PPC, so the right 4974 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 4975 // to Load/Store if NVT==VT. 4976 // FIXME does the case above also need this? 4977 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 4978 assert(NVT.bitsGE(VT)); 4979 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 4980 DAG.getMemBasePlusOffset(Src, SrcOff, dl), 4981 SrcPtrInfo.getWithOffset(SrcOff), VT, 4982 MinAlign(SrcAlign, SrcOff), MMOFlags); 4983 OutChains.push_back(Value.getValue(1)); 4984 Store = DAG.getTruncStore( 4985 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 4986 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags); 4987 } 4988 OutChains.push_back(Store); 4989 SrcOff += VTSize; 4990 DstOff += VTSize; 4991 Size -= VTSize; 4992 } 4993 4994 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 4995 } 4996 4997 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 4998 SDValue Chain, SDValue Dst, SDValue Src, 4999 uint64_t Size, unsigned Align, 5000 bool isVol, bool AlwaysInline, 5001 MachinePointerInfo DstPtrInfo, 5002 MachinePointerInfo SrcPtrInfo) { 5003 // Turn a memmove of undef to nop. 5004 if (Src.isUndef()) 5005 return Chain; 5006 5007 // Expand memmove to a series of load and store ops if the size operand falls 5008 // below a certain threshold. 
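// Unlike the memcpy expansion above, all loads are emitted (and their chains
// token-factored together) before any store is issued, so the expansion stays
// correct when the source and destination ranges overlap.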
5009 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5010 std::vector<EVT> MemOps; 5011 bool DstAlignCanChange = false; 5012 MachineFunction &MF = DAG.getMachineFunction(); 5013 MachineFrameInfo &MFI = MF.getFrameInfo(); 5014 bool OptSize = shouldLowerMemFuncForSize(MF); 5015 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5016 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5017 DstAlignCanChange = true; 5018 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5019 if (Align > SrcAlign) 5020 SrcAlign = Align; 5021 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 5022 5023 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 5024 (DstAlignCanChange ? 0 : Align), SrcAlign, 5025 false, false, false, false, 5026 DstPtrInfo.getAddrSpace(), 5027 SrcPtrInfo.getAddrSpace(), 5028 DAG, TLI)) 5029 return SDValue(); 5030 5031 if (DstAlignCanChange) { 5032 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 5033 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 5034 if (NewAlign > Align) { 5035 // Give the stack frame object a larger alignment if needed. 5036 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5037 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5038 Align = NewAlign; 5039 } 5040 } 5041 5042 MachineMemOperand::Flags MMOFlags = 5043 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5044 uint64_t SrcOff = 0, DstOff = 0; 5045 SmallVector<SDValue, 8> LoadValues; 5046 SmallVector<SDValue, 8> LoadChains; 5047 SmallVector<SDValue, 8> OutChains; 5048 unsigned NumMemOps = MemOps.size(); 5049 for (unsigned i = 0; i < NumMemOps; i++) { 5050 EVT VT = MemOps[i]; 5051 unsigned VTSize = VT.getSizeInBits() / 8; 5052 SDValue Value; 5053 5054 Value = 5055 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), 5056 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, MMOFlags); 5057 LoadValues.push_back(Value); 5058 LoadChains.push_back(Value.getValue(1)); 5059 SrcOff += VTSize; 5060 } 5061 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 5062 OutChains.clear(); 5063 for (unsigned i = 0; i < NumMemOps; i++) { 5064 EVT VT = MemOps[i]; 5065 unsigned VTSize = VT.getSizeInBits() / 8; 5066 SDValue Store; 5067 5068 Store = DAG.getStore(Chain, dl, LoadValues[i], 5069 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5070 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags); 5071 OutChains.push_back(Store); 5072 DstOff += VTSize; 5073 } 5074 5075 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5076 } 5077 5078 /// \brief Lower the call to 'memset' intrinsic function into a series of store 5079 /// operations. 5080 /// 5081 /// \param DAG Selection DAG where lowered code is placed. 5082 /// \param dl Link to corresponding IR location. 5083 /// \param Chain Control flow dependency. 5084 /// \param Dst Pointer to destination memory location. 5085 /// \param Src Value of byte to write into the memory. 5086 /// \param Size Number of bytes to write. 5087 /// \param Align Alignment of the destination in bytes. 5088 /// \param isVol True if destination is volatile. 5089 /// \param DstPtrInfo IR information on the memory pointer. 5090 /// \returns New head in the control flow, if lowering was successful, empty 5091 /// SDValue otherwise. 5092 /// 5093 /// The function tries to replace 'llvm.memset' intrinsic with several store 5094 /// operations and value calculation code. This is usually profitable for small 5095 /// memory size. 
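/// Illustrative sketch (assuming i32 is the widest type chosen and the
/// destination is suitably aligned): a 16-byte memset with byte value c is
/// expanded roughly as
///   v = c * 0x01010101;                 // splat computed by getMemsetValue
///   store v, p+0; store v, p+4; store v, p+8; store v, p+12;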
5096 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, 5097 SDValue Chain, SDValue Dst, SDValue Src, 5098 uint64_t Size, unsigned Align, bool isVol, 5099 MachinePointerInfo DstPtrInfo) { 5100 // Turn a memset of undef to nop. 5101 if (Src.isUndef()) 5102 return Chain; 5103 5104 // Expand memset to a series of load/store ops if the size operand 5105 // falls below a certain threshold. 5106 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5107 std::vector<EVT> MemOps; 5108 bool DstAlignCanChange = false; 5109 MachineFunction &MF = DAG.getMachineFunction(); 5110 MachineFrameInfo &MFI = MF.getFrameInfo(); 5111 bool OptSize = shouldLowerMemFuncForSize(MF); 5112 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5113 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5114 DstAlignCanChange = true; 5115 bool IsZeroVal = 5116 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 5117 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize), 5118 Size, (DstAlignCanChange ? 0 : Align), 0, 5119 true, IsZeroVal, false, true, 5120 DstPtrInfo.getAddrSpace(), ~0u, 5121 DAG, TLI)) 5122 return SDValue(); 5123 5124 if (DstAlignCanChange) { 5125 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 5126 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 5127 if (NewAlign > Align) { 5128 // Give the stack frame object a larger alignment if needed. 5129 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5130 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5131 Align = NewAlign; 5132 } 5133 } 5134 5135 SmallVector<SDValue, 8> OutChains; 5136 uint64_t DstOff = 0; 5137 unsigned NumMemOps = MemOps.size(); 5138 5139 // Find the largest store and generate the bit pattern for it. 5140 EVT LargestVT = MemOps[0]; 5141 for (unsigned i = 1; i < NumMemOps; i++) 5142 if (MemOps[i].bitsGT(LargestVT)) 5143 LargestVT = MemOps[i]; 5144 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 5145 5146 for (unsigned i = 0; i < NumMemOps; i++) { 5147 EVT VT = MemOps[i]; 5148 unsigned VTSize = VT.getSizeInBits() / 8; 5149 if (VTSize > Size) { 5150 // Issuing an unaligned load / store pair that overlaps with the previous 5151 // pair. Adjust the offset accordingly. 5152 assert(i == NumMemOps-1 && i != 0); 5153 DstOff -= VTSize - Size; 5154 } 5155 5156 // If this store is smaller than the largest store see whether we can get 5157 // the smaller value for free with a truncate. 5158 SDValue Value = MemSetValue; 5159 if (VT.bitsLT(LargestVT)) { 5160 if (!LargestVT.isVector() && !VT.isVector() && 5161 TLI.isTruncateFree(LargestVT, VT)) 5162 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 5163 else 5164 Value = getMemsetValue(Src, VT, DAG, dl); 5165 } 5166 assert(Value.getValueType() == VT && "Value with wrong type."); 5167 SDValue Store = DAG.getStore( 5168 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5169 DstPtrInfo.getWithOffset(DstOff), Align, 5170 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 5171 OutChains.push_back(Store); 5172 DstOff += VT.getSizeInBits() / 8; 5173 Size -= VTSize; 5174 } 5175 5176 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5177 } 5178 5179 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 5180 unsigned AS) { 5181 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 5182 // pointer operands can be losslessly bitcasted to pointers of address space 0 5183 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 5184 report_fatal_error("cannot lower memory intrinsic in address space " + 5185 Twine(AS)); 5186 } 5187 } 5188 5189 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 5190 SDValue Src, SDValue Size, unsigned Align, 5191 bool isVol, bool AlwaysInline, bool isTailCall, 5192 MachinePointerInfo DstPtrInfo, 5193 MachinePointerInfo SrcPtrInfo) { 5194 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5195 5196 // Check to see if we should lower the memcpy to loads and stores first. 5197 // For cases within the target-specified limits, this is the best choice. 5198 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5199 if (ConstantSize) { 5200 // Memcpy with size zero? Just return the original chain. 5201 if (ConstantSize->isNullValue()) 5202 return Chain; 5203 5204 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 5205 ConstantSize->getZExtValue(),Align, 5206 isVol, false, DstPtrInfo, SrcPtrInfo); 5207 if (Result.getNode()) 5208 return Result; 5209 } 5210 5211 // Then check to see if we should lower the memcpy with target-specific 5212 // code. If the target chooses to do this, this is the next best. 5213 if (TSI) { 5214 SDValue Result = TSI->EmitTargetCodeForMemcpy( 5215 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 5216 DstPtrInfo, SrcPtrInfo); 5217 if (Result.getNode()) 5218 return Result; 5219 } 5220 5221 // If we really need inline code and the target declined to provide it, 5222 // use a (potentially long) sequence of loads and stores. 5223 if (AlwaysInline) { 5224 assert(ConstantSize && "AlwaysInline requires a constant size!"); 5225 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 5226 ConstantSize->getZExtValue(), Align, isVol, 5227 true, DstPtrInfo, SrcPtrInfo); 5228 } 5229 5230 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5231 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 5232 5233 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 5234 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 5235 // respect volatile, so they may do things like read or write memory 5236 // beyond the given memory regions. But fixing this isn't easy, and most 5237 // people don't care. 5238 5239 // Emit a library call. 
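// Conceptually this emits a call to the C library function
// memcpy(dst, src, size); all three arguments are passed with the target's
// intptr type and the call's return value is discarded.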
5240 TargetLowering::ArgListTy Args; 5241 TargetLowering::ArgListEntry Entry; 5242 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 5243 Entry.Node = Dst; Args.push_back(Entry); 5244 Entry.Node = Src; Args.push_back(Entry); 5245 Entry.Node = Size; Args.push_back(Entry); 5246 // FIXME: pass in SDLoc 5247 TargetLowering::CallLoweringInfo CLI(*this); 5248 CLI.setDebugLoc(dl) 5249 .setChain(Chain) 5250 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 5251 Dst.getValueType().getTypeForEVT(*getContext()), 5252 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 5253 TLI->getPointerTy(getDataLayout())), 5254 std::move(Args)) 5255 .setDiscardResult() 5256 .setTailCall(isTailCall); 5257 5258 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5259 return CallResult.second; 5260 } 5261 5262 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 5263 SDValue Src, SDValue Size, unsigned Align, 5264 bool isVol, bool isTailCall, 5265 MachinePointerInfo DstPtrInfo, 5266 MachinePointerInfo SrcPtrInfo) { 5267 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5268 5269 // Check to see if we should lower the memmove to loads and stores first. 5270 // For cases within the target-specified limits, this is the best choice. 5271 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5272 if (ConstantSize) { 5273 // Memmove with size zero? Just return the original chain. 5274 if (ConstantSize->isNullValue()) 5275 return Chain; 5276 5277 SDValue Result = 5278 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 5279 ConstantSize->getZExtValue(), Align, isVol, 5280 false, DstPtrInfo, SrcPtrInfo); 5281 if (Result.getNode()) 5282 return Result; 5283 } 5284 5285 // Then check to see if we should lower the memmove with target-specific 5286 // code. If the target chooses to do this, this is the next best. 5287 if (TSI) { 5288 SDValue Result = TSI->EmitTargetCodeForMemmove( 5289 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 5290 if (Result.getNode()) 5291 return Result; 5292 } 5293 5294 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5295 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 5296 5297 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 5298 // not be safe. See memcpy above for more details. 5299 5300 // Emit a library call. 
5301 TargetLowering::ArgListTy Args; 5302 TargetLowering::ArgListEntry Entry; 5303 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 5304 Entry.Node = Dst; Args.push_back(Entry); 5305 Entry.Node = Src; Args.push_back(Entry); 5306 Entry.Node = Size; Args.push_back(Entry); 5307 // FIXME: pass in SDLoc 5308 TargetLowering::CallLoweringInfo CLI(*this); 5309 CLI.setDebugLoc(dl) 5310 .setChain(Chain) 5311 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 5312 Dst.getValueType().getTypeForEVT(*getContext()), 5313 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 5314 TLI->getPointerTy(getDataLayout())), 5315 std::move(Args)) 5316 .setDiscardResult() 5317 .setTailCall(isTailCall); 5318 5319 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5320 return CallResult.second; 5321 } 5322 5323 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 5324 SDValue Src, SDValue Size, unsigned Align, 5325 bool isVol, bool isTailCall, 5326 MachinePointerInfo DstPtrInfo) { 5327 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5328 5329 // Check to see if we should lower the memset to stores first. 5330 // For cases within the target-specified limits, this is the best choice. 5331 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5332 if (ConstantSize) { 5333 // Memset with size zero? Just return the original chain. 5334 if (ConstantSize->isNullValue()) 5335 return Chain; 5336 5337 SDValue Result = 5338 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 5339 Align, isVol, DstPtrInfo); 5340 5341 if (Result.getNode()) 5342 return Result; 5343 } 5344 5345 // Then check to see if we should lower the memset with target-specific 5346 // code. If the target chooses to do this, this is the next best. 5347 if (TSI) { 5348 SDValue Result = TSI->EmitTargetCodeForMemset( 5349 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); 5350 if (Result.getNode()) 5351 return Result; 5352 } 5353 5354 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5355 5356 // Emit a library call. 
5357 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext()); 5358 TargetLowering::ArgListTy Args; 5359 TargetLowering::ArgListEntry Entry; 5360 Entry.Node = Dst; Entry.Ty = IntPtrTy; 5361 Args.push_back(Entry); 5362 Entry.Node = Src; 5363 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 5364 Args.push_back(Entry); 5365 Entry.Node = Size; 5366 Entry.Ty = IntPtrTy; 5367 Args.push_back(Entry); 5368 5369 // FIXME: pass in SDLoc 5370 TargetLowering::CallLoweringInfo CLI(*this); 5371 CLI.setDebugLoc(dl) 5372 .setChain(Chain) 5373 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 5374 Dst.getValueType().getTypeForEVT(*getContext()), 5375 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 5376 TLI->getPointerTy(getDataLayout())), 5377 std::move(Args)) 5378 .setDiscardResult() 5379 .setTailCall(isTailCall); 5380 5381 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5382 return CallResult.second; 5383 } 5384 5385 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5386 SDVTList VTList, ArrayRef<SDValue> Ops, 5387 MachineMemOperand *MMO) { 5388 FoldingSetNodeID ID; 5389 ID.AddInteger(MemVT.getRawBits()); 5390 AddNodeIDNode(ID, Opcode, VTList, Ops); 5391 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5392 void* IP = nullptr; 5393 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5394 cast<AtomicSDNode>(E)->refineAlignment(MMO); 5395 return SDValue(E, 0); 5396 } 5397 5398 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5399 VTList, MemVT, MMO); 5400 createOperands(N, Ops); 5401 5402 CSEMap.InsertNode(N, IP); 5403 InsertNode(N); 5404 return SDValue(N, 0); 5405 } 5406 5407 SDValue SelectionDAG::getAtomicCmpSwap( 5408 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, 5409 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, 5410 unsigned Alignment, AtomicOrdering SuccessOrdering, 5411 AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) { 5412 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 5413 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 5414 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 5415 5416 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5417 Alignment = getEVTAlignment(MemVT); 5418 5419 MachineFunction &MF = getMachineFunction(); 5420 5421 // FIXME: Volatile isn't really correct; we should keep track of atomic 5422 // orderings in the memoperand. 
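// A compare-and-swap both reads and writes memory, so the memoperand below
// gets MOLoad | MOStore; MOVolatile is added conservatively until orderings
// are modelled in the memoperand (see the FIXME above).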
5423 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad | 5424 MachineMemOperand::MOStore; 5425 MachineMemOperand *MMO = 5426 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, 5427 AAMDNodes(), nullptr, SynchScope, SuccessOrdering, 5428 FailureOrdering); 5429 5430 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO); 5431 } 5432 5433 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 5434 EVT MemVT, SDVTList VTs, SDValue Chain, 5435 SDValue Ptr, SDValue Cmp, SDValue Swp, 5436 MachineMemOperand *MMO) { 5437 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 5438 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 5439 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 5440 5441 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 5442 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5443 } 5444 5445 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5446 SDValue Chain, SDValue Ptr, SDValue Val, 5447 const Value *PtrVal, unsigned Alignment, 5448 AtomicOrdering Ordering, 5449 SynchronizationScope SynchScope) { 5450 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5451 Alignment = getEVTAlignment(MemVT); 5452 5453 MachineFunction &MF = getMachineFunction(); 5454 // An atomic store does not load. An atomic load does not store. 5455 // (An atomicrmw obviously both loads and stores.) 5456 // For now, atomics are considered to be volatile always, and they are 5457 // chained as such. 5458 // FIXME: Volatile isn't really correct; we should keep track of atomic 5459 // orderings in the memoperand. 5460 auto Flags = MachineMemOperand::MOVolatile; 5461 if (Opcode != ISD::ATOMIC_STORE) 5462 Flags |= MachineMemOperand::MOLoad; 5463 if (Opcode != ISD::ATOMIC_LOAD) 5464 Flags |= MachineMemOperand::MOStore; 5465 5466 MachineMemOperand *MMO = 5467 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 5468 MemVT.getStoreSize(), Alignment, AAMDNodes(), 5469 nullptr, SynchScope, Ordering); 5470 5471 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO); 5472 } 5473 5474 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5475 SDValue Chain, SDValue Ptr, SDValue Val, 5476 MachineMemOperand *MMO) { 5477 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 5478 Opcode == ISD::ATOMIC_LOAD_SUB || 5479 Opcode == ISD::ATOMIC_LOAD_AND || 5480 Opcode == ISD::ATOMIC_LOAD_OR || 5481 Opcode == ISD::ATOMIC_LOAD_XOR || 5482 Opcode == ISD::ATOMIC_LOAD_NAND || 5483 Opcode == ISD::ATOMIC_LOAD_MIN || 5484 Opcode == ISD::ATOMIC_LOAD_MAX || 5485 Opcode == ISD::ATOMIC_LOAD_UMIN || 5486 Opcode == ISD::ATOMIC_LOAD_UMAX || 5487 Opcode == ISD::ATOMIC_SWAP || 5488 Opcode == ISD::ATOMIC_STORE) && 5489 "Invalid Atomic Op"); 5490 5491 EVT VT = Val.getValueType(); 5492 5493 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : 5494 getVTList(VT, MVT::Other); 5495 SDValue Ops[] = {Chain, Ptr, Val}; 5496 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5497 } 5498 5499 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5500 EVT VT, SDValue Chain, SDValue Ptr, 5501 MachineMemOperand *MMO) { 5502 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 5503 5504 SDVTList VTs = getVTList(VT, MVT::Other); 5505 SDValue Ops[] = {Chain, Ptr}; 5506 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5507 } 5508 5509 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 
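/// With a single operand the operand itself is returned; otherwise the i-th
/// result of the MERGE_VALUES node has the type of Ops[i]. A typical use is
/// bundling a lowered value together with its chain into one SDValue.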
5510 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 5511 if (Ops.size() == 1) 5512 return Ops[0]; 5513 5514 SmallVector<EVT, 4> VTs; 5515 VTs.reserve(Ops.size()); 5516 for (unsigned i = 0; i < Ops.size(); ++i) 5517 VTs.push_back(Ops[i].getValueType()); 5518 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 5519 } 5520 5521 SDValue SelectionDAG::getMemIntrinsicNode( 5522 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 5523 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, bool Vol, 5524 bool ReadMem, bool WriteMem, unsigned Size) { 5525 if (Align == 0) // Ensure that codegen never sees alignment 0 5526 Align = getEVTAlignment(MemVT); 5527 5528 MachineFunction &MF = getMachineFunction(); 5529 auto Flags = MachineMemOperand::MONone; 5530 if (WriteMem) 5531 Flags |= MachineMemOperand::MOStore; 5532 if (ReadMem) 5533 Flags |= MachineMemOperand::MOLoad; 5534 if (Vol) 5535 Flags |= MachineMemOperand::MOVolatile; 5536 if (!Size) 5537 Size = MemVT.getStoreSize(); 5538 MachineMemOperand *MMO = 5539 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align); 5540 5541 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 5542 } 5543 5544 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 5545 SDVTList VTList, 5546 ArrayRef<SDValue> Ops, EVT MemVT, 5547 MachineMemOperand *MMO) { 5548 assert((Opcode == ISD::INTRINSIC_VOID || 5549 Opcode == ISD::INTRINSIC_W_CHAIN || 5550 Opcode == ISD::PREFETCH || 5551 Opcode == ISD::LIFETIME_START || 5552 Opcode == ISD::LIFETIME_END || 5553 (Opcode <= INT_MAX && 5554 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 5555 "Opcode is not a memory-accessing opcode!"); 5556 5557 // Memoize the node unless it returns a flag. 5558 MemIntrinsicSDNode *N; 5559 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 5560 FoldingSetNodeID ID; 5561 AddNodeIDNode(ID, Opcode, VTList, Ops); 5562 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5563 void *IP = nullptr; 5564 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5565 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 5566 return SDValue(E, 0); 5567 } 5568 5569 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5570 VTList, MemVT, MMO); 5571 createOperands(N, Ops); 5572 5573 CSEMap.InsertNode(N, IP); 5574 } else { 5575 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5576 VTList, MemVT, MMO); 5577 createOperands(N, Ops); 5578 } 5579 InsertNode(N); 5580 return SDValue(N, 0); 5581 } 5582 5583 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 5584 /// MachinePointerInfo record from it. This is particularly useful because the 5585 /// code generator has many cases where it doesn't bother passing in a 5586 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 5587 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, 5588 int64_t Offset = 0) { 5589 // If this is FI+Offset, we can model it. 5590 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 5591 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 5592 FI->getIndex(), Offset); 5593 5594 // If this is (FI+Offset1)+Offset2, we can model it. 
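// For example, Ptr = (add (FrameIndex 3), 8) together with an incoming
// Offset of 4 is modelled as the fixed stack object FI#3 at offset 12.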
5595 if (Ptr.getOpcode() != ISD::ADD || 5596 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 5597 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 5598 return MachinePointerInfo(); 5599 5600 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 5601 return MachinePointerInfo::getFixedStack( 5602 DAG.getMachineFunction(), FI, 5603 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 5604 } 5605 5606 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 5607 /// MachinePointerInfo record from it. This is particularly useful because the 5608 /// code generator has many cases where it doesn't bother passing in a 5609 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 5610 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, 5611 SDValue OffsetOp) { 5612 // If the 'Offset' value isn't a constant, we can't handle this. 5613 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 5614 return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue()); 5615 if (OffsetOp.isUndef()) 5616 return InferPointerInfo(DAG, Ptr); 5617 return MachinePointerInfo(); 5618 } 5619 5620 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 5621 EVT VT, const SDLoc &dl, SDValue Chain, 5622 SDValue Ptr, SDValue Offset, 5623 MachinePointerInfo PtrInfo, EVT MemVT, 5624 unsigned Alignment, 5625 MachineMemOperand::Flags MMOFlags, 5626 const AAMDNodes &AAInfo, const MDNode *Ranges) { 5627 assert(Chain.getValueType() == MVT::Other && 5628 "Invalid chain type"); 5629 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5630 Alignment = getEVTAlignment(MemVT); 5631 5632 MMOFlags |= MachineMemOperand::MOLoad; 5633 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 5634 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 5635 // clients. 5636 if (PtrInfo.V.isNull()) 5637 PtrInfo = InferPointerInfo(*this, Ptr, Offset); 5638 5639 MachineFunction &MF = getMachineFunction(); 5640 MachineMemOperand *MMO = MF.getMachineMemOperand( 5641 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges); 5642 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 5643 } 5644 5645 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 5646 EVT VT, const SDLoc &dl, SDValue Chain, 5647 SDValue Ptr, SDValue Offset, EVT MemVT, 5648 MachineMemOperand *MMO) { 5649 if (VT == MemVT) { 5650 ExtType = ISD::NON_EXTLOAD; 5651 } else if (ExtType == ISD::NON_EXTLOAD) { 5652 assert(VT == MemVT && "Non-extending load from different memory type!"); 5653 } else { 5654 // Extending load. 5655 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 5656 "Should only be an extending load, not truncating!"); 5657 assert(VT.isInteger() == MemVT.isInteger() && 5658 "Cannot convert from FP to Int or Int -> FP!"); 5659 assert(VT.isVector() == MemVT.isVector() && 5660 "Cannot use an ext load to convert to or from a vector!"); 5661 assert((!VT.isVector() || 5662 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 5663 "Cannot use an ext load to change the number of vector elements!"); 5664 } 5665 5666 bool Indexed = AM != ISD::UNINDEXED; 5667 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 5668 5669 SDVTList VTs = Indexed ? 
5670 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); 5671 SDValue Ops[] = { Chain, Ptr, Offset }; 5672 FoldingSetNodeID ID; 5673 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); 5674 ID.AddInteger(MemVT.getRawBits()); 5675 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( 5676 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); 5677 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5678 void *IP = nullptr; 5679 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5680 cast<LoadSDNode>(E)->refineAlignment(MMO); 5681 return SDValue(E, 0); 5682 } 5683 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 5684 ExtType, MemVT, MMO); 5685 createOperands(N, Ops); 5686 5687 CSEMap.InsertNode(N, IP); 5688 InsertNode(N); 5689 return SDValue(N, 0); 5690 } 5691 5692 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 5693 SDValue Ptr, MachinePointerInfo PtrInfo, 5694 unsigned Alignment, 5695 MachineMemOperand::Flags MMOFlags, 5696 const AAMDNodes &AAInfo, const MDNode *Ranges) { 5697 SDValue Undef = getUNDEF(Ptr.getValueType()); 5698 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 5699 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); 5700 } 5701 5702 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 5703 SDValue Ptr, MachineMemOperand *MMO) { 5704 SDValue Undef = getUNDEF(Ptr.getValueType()); 5705 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 5706 VT, MMO); 5707 } 5708 5709 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 5710 EVT VT, SDValue Chain, SDValue Ptr, 5711 MachinePointerInfo PtrInfo, EVT MemVT, 5712 unsigned Alignment, 5713 MachineMemOperand::Flags MMOFlags, 5714 const AAMDNodes &AAInfo) { 5715 SDValue Undef = getUNDEF(Ptr.getValueType()); 5716 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, 5717 MemVT, Alignment, MMOFlags, AAInfo); 5718 } 5719 5720 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 5721 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, 5722 MachineMemOperand *MMO) { 5723 SDValue Undef = getUNDEF(Ptr.getValueType()); 5724 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, 5725 MemVT, MMO); 5726 } 5727 5728 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, 5729 SDValue Base, SDValue Offset, 5730 ISD::MemIndexedMode AM) { 5731 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); 5732 assert(LD->getOffset().isUndef() && "Load is already a indexed load!"); 5733 // Don't propagate the invariant or dereferenceable flags. 
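// The indexed form may access a different address than the original load,
// so those properties cannot safely be assumed to carry over.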
5734 auto MMOFlags = 5735 LD->getMemOperand()->getFlags() & 5736 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 5737 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 5738 LD->getChain(), Base, Offset, LD->getPointerInfo(), 5739 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 5740 LD->getAAInfo()); 5741 } 5742 5743 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5744 SDValue Ptr, MachinePointerInfo PtrInfo, 5745 unsigned Alignment, 5746 MachineMemOperand::Flags MMOFlags, 5747 const AAMDNodes &AAInfo) { 5748 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 5749 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5750 Alignment = getEVTAlignment(Val.getValueType()); 5751 5752 MMOFlags |= MachineMemOperand::MOStore; 5753 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 5754 5755 if (PtrInfo.V.isNull()) 5756 PtrInfo = InferPointerInfo(*this, Ptr); 5757 5758 MachineFunction &MF = getMachineFunction(); 5759 MachineMemOperand *MMO = MF.getMachineMemOperand( 5760 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo); 5761 return getStore(Chain, dl, Val, Ptr, MMO); 5762 } 5763 5764 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5765 SDValue Ptr, MachineMemOperand *MMO) { 5766 assert(Chain.getValueType() == MVT::Other && 5767 "Invalid chain type"); 5768 EVT VT = Val.getValueType(); 5769 SDVTList VTs = getVTList(MVT::Other); 5770 SDValue Undef = getUNDEF(Ptr.getValueType()); 5771 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 5772 FoldingSetNodeID ID; 5773 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 5774 ID.AddInteger(VT.getRawBits()); 5775 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 5776 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 5777 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5778 void *IP = nullptr; 5779 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5780 cast<StoreSDNode>(E)->refineAlignment(MMO); 5781 return SDValue(E, 0); 5782 } 5783 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5784 ISD::UNINDEXED, false, VT, MMO); 5785 createOperands(N, Ops); 5786 5787 CSEMap.InsertNode(N, IP); 5788 InsertNode(N); 5789 return SDValue(N, 0); 5790 } 5791 5792 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5793 SDValue Ptr, MachinePointerInfo PtrInfo, 5794 EVT SVT, unsigned Alignment, 5795 MachineMemOperand::Flags MMOFlags, 5796 const AAMDNodes &AAInfo) { 5797 assert(Chain.getValueType() == MVT::Other && 5798 "Invalid chain type"); 5799 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5800 Alignment = getEVTAlignment(SVT); 5801 5802 MMOFlags |= MachineMemOperand::MOStore; 5803 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 5804 5805 if (PtrInfo.V.isNull()) 5806 PtrInfo = InferPointerInfo(*this, Ptr); 5807 5808 MachineFunction &MF = getMachineFunction(); 5809 MachineMemOperand *MMO = MF.getMachineMemOperand( 5810 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 5811 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 5812 } 5813 5814 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5815 SDValue Ptr, EVT SVT, 5816 MachineMemOperand *MMO) { 5817 EVT VT = Val.getValueType(); 5818 5819 assert(Chain.getValueType() == MVT::Other && 5820 "Invalid chain type"); 5821 if (VT == SVT) 5822 return getStore(Chain, dl, Val, Ptr, MMO); 5823 5824 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 5825 
"Should only be a truncating store, not extending!"); 5826 assert(VT.isInteger() == SVT.isInteger() && 5827 "Can't do FP-INT conversion!"); 5828 assert(VT.isVector() == SVT.isVector() && 5829 "Cannot use trunc store to convert to or from a vector!"); 5830 assert((!VT.isVector() || 5831 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 5832 "Cannot use trunc store to change the number of vector elements!"); 5833 5834 SDVTList VTs = getVTList(MVT::Other); 5835 SDValue Undef = getUNDEF(Ptr.getValueType()); 5836 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 5837 FoldingSetNodeID ID; 5838 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 5839 ID.AddInteger(SVT.getRawBits()); 5840 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 5841 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 5842 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5843 void *IP = nullptr; 5844 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5845 cast<StoreSDNode>(E)->refineAlignment(MMO); 5846 return SDValue(E, 0); 5847 } 5848 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5849 ISD::UNINDEXED, true, SVT, MMO); 5850 createOperands(N, Ops); 5851 5852 CSEMap.InsertNode(N, IP); 5853 InsertNode(N); 5854 return SDValue(N, 0); 5855 } 5856 5857 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 5858 SDValue Base, SDValue Offset, 5859 ISD::MemIndexedMode AM) { 5860 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 5861 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 5862 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 5863 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 5864 FoldingSetNodeID ID; 5865 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 5866 ID.AddInteger(ST->getMemoryVT().getRawBits()); 5867 ID.AddInteger(ST->getRawSubclassData()); 5868 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 5869 void *IP = nullptr; 5870 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 5871 return SDValue(E, 0); 5872 5873 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 5874 ST->isTruncatingStore(), ST->getMemoryVT(), 5875 ST->getMemOperand()); 5876 createOperands(N, Ops); 5877 5878 CSEMap.InsertNode(N, IP); 5879 InsertNode(N); 5880 return SDValue(N, 0); 5881 } 5882 5883 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 5884 SDValue Ptr, SDValue Mask, SDValue Src0, 5885 EVT MemVT, MachineMemOperand *MMO, 5886 ISD::LoadExtType ExtTy, bool isExpanding) { 5887 5888 SDVTList VTs = getVTList(VT, MVT::Other); 5889 SDValue Ops[] = { Chain, Ptr, Mask, Src0 }; 5890 FoldingSetNodeID ID; 5891 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 5892 ID.AddInteger(VT.getRawBits()); 5893 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 5894 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO)); 5895 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5896 void *IP = nullptr; 5897 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5898 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 5899 return SDValue(E, 0); 5900 } 5901 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5902 ExtTy, isExpanding, MemVT, MMO); 5903 createOperands(N, Ops); 5904 5905 CSEMap.InsertNode(N, IP); 5906 InsertNode(N); 5907 return SDValue(N, 0); 5908 } 5909 5910 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 5911 SDValue Val, SDValue Ptr, SDValue Mask, 5912 EVT MemVT, MachineMemOperand *MMO, 5913 bool IsTruncating, bool IsCompressing) { 5914 
assert(Chain.getValueType() == MVT::Other && 5915 "Invalid chain type"); 5916 EVT VT = Val.getValueType(); 5917 SDVTList VTs = getVTList(MVT::Other); 5918 SDValue Ops[] = { Chain, Ptr, Mask, Val }; 5919 FoldingSetNodeID ID; 5920 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 5921 ID.AddInteger(VT.getRawBits()); 5922 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 5923 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO)); 5924 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5925 void *IP = nullptr; 5926 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5927 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 5928 return SDValue(E, 0); 5929 } 5930 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5931 IsTruncating, IsCompressing, MemVT, MMO); 5932 createOperands(N, Ops); 5933 5934 CSEMap.InsertNode(N, IP); 5935 InsertNode(N); 5936 return SDValue(N, 0); 5937 } 5938 5939 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 5940 ArrayRef<SDValue> Ops, 5941 MachineMemOperand *MMO) { 5942 assert(Ops.size() == 5 && "Incompatible number of operands"); 5943 5944 FoldingSetNodeID ID; 5945 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 5946 ID.AddInteger(VT.getRawBits()); 5947 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 5948 dl.getIROrder(), VTs, VT, MMO)); 5949 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5950 void *IP = nullptr; 5951 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5952 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 5953 return SDValue(E, 0); 5954 } 5955 5956 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 5957 VTs, VT, MMO); 5958 createOperands(N, Ops); 5959 5960 assert(N->getValue().getValueType() == N->getValueType(0) && 5961 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 5962 assert(N->getMask().getValueType().getVectorNumElements() == 5963 N->getValueType(0).getVectorNumElements() && 5964 "Vector width mismatch between mask and data"); 5965 assert(N->getIndex().getValueType().getVectorNumElements() == 5966 N->getValueType(0).getVectorNumElements() && 5967 "Vector width mismatch between index and data"); 5968 5969 CSEMap.InsertNode(N, IP); 5970 InsertNode(N); 5971 return SDValue(N, 0); 5972 } 5973 5974 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 5975 ArrayRef<SDValue> Ops, 5976 MachineMemOperand *MMO) { 5977 assert(Ops.size() == 5 && "Incompatible number of operands"); 5978 5979 FoldingSetNodeID ID; 5980 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 5981 ID.AddInteger(VT.getRawBits()); 5982 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 5983 dl.getIROrder(), VTs, VT, MMO)); 5984 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5985 void *IP = nullptr; 5986 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5987 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 5988 return SDValue(E, 0); 5989 } 5990 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), 5991 VTs, VT, MMO); 5992 createOperands(N, Ops); 5993 5994 assert(N->getMask().getValueType().getVectorNumElements() == 5995 N->getValue().getValueType().getVectorNumElements() && 5996 "Vector width mismatch between mask and data"); 5997 assert(N->getIndex().getValueType().getVectorNumElements() == 5998 N->getValue().getValueType().getVectorNumElements() && 5999 "Vector width mismatch between index and data"); 6000 6001 CSEMap.InsertNode(N, IP); 6002 InsertNode(N); 6003 return SDValue(N, 0); 6004 } 6005 
6006 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 6007 SDValue Ptr, SDValue SV, unsigned Align) { 6008 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 6009 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 6010 } 6011 6012 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 6013 ArrayRef<SDUse> Ops) { 6014 switch (Ops.size()) { 6015 case 0: return getNode(Opcode, DL, VT); 6016 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 6017 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 6018 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 6019 default: break; 6020 } 6021 6022 // Copy from an SDUse array into an SDValue array for use with 6023 // the regular getNode logic. 6024 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 6025 return getNode(Opcode, DL, VT, NewOps); 6026 } 6027 6028 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 6029 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 6030 unsigned NumOps = Ops.size(); 6031 switch (NumOps) { 6032 case 0: return getNode(Opcode, DL, VT); 6033 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 6034 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 6035 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 6036 default: break; 6037 } 6038 6039 switch (Opcode) { 6040 default: break; 6041 case ISD::CONCAT_VECTORS: { 6042 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 6043 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 6044 return V; 6045 break; 6046 } 6047 case ISD::SELECT_CC: { 6048 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 6049 assert(Ops[0].getValueType() == Ops[1].getValueType() && 6050 "LHS and RHS of condition must have same type!"); 6051 assert(Ops[2].getValueType() == Ops[3].getValueType() && 6052 "True and False arms of SelectCC must have same type!"); 6053 assert(Ops[2].getValueType() == VT && 6054 "select_cc node must be of same type as true and false value!"); 6055 break; 6056 } 6057 case ISD::BR_CC: { 6058 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 6059 assert(Ops[2].getValueType() == Ops[3].getValueType() && 6060 "LHS/RHS of comparison should match types!"); 6061 break; 6062 } 6063 } 6064 6065 // Memoize nodes. 
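// Nodes are uniqued through the CSEMap: the FoldingSet key is built from the
// opcode, the value type list and the operands, so an existing identical node
// is returned instead of allocating a duplicate. Nodes producing MVT::Glue
// are never CSE'd.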
6066 SDNode *N; 6067 SDVTList VTs = getVTList(VT); 6068 6069 if (VT != MVT::Glue) { 6070 FoldingSetNodeID ID; 6071 AddNodeIDNode(ID, Opcode, VTs, Ops); 6072 void *IP = nullptr; 6073 6074 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 6075 return SDValue(E, 0); 6076 6077 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 6078 createOperands(N, Ops); 6079 6080 CSEMap.InsertNode(N, IP); 6081 } else { 6082 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 6083 createOperands(N, Ops); 6084 } 6085 6086 InsertNode(N); 6087 return SDValue(N, 0); 6088 } 6089 6090 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 6091 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 6092 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 6093 } 6094 6095 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6096 ArrayRef<SDValue> Ops) { 6097 if (VTList.NumVTs == 1) 6098 return getNode(Opcode, DL, VTList.VTs[0], Ops); 6099 6100 #if 0 6101 switch (Opcode) { 6102 // FIXME: figure out how to safely handle things like 6103 // int foo(int x) { return 1 << (x & 255); } 6104 // int bar() { return foo(256); } 6105 case ISD::SRA_PARTS: 6106 case ISD::SRL_PARTS: 6107 case ISD::SHL_PARTS: 6108 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 6109 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 6110 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 6111 else if (N3.getOpcode() == ISD::AND) 6112 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 6113 // If the and is only masking out bits that cannot effect the shift, 6114 // eliminate the and. 6115 unsigned NumBits = VT.getScalarSizeInBits()*2; 6116 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 6117 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 6118 } 6119 break; 6120 } 6121 #endif 6122 6123 // Memoize the node unless it returns a flag. 
6124 SDNode *N; 6125 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6126 FoldingSetNodeID ID; 6127 AddNodeIDNode(ID, Opcode, VTList, Ops); 6128 void *IP = nullptr; 6129 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 6130 return SDValue(E, 0); 6131 6132 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 6133 createOperands(N, Ops); 6134 CSEMap.InsertNode(N, IP); 6135 } else { 6136 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 6137 createOperands(N, Ops); 6138 } 6139 InsertNode(N); 6140 return SDValue(N, 0); 6141 } 6142 6143 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 6144 SDVTList VTList) { 6145 return getNode(Opcode, DL, VTList, None); 6146 } 6147 6148 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6149 SDValue N1) { 6150 SDValue Ops[] = { N1 }; 6151 return getNode(Opcode, DL, VTList, Ops); 6152 } 6153 6154 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6155 SDValue N1, SDValue N2) { 6156 SDValue Ops[] = { N1, N2 }; 6157 return getNode(Opcode, DL, VTList, Ops); 6158 } 6159 6160 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6161 SDValue N1, SDValue N2, SDValue N3) { 6162 SDValue Ops[] = { N1, N2, N3 }; 6163 return getNode(Opcode, DL, VTList, Ops); 6164 } 6165 6166 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6167 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 6168 SDValue Ops[] = { N1, N2, N3, N4 }; 6169 return getNode(Opcode, DL, VTList, Ops); 6170 } 6171 6172 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6173 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 6174 SDValue N5) { 6175 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 6176 return getNode(Opcode, DL, VTList, Ops); 6177 } 6178 6179 SDVTList SelectionDAG::getVTList(EVT VT) { 6180 return makeVTList(SDNode::getValueTypeList(VT), 1); 6181 } 6182 6183 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 6184 FoldingSetNodeID ID; 6185 ID.AddInteger(2U); 6186 ID.AddInteger(VT1.getRawBits()); 6187 ID.AddInteger(VT2.getRawBits()); 6188 6189 void *IP = nullptr; 6190 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6191 if (!Result) { 6192 EVT *Array = Allocator.Allocate<EVT>(2); 6193 Array[0] = VT1; 6194 Array[1] = VT2; 6195 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 6196 VTListMap.InsertNode(Result, IP); 6197 } 6198 return Result->getSDVTList(); 6199 } 6200 6201 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 6202 FoldingSetNodeID ID; 6203 ID.AddInteger(3U); 6204 ID.AddInteger(VT1.getRawBits()); 6205 ID.AddInteger(VT2.getRawBits()); 6206 ID.AddInteger(VT3.getRawBits()); 6207 6208 void *IP = nullptr; 6209 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6210 if (!Result) { 6211 EVT *Array = Allocator.Allocate<EVT>(3); 6212 Array[0] = VT1; 6213 Array[1] = VT2; 6214 Array[2] = VT3; 6215 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 6216 VTListMap.InsertNode(Result, IP); 6217 } 6218 return Result->getSDVTList(); 6219 } 6220 6221 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 6222 FoldingSetNodeID ID; 6223 ID.AddInteger(4U); 6224 ID.AddInteger(VT1.getRawBits()); 6225 ID.AddInteger(VT2.getRawBits()); 6226 ID.AddInteger(VT3.getRawBits()); 6227 ID.AddInteger(VT4.getRawBits()); 6228 6229 void *IP = nullptr; 6230 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6231 if (!Result) { 
6232 EVT *Array = Allocator.Allocate<EVT>(4); 6233 Array[0] = VT1; 6234 Array[1] = VT2; 6235 Array[2] = VT3; 6236 Array[3] = VT4; 6237 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 6238 VTListMap.InsertNode(Result, IP); 6239 } 6240 return Result->getSDVTList(); 6241 } 6242 6243 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 6244 unsigned NumVTs = VTs.size(); 6245 FoldingSetNodeID ID; 6246 ID.AddInteger(NumVTs); 6247 for (unsigned index = 0; index < NumVTs; index++) { 6248 ID.AddInteger(VTs[index].getRawBits()); 6249 } 6250 6251 void *IP = nullptr; 6252 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6253 if (!Result) { 6254 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 6255 std::copy(VTs.begin(), VTs.end(), Array); 6256 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 6257 VTListMap.InsertNode(Result, IP); 6258 } 6259 return Result->getSDVTList(); 6260 } 6261 6262 6263 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 6264 /// specified operands. If the resultant node already exists in the DAG, 6265 /// this does not modify the specified node, instead it returns the node that 6266 /// already exists. If the resultant node does not exist in the DAG, the 6267 /// input node is returned. As a degenerate case, if you specify the same 6268 /// input operands as the node already has, the input node is returned. 6269 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 6270 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 6271 6272 // Check to see if there is no change. 6273 if (Op == N->getOperand(0)) return N; 6274 6275 // See if the modified node already exists. 6276 void *InsertPos = nullptr; 6277 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 6278 return Existing; 6279 6280 // Nope it doesn't. Remove the node from its current place in the maps. 6281 if (InsertPos) 6282 if (!RemoveNodeFromCSEMaps(N)) 6283 InsertPos = nullptr; 6284 6285 // Now we update the operands. 6286 N->OperandList[0].set(Op); 6287 6288 // If this gets put into a CSE map, add it. 6289 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6290 return N; 6291 } 6292 6293 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 6294 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 6295 6296 // Check to see if there is no change. 6297 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 6298 return N; // No operands changed, just return the input node. 6299 6300 // See if the modified node already exists. 6301 void *InsertPos = nullptr; 6302 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 6303 return Existing; 6304 6305 // Nope it doesn't. Remove the node from its current place in the maps. 6306 if (InsertPos) 6307 if (!RemoveNodeFromCSEMaps(N)) 6308 InsertPos = nullptr; 6309 6310 // Now we update the operands. 6311 if (N->OperandList[0] != Op1) 6312 N->OperandList[0].set(Op1); 6313 if (N->OperandList[1] != Op2) 6314 N->OperandList[1].set(Op2); 6315 6316 // If this gets put into a CSE map, add it. 
6317 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6318 return N; 6319 } 6320 6321 SDNode *SelectionDAG:: 6322 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 6323 SDValue Ops[] = { Op1, Op2, Op3 }; 6324 return UpdateNodeOperands(N, Ops); 6325 } 6326 6327 SDNode *SelectionDAG:: 6328 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 6329 SDValue Op3, SDValue Op4) { 6330 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 6331 return UpdateNodeOperands(N, Ops); 6332 } 6333 6334 SDNode *SelectionDAG:: 6335 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 6336 SDValue Op3, SDValue Op4, SDValue Op5) { 6337 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 6338 return UpdateNodeOperands(N, Ops); 6339 } 6340 6341 SDNode *SelectionDAG:: 6342 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 6343 unsigned NumOps = Ops.size(); 6344 assert(N->getNumOperands() == NumOps && 6345 "Update with wrong number of operands"); 6346 6347 // If no operands changed just return the input node. 6348 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 6349 return N; 6350 6351 // See if the modified node already exists. 6352 void *InsertPos = nullptr; 6353 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 6354 return Existing; 6355 6356 // Nope it doesn't. Remove the node from its current place in the maps. 6357 if (InsertPos) 6358 if (!RemoveNodeFromCSEMaps(N)) 6359 InsertPos = nullptr; 6360 6361 // Now we update the operands. 6362 for (unsigned i = 0; i != NumOps; ++i) 6363 if (N->OperandList[i] != Ops[i]) 6364 N->OperandList[i].set(Ops[i]); 6365 6366 // If this gets put into a CSE map, add it. 6367 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6368 return N; 6369 } 6370 6371 /// DropOperands - Release the operands and set this node to have 6372 /// zero operands. 6373 void SDNode::DropOperands() { 6374 // Unlike the code in MorphNodeTo that does this, we don't need to 6375 // watch for dead nodes here. 6376 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 6377 SDUse &Use = *I++; 6378 Use.set(SDValue()); 6379 } 6380 } 6381 6382 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 6383 /// machine opcode. 
6384 /// 6385 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6386 EVT VT) { 6387 SDVTList VTs = getVTList(VT); 6388 return SelectNodeTo(N, MachineOpc, VTs, None); 6389 } 6390 6391 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6392 EVT VT, SDValue Op1) { 6393 SDVTList VTs = getVTList(VT); 6394 SDValue Ops[] = { Op1 }; 6395 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6396 } 6397 6398 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6399 EVT VT, SDValue Op1, 6400 SDValue Op2) { 6401 SDVTList VTs = getVTList(VT); 6402 SDValue Ops[] = { Op1, Op2 }; 6403 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6404 } 6405 6406 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6407 EVT VT, SDValue Op1, 6408 SDValue Op2, SDValue Op3) { 6409 SDVTList VTs = getVTList(VT); 6410 SDValue Ops[] = { Op1, Op2, Op3 }; 6411 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6412 } 6413 6414 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6415 EVT VT, ArrayRef<SDValue> Ops) { 6416 SDVTList VTs = getVTList(VT); 6417 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6418 } 6419 6420 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6421 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 6422 SDVTList VTs = getVTList(VT1, VT2); 6423 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6424 } 6425 6426 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6427 EVT VT1, EVT VT2) { 6428 SDVTList VTs = getVTList(VT1, VT2); 6429 return SelectNodeTo(N, MachineOpc, VTs, None); 6430 } 6431 6432 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6433 EVT VT1, EVT VT2, EVT VT3, 6434 ArrayRef<SDValue> Ops) { 6435 SDVTList VTs = getVTList(VT1, VT2, VT3); 6436 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6437 } 6438 6439 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6440 EVT VT1, EVT VT2, 6441 SDValue Op1, SDValue Op2) { 6442 SDVTList VTs = getVTList(VT1, VT2); 6443 SDValue Ops[] = { Op1, Op2 }; 6444 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6445 } 6446 6447 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6448 SDVTList VTs, ArrayRef<SDValue> Ops) { 6449 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 6450 // Reset the NodeID to -1. 6451 New->setNodeId(-1); 6452 if (New != N) { 6453 ReplaceAllUsesWith(N, New); 6454 RemoveDeadNode(N); 6455 } 6456 return New; 6457 } 6458 6459 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away 6460 /// the line number information on the merged node since it is not possible to 6461 /// preserve the information that the operation is associated with multiple lines. 6462 /// This will make the debugger work better at -O0, where there is a higher 6463 /// probability of having other instructions associated with that line. 6464 /// 6465 /// For IROrder, we keep the smaller of the two. 6466 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { 6467 DebugLoc NLoc = N->getDebugLoc(); 6468 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) { 6469 N->setDebugLoc(DebugLoc()); 6470 } 6471 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); 6472 N->setIROrder(Order); 6473 return N; 6474 } 6475 6476 /// MorphNodeTo - This *mutates* the specified node to have the specified 6477 /// return type, opcode, and operands. 6478 /// 6479 /// Note that MorphNodeTo returns the resultant node.
If there is already a 6480 /// node of the specified opcode and operands, it returns that node instead of 6481 /// the current one. Note that the SDLoc need not be the same. 6482 /// 6483 /// Using MorphNodeTo is faster than creating a new node and swapping it in 6484 /// with ReplaceAllUsesWith both because it often avoids allocating a new 6485 /// node, and because it doesn't require CSE recalculation for any of 6486 /// the node's users. 6487 /// 6488 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG. 6489 /// As a consequence it isn't appropriate to use from within the DAG combiner or 6490 /// the legalizer which maintain worklists that would need to be updated when 6491 /// deleting things. 6492 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, 6493 SDVTList VTs, ArrayRef<SDValue> Ops) { 6494 // If an identical node already exists, use it. 6495 void *IP = nullptr; 6496 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { 6497 FoldingSetNodeID ID; 6498 AddNodeIDNode(ID, Opc, VTs, Ops); 6499 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) 6500 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); 6501 } 6502 6503 if (!RemoveNodeFromCSEMaps(N)) 6504 IP = nullptr; 6505 6506 // Start the morphing. 6507 N->NodeType = Opc; 6508 N->ValueList = VTs.VTs; 6509 N->NumValues = VTs.NumVTs; 6510 6511 // Clear the operands list, updating used nodes to remove this from their 6512 // use list. Keep track of any operands that become dead as a result. 6513 SmallPtrSet<SDNode*, 16> DeadNodeSet; 6514 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 6515 SDUse &Use = *I++; 6516 SDNode *Used = Use.getNode(); 6517 Use.set(SDValue()); 6518 if (Used->use_empty()) 6519 DeadNodeSet.insert(Used); 6520 } 6521 6522 // For MachineNode, initialize the memory references information. 6523 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) 6524 MN->setMemRefs(nullptr, nullptr); 6525 6526 // Swap for an appropriately sized array from the recycler. 6527 removeOperands(N); 6528 createOperands(N, Ops); 6529 6530 // Delete any nodes that are still dead after adding the uses for the 6531 // new operands. 6532 if (!DeadNodeSet.empty()) { 6533 SmallVector<SDNode *, 16> DeadNodes; 6534 for (SDNode *N : DeadNodeSet) 6535 if (N->use_empty()) 6536 DeadNodes.push_back(N); 6537 RemoveDeadNodes(DeadNodes); 6538 } 6539 6540 if (IP) 6541 CSEMap.InsertNode(N, IP); // Memoize the new node. 6542 return N; 6543 } 6544 6545 6546 /// getMachineNode - These are used for target selectors to create a new node 6547 /// with specified return type(s), MachineInstr opcode, and operands. 6548 /// 6549 /// Note that getMachineNode returns the resultant node. If there is already a 6550 /// node of the specified opcode and operands, it returns that node instead of 6551 /// the current one. 
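// Illustrative sketch, not part of the original source: a target's selector
// can materialize a machine node directly. DAG and DL below are assumed to be
// a SelectionDAG and an SDLoc already in scope:
//   MachineSDNode *Def =
//       DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32);
// Requesting the same opcode, value types, and operands again returns the
// node CSE'd above rather than allocating a new one.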
6552 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6553 EVT VT) { 6554 SDVTList VTs = getVTList(VT); 6555 return getMachineNode(Opcode, dl, VTs, None); 6556 } 6557 6558 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6559 EVT VT, SDValue Op1) { 6560 SDVTList VTs = getVTList(VT); 6561 SDValue Ops[] = { Op1 }; 6562 return getMachineNode(Opcode, dl, VTs, Ops); 6563 } 6564 6565 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6566 EVT VT, SDValue Op1, SDValue Op2) { 6567 SDVTList VTs = getVTList(VT); 6568 SDValue Ops[] = { Op1, Op2 }; 6569 return getMachineNode(Opcode, dl, VTs, Ops); 6570 } 6571 6572 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6573 EVT VT, SDValue Op1, SDValue Op2, 6574 SDValue Op3) { 6575 SDVTList VTs = getVTList(VT); 6576 SDValue Ops[] = { Op1, Op2, Op3 }; 6577 return getMachineNode(Opcode, dl, VTs, Ops); 6578 } 6579 6580 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6581 EVT VT, ArrayRef<SDValue> Ops) { 6582 SDVTList VTs = getVTList(VT); 6583 return getMachineNode(Opcode, dl, VTs, Ops); 6584 } 6585 6586 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6587 EVT VT1, EVT VT2, SDValue Op1, 6588 SDValue Op2) { 6589 SDVTList VTs = getVTList(VT1, VT2); 6590 SDValue Ops[] = { Op1, Op2 }; 6591 return getMachineNode(Opcode, dl, VTs, Ops); 6592 } 6593 6594 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6595 EVT VT1, EVT VT2, SDValue Op1, 6596 SDValue Op2, SDValue Op3) { 6597 SDVTList VTs = getVTList(VT1, VT2); 6598 SDValue Ops[] = { Op1, Op2, Op3 }; 6599 return getMachineNode(Opcode, dl, VTs, Ops); 6600 } 6601 6602 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6603 EVT VT1, EVT VT2, 6604 ArrayRef<SDValue> Ops) { 6605 SDVTList VTs = getVTList(VT1, VT2); 6606 return getMachineNode(Opcode, dl, VTs, Ops); 6607 } 6608 6609 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6610 EVT VT1, EVT VT2, EVT VT3, 6611 SDValue Op1, SDValue Op2) { 6612 SDVTList VTs = getVTList(VT1, VT2, VT3); 6613 SDValue Ops[] = { Op1, Op2 }; 6614 return getMachineNode(Opcode, dl, VTs, Ops); 6615 } 6616 6617 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6618 EVT VT1, EVT VT2, EVT VT3, 6619 SDValue Op1, SDValue Op2, 6620 SDValue Op3) { 6621 SDVTList VTs = getVTList(VT1, VT2, VT3); 6622 SDValue Ops[] = { Op1, Op2, Op3 }; 6623 return getMachineNode(Opcode, dl, VTs, Ops); 6624 } 6625 6626 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6627 EVT VT1, EVT VT2, EVT VT3, 6628 ArrayRef<SDValue> Ops) { 6629 SDVTList VTs = getVTList(VT1, VT2, VT3); 6630 return getMachineNode(Opcode, dl, VTs, Ops); 6631 } 6632 6633 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6634 ArrayRef<EVT> ResultTys, 6635 ArrayRef<SDValue> Ops) { 6636 SDVTList VTs = getVTList(ResultTys); 6637 return getMachineNode(Opcode, dl, VTs, Ops); 6638 } 6639 6640 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 6641 SDVTList VTs, 6642 ArrayRef<SDValue> Ops) { 6643 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 6644 MachineSDNode *N; 6645 void *IP = nullptr; 6646 6647 if (DoCSE) { 6648 FoldingSetNodeID ID; 6649 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 6650 IP = nullptr; 6651 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 6652 return 
cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 6653 } 6654 } 6655 6656 // Allocate a new MachineSDNode. 6657 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 6658 createOperands(N, Ops); 6659 6660 if (DoCSE) 6661 CSEMap.InsertNode(N, IP); 6662 6663 InsertNode(N); 6664 return N; 6665 } 6666 6667 /// getTargetExtractSubreg - A convenience function for creating 6668 /// TargetOpcode::EXTRACT_SUBREG nodes. 6669 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 6670 SDValue Operand) { 6671 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 6672 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 6673 VT, Operand, SRIdxVal); 6674 return SDValue(Subreg, 0); 6675 } 6676 6677 /// getTargetInsertSubreg - A convenience function for creating 6678 /// TargetOpcode::INSERT_SUBREG nodes. 6679 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 6680 SDValue Operand, SDValue Subreg) { 6681 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 6682 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 6683 VT, Operand, Subreg, SRIdxVal); 6684 return SDValue(Result, 0); 6685 } 6686 6687 /// getNodeIfExists - Get the specified node if it's already available, or 6688 /// else return NULL. 6689 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 6690 ArrayRef<SDValue> Ops, 6691 const SDNodeFlags Flags) { 6692 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 6693 FoldingSetNodeID ID; 6694 AddNodeIDNode(ID, Opcode, VTList, Ops); 6695 void *IP = nullptr; 6696 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 6697 E->intersectFlagsWith(Flags); 6698 return E; 6699 } 6700 } 6701 return nullptr; 6702 } 6703 6704 /// getDbgValue - Creates a SDDbgValue node. 6705 /// 6706 /// SDNode 6707 SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N, 6708 unsigned R, bool IsIndirect, uint64_t Off, 6709 const DebugLoc &DL, unsigned O) { 6710 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6711 "Expected inlined-at fields to agree"); 6712 return new (DbgInfo->getAlloc()) 6713 SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O); 6714 } 6715 6716 /// Constant 6717 SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr, 6718 const Value *C, uint64_t Off, 6719 const DebugLoc &DL, unsigned O) { 6720 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6721 "Expected inlined-at fields to agree"); 6722 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, Off, DL, O); 6723 } 6724 6725 /// FrameIndex 6726 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr, 6727 unsigned FI, uint64_t Off, 6728 const DebugLoc &DL, 6729 unsigned O) { 6730 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6731 "Expected inlined-at fields to agree"); 6732 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, Off, DL, O); 6733 } 6734 6735 namespace { 6736 6737 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 6738 /// pointed to by a use iterator is deleted, increment the use iterator 6739 /// so that it doesn't dangle. 6740 /// 6741 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 6742 SDNode::use_iterator &UI; 6743 SDNode::use_iterator &UE; 6744 6745 void NodeDeleted(SDNode *N, SDNode *E) override { 6746 // Increment the iterator as needed. 
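// If the node being deleted is the one the use iterator currently points at,
// step past every consecutive use of it so the caller never dereferences a
// dangling SDUse.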
6747 while (UI != UE && N == *UI) 6748 ++UI; 6749 } 6750 6751 public: 6752 RAUWUpdateListener(SelectionDAG &d, 6753 SDNode::use_iterator &ui, 6754 SDNode::use_iterator &ue) 6755 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 6756 }; 6757 6758 } 6759 6760 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 6761 /// This can cause recursive merging of nodes in the DAG. 6762 /// 6763 /// This version assumes From has a single result value. 6764 /// 6765 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { 6766 SDNode *From = FromN.getNode(); 6767 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && 6768 "Cannot replace with this method!"); 6769 assert(From != To.getNode() && "Cannot replace uses of with self"); 6770 6771 // Preserve Debug Values 6772 TransferDbgValues(FromN, To); 6773 6774 // Iterate over all the existing uses of From. New uses will be added 6775 // to the beginning of the use list, which we avoid visiting. 6776 // This specifically avoids visiting uses of From that arise while the 6777 // replacement is happening, because any such uses would be the result 6778 // of CSE: If an existing node looks like From after one of its operands 6779 // is replaced by To, we don't want to replace of all its users with To 6780 // too. See PR3018 for more info. 6781 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 6782 RAUWUpdateListener Listener(*this, UI, UE); 6783 while (UI != UE) { 6784 SDNode *User = *UI; 6785 6786 // This node is about to morph, remove its old self from the CSE maps. 6787 RemoveNodeFromCSEMaps(User); 6788 6789 // A user can appear in a use list multiple times, and when this 6790 // happens the uses are usually next to each other in the list. 6791 // To help reduce the number of CSE recomputations, process all 6792 // the uses of this user that we can find this way. 6793 do { 6794 SDUse &Use = UI.getUse(); 6795 ++UI; 6796 Use.set(To); 6797 } while (UI != UE && *UI == User); 6798 6799 // Now that we have modified User, add it back to the CSE maps. If it 6800 // already exists there, recursively merge the results together. 6801 AddModifiedNodeToCSEMaps(User); 6802 } 6803 6804 6805 // If we just RAUW'd the root, take note. 6806 if (FromN == getRoot()) 6807 setRoot(To); 6808 } 6809 6810 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 6811 /// This can cause recursive merging of nodes in the DAG. 6812 /// 6813 /// This version assumes that for each value of From, there is a 6814 /// corresponding value in To in the same position with the same type. 6815 /// 6816 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) { 6817 #ifndef NDEBUG 6818 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 6819 assert((!From->hasAnyUseOfValue(i) || 6820 From->getValueType(i) == To->getValueType(i)) && 6821 "Cannot use this version of ReplaceAllUsesWith!"); 6822 #endif 6823 6824 // Handle the trivial case. 6825 if (From == To) 6826 return; 6827 6828 // Preserve Debug Info. Only do this if there's a use. 6829 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 6830 if (From->hasAnyUseOfValue(i)) { 6831 assert((i < To->getNumValues()) && "Invalid To location"); 6832 TransferDbgValues(SDValue(From, i), SDValue(To, i)); 6833 } 6834 6835 // Iterate over just the existing users of From. See the comments in 6836 // the ReplaceAllUsesWith above. 
6837 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 6838 RAUWUpdateListener Listener(*this, UI, UE); 6839 while (UI != UE) { 6840 SDNode *User = *UI; 6841 6842 // This node is about to morph, remove its old self from the CSE maps. 6843 RemoveNodeFromCSEMaps(User); 6844 6845 // A user can appear in a use list multiple times, and when this 6846 // happens the uses are usually next to each other in the list. 6847 // To help reduce the number of CSE recomputations, process all 6848 // the uses of this user that we can find this way. 6849 do { 6850 SDUse &Use = UI.getUse(); 6851 ++UI; 6852 Use.setNode(To); 6853 } while (UI != UE && *UI == User); 6854 6855 // Now that we have modified User, add it back to the CSE maps. If it 6856 // already exists there, recursively merge the results together. 6857 AddModifiedNodeToCSEMaps(User); 6858 } 6859 6860 // If we just RAUW'd the root, take note. 6861 if (From == getRoot().getNode()) 6862 setRoot(SDValue(To, getRoot().getResNo())); 6863 } 6864 6865 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 6866 /// This can cause recursive merging of nodes in the DAG. 6867 /// 6868 /// This version can replace From with any result values. To must match the 6869 /// number and types of values returned by From. 6870 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 6871 if (From->getNumValues() == 1) // Handle the simple case efficiently. 6872 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 6873 6874 // Preserve Debug Info. 6875 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 6876 TransferDbgValues(SDValue(From, i), *To); 6877 6878 // Iterate over just the existing users of From. See the comments in 6879 // the ReplaceAllUsesWith above. 6880 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 6881 RAUWUpdateListener Listener(*this, UI, UE); 6882 while (UI != UE) { 6883 SDNode *User = *UI; 6884 6885 // This node is about to morph, remove its old self from the CSE maps. 6886 RemoveNodeFromCSEMaps(User); 6887 6888 // A user can appear in a use list multiple times, and when this 6889 // happens the uses are usually next to each other in the list. 6890 // To help reduce the number of CSE recomputations, process all 6891 // the uses of this user that we can find this way. 6892 do { 6893 SDUse &Use = UI.getUse(); 6894 const SDValue &ToOp = To[Use.getResNo()]; 6895 ++UI; 6896 Use.set(ToOp); 6897 } while (UI != UE && *UI == User); 6898 6899 // Now that we have modified User, add it back to the CSE maps. If it 6900 // already exists there, recursively merge the results together. 6901 AddModifiedNodeToCSEMaps(User); 6902 } 6903 6904 // If we just RAUW'd the root, take note. 6905 if (From == getRoot().getNode()) 6906 setRoot(SDValue(To[getRoot().getResNo()])); 6907 } 6908 6909 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 6910 /// uses of other values produced by From.getNode() alone. The Deleted 6911 /// vector is handled the same way as for ReplaceAllUsesWith. 6912 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 6913 // Handle the really simple, really trivial case efficiently. 6914 if (From == To) return; 6915 6916 // Handle the simple, trivial, case efficiently. 6917 if (From.getNode()->getNumValues() == 1) { 6918 ReplaceAllUsesWith(From, To); 6919 return; 6920 } 6921 6922 // Preserve Debug Info. 6923 TransferDbgValues(From, To); 6924 6925 // Iterate over just the existing users of From. 
See the comments in 6926 // the ReplaceAllUsesWith above. 6927 SDNode::use_iterator UI = From.getNode()->use_begin(), 6928 UE = From.getNode()->use_end(); 6929 RAUWUpdateListener Listener(*this, UI, UE); 6930 while (UI != UE) { 6931 SDNode *User = *UI; 6932 bool UserRemovedFromCSEMaps = false; 6933 6934 // A user can appear in a use list multiple times, and when this 6935 // happens the uses are usually next to each other in the list. 6936 // To help reduce the number of CSE recomputations, process all 6937 // the uses of this user that we can find this way. 6938 do { 6939 SDUse &Use = UI.getUse(); 6940 6941 // Skip uses of different values from the same node. 6942 if (Use.getResNo() != From.getResNo()) { 6943 ++UI; 6944 continue; 6945 } 6946 6947 // If this node hasn't been modified yet, it's still in the CSE maps, 6948 // so remove its old self from the CSE maps. 6949 if (!UserRemovedFromCSEMaps) { 6950 RemoveNodeFromCSEMaps(User); 6951 UserRemovedFromCSEMaps = true; 6952 } 6953 6954 ++UI; 6955 Use.set(To); 6956 } while (UI != UE && *UI == User); 6957 6958 // We are iterating over all uses of the From node, so if a use 6959 // doesn't use the specific value, no changes are made. 6960 if (!UserRemovedFromCSEMaps) 6961 continue; 6962 6963 // Now that we have modified User, add it back to the CSE maps. If it 6964 // already exists there, recursively merge the results together. 6965 AddModifiedNodeToCSEMaps(User); 6966 } 6967 6968 // If we just RAUW'd the root, take note. 6969 if (From == getRoot()) 6970 setRoot(To); 6971 } 6972 6973 namespace { 6974 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 6975 /// to record information about a use. 6976 struct UseMemo { 6977 SDNode *User; 6978 unsigned Index; 6979 SDUse *Use; 6980 }; 6981 6982 /// operator< - Sort Memos by User. 6983 bool operator<(const UseMemo &L, const UseMemo &R) { 6984 return (intptr_t)L.User < (intptr_t)R.User; 6985 } 6986 } 6987 6988 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving 6989 /// uses of other values produced by From.getNode() alone. The same value 6990 /// may appear in both the From and To list. The Deleted vector is 6991 /// handled the same way as for ReplaceAllUsesWith. 6992 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From, 6993 const SDValue *To, 6994 unsigned Num){ 6995 // Handle the simple, trivial case efficiently. 6996 if (Num == 1) 6997 return ReplaceAllUsesOfValueWith(*From, *To); 6998 6999 TransferDbgValues(*From, *To); 7000 7001 // Read up all the uses and make records of them. This helps 7002 // processing new uses that are introduced during the 7003 // replacement process. 7004 SmallVector<UseMemo, 4> Uses; 7005 for (unsigned i = 0; i != Num; ++i) { 7006 unsigned FromResNo = From[i].getResNo(); 7007 SDNode *FromNode = From[i].getNode(); 7008 for (SDNode::use_iterator UI = FromNode->use_begin(), 7009 E = FromNode->use_end(); UI != E; ++UI) { 7010 SDUse &Use = UI.getUse(); 7011 if (Use.getResNo() == FromResNo) { 7012 UseMemo Memo = { *UI, i, &Use }; 7013 Uses.push_back(Memo); 7014 } 7015 } 7016 } 7017 7018 // Sort the uses, so that all the uses from a given User are together. 7019 std::sort(Uses.begin(), Uses.end()); 7020 7021 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size(); 7022 UseIndex != UseIndexEnd; ) { 7023 // We know that this user uses some value of From. If it is the right 7024 // value, update it. 
7025 SDNode *User = Uses[UseIndex].User; 7026 7027 // This node is about to morph, remove its old self from the CSE maps. 7028 RemoveNodeFromCSEMaps(User); 7029 7030 // The Uses array is sorted, so all the uses for a given User 7031 // are next to each other in the list. 7032 // To help reduce the number of CSE recomputations, process all 7033 // the uses of this user that we can find this way. 7034 do { 7035 unsigned i = Uses[UseIndex].Index; 7036 SDUse &Use = *Uses[UseIndex].Use; 7037 ++UseIndex; 7038 7039 Use.set(To[i]); 7040 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User); 7041 7042 // Now that we have modified User, add it back to the CSE maps. If it 7043 // already exists there, recursively merge the results together. 7044 AddModifiedNodeToCSEMaps(User); 7045 } 7046 } 7047 7048 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG 7049 /// based on their topological order. It returns the number of nodes in the 7050 /// DAG, which is one more than the largest id assigned. 7051 unsigned SelectionDAG::AssignTopologicalOrder() { 7052 7053 unsigned DAGSize = 0; 7054 7055 // SortedPos tracks the progress of the algorithm. Nodes before it are 7056 // sorted, nodes after it are unsorted. When the algorithm completes 7057 // it is at the end of the list. 7058 allnodes_iterator SortedPos = allnodes_begin(); 7059 7060 // Visit all the nodes. Move nodes with no operands to the front of 7061 // the list immediately. Annotate nodes that do have operands with their 7062 // operand count. Before we do this, the Node Id fields of the nodes 7063 // may contain arbitrary values. After, the Node Id fields for nodes 7064 // before SortedPos will contain the topological sort index, and the 7065 // Node Id fields for nodes at SortedPos and after will contain the 7066 // count of outstanding operands. 7067 for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) { 7068 SDNode *N = &*I++; 7069 checkForCycles(N, this); 7070 unsigned Degree = N->getNumOperands(); 7071 if (Degree == 0) { 7072 // A node with no operands is already sorted; move it into place immediately. 7073 N->setNodeId(DAGSize++); 7074 allnodes_iterator Q(N); 7075 if (Q != SortedPos) 7076 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q)); 7077 assert(SortedPos != AllNodes.end() && "Overran node list"); 7078 ++SortedPos; 7079 } else { 7080 // Temporarily use the Node Id as scratch space for the degree count. 7081 N->setNodeId(Degree); 7082 } 7083 } 7084 7085 // Visit all the nodes. As we iterate, move nodes into sorted order, 7086 // such that by the time the end is reached all nodes will be sorted. 7087 for (SDNode &Node : allnodes()) { 7088 SDNode *N = &Node; 7089 checkForCycles(N, this); 7090 // N is in sorted position, so each of its users has one fewer operand 7091 // that still needs to be sorted. 7092 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 7093 UI != UE; ++UI) { 7094 SDNode *P = *UI; 7095 unsigned Degree = P->getNodeId(); 7096 assert(Degree != 0 && "Invalid node degree"); 7097 --Degree; 7098 if (Degree == 0) { 7099 // All of P's operands are sorted, so P may be sorted now. 7100 P->setNodeId(DAGSize++); 7101 if (P->getIterator() != SortedPos) 7102 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P)); 7103 assert(SortedPos != AllNodes.end() && "Overran node list"); 7104 ++SortedPos; 7105 } else { 7106 // Update P's outstanding operand count.
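<!-- placeholder removed -->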
7107 P->setNodeId(Degree); 7108 } 7109 } 7110 if (Node.getIterator() == SortedPos) { 7111 #ifndef NDEBUG 7112 allnodes_iterator I(N); 7113 SDNode *S = &*++I; 7114 dbgs() << "Overran sorted position:\n"; 7115 S->dumprFull(this); dbgs() << "\n"; 7116 dbgs() << "Checking if this is due to cycles\n"; 7117 checkForCycles(this, true); 7118 #endif 7119 llvm_unreachable(nullptr); 7120 } 7121 } 7122 7123 assert(SortedPos == AllNodes.end() && 7124 "Topological sort incomplete!"); 7125 assert(AllNodes.front().getOpcode() == ISD::EntryToken && 7126 "First node in topological sort is not the entry token!"); 7127 assert(AllNodes.front().getNodeId() == 0 && 7128 "First node in topological sort has non-zero id!"); 7129 assert(AllNodes.front().getNumOperands() == 0 && 7130 "First node in topological sort has operands!"); 7131 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 && 7132 "Last node in topological sort has unexpected id!"); 7133 assert(AllNodes.back().use_empty() && 7134 "Last node in topological sort has users!"); 7135 assert(DAGSize == allnodes_size() && "Node count mismatch!"); 7136 return DAGSize; 7137 } 7138 7139 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the 7140 /// value is produced by SD. 7141 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) { 7142 if (SD) { 7143 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue()); 7144 SD->setHasDebugValue(true); 7145 } 7146 DbgInfo->add(DB, SD, isParameter); 7147 } 7148 7149 /// TransferDbgValues - Transfer SDDbgValues. Called when replacing nodes. 7150 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) { 7151 if (From == To || !From.getNode()->getHasDebugValue()) 7152 return; 7153 SDNode *FromNode = From.getNode(); 7154 SDNode *ToNode = To.getNode(); 7155 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode); 7156 SmallVector<SDDbgValue *, 2> ClonedDVs; 7157 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end(); 7158 I != E; ++I) { 7159 SDDbgValue *Dbg = *I; 7160 // Only add DbgValues attached to the same ResNo.
7161 if (Dbg->getKind() == SDDbgValue::SDNODE && 7162 Dbg->getSDNode() == From.getNode() && 7163 Dbg->getResNo() == From.getResNo() && !Dbg->isInvalidated()) { 7164 assert(FromNode != ToNode && 7165 "Should not transfer Debug Values intranode"); 7166 SDDbgValue *Clone = 7167 getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode, 7168 To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(), 7169 Dbg->getDebugLoc(), Dbg->getOrder()); 7170 ClonedDVs.push_back(Clone); 7171 Dbg->setIsInvalidated(); 7172 } 7173 } 7174 for (SDDbgValue *I : ClonedDVs) 7175 AddDbgValue(I, ToNode, false); 7176 } 7177 7178 //===----------------------------------------------------------------------===// 7179 // SDNode Class 7180 //===----------------------------------------------------------------------===// 7181 7182 bool llvm::isNullConstant(SDValue V) { 7183 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7184 return Const != nullptr && Const->isNullValue(); 7185 } 7186 7187 bool llvm::isNullFPConstant(SDValue V) { 7188 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 7189 return Const != nullptr && Const->isZero() && !Const->isNegative(); 7190 } 7191 7192 bool llvm::isAllOnesConstant(SDValue V) { 7193 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7194 return Const != nullptr && Const->isAllOnesValue(); 7195 } 7196 7197 bool llvm::isOneConstant(SDValue V) { 7198 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7199 return Const != nullptr && Const->isOne(); 7200 } 7201 7202 bool llvm::isBitwiseNot(SDValue V) { 7203 return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1)); 7204 } 7205 7206 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) { 7207 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 7208 return CN; 7209 7210 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 7211 BitVector UndefElements; 7212 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 7213 7214 // BuildVectors can truncate their operands. Ignore that case here. 7215 // FIXME: We blindly ignore splats which include undef which is overly 7216 // pessimistic. 
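// Accept the splat only when no lane is undef and the splat constant has the
// vector's scalar type; BUILD_VECTOR operands can be wider than the element
// type after legalization, and such truncating splats are rejected here.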
7217 if (CN && UndefElements.none() && 7218 CN->getValueType(0) == N.getValueType().getScalarType()) 7219 return CN; 7220 } 7221 7222 return nullptr; 7223 } 7224 7225 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) { 7226 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 7227 return CN; 7228 7229 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 7230 BitVector UndefElements; 7231 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 7232 7233 if (CN && UndefElements.none()) 7234 return CN; 7235 } 7236 7237 return nullptr; 7238 } 7239 7240 HandleSDNode::~HandleSDNode() { 7241 DropOperands(); 7242 } 7243 7244 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 7245 const DebugLoc &DL, 7246 const GlobalValue *GA, EVT VT, 7247 int64_t o, unsigned char TF) 7248 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 7249 TheGlobal = GA; 7250 } 7251 7252 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 7253 EVT VT, unsigned SrcAS, 7254 unsigned DestAS) 7255 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 7256 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 7257 7258 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 7259 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 7260 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 7261 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 7262 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 7263 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 7264 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 7265 7266 // We check here that the size of the memory operand fits within the size of 7267 // the MMO. This is because the MMO might indicate only a possible address 7268 // range instead of specifying the affected memory addresses precisely. 7269 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!"); 7270 } 7271 7272 /// Profile - Gather unique data for the node. 7273 /// 7274 void SDNode::Profile(FoldingSetNodeID &ID) const { 7275 AddNodeIDNode(ID, this); 7276 } 7277 7278 namespace { 7279 struct EVTArray { 7280 std::vector<EVT> VTs; 7281 7282 EVTArray() { 7283 VTs.reserve(MVT::LAST_VALUETYPE); 7284 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 7285 VTs.push_back(MVT((MVT::SimpleValueType)i)); 7286 } 7287 }; 7288 } 7289 7290 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs; 7291 static ManagedStatic<EVTArray> SimpleVTArray; 7292 static ManagedStatic<sys::SmartMutex<true> > VTMutex; 7293 7294 /// getValueTypeList - Return a pointer to the specified value type. 7295 /// 7296 const EVT *SDNode::getValueTypeList(EVT VT) { 7297 if (VT.isExtended()) { 7298 sys::SmartScopedLock<true> Lock(*VTMutex); 7299 return &(*EVTs->insert(VT).first); 7300 } else { 7301 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 7302 "Value type out of range!"); 7303 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 7304 } 7305 } 7306 7307 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 7308 /// indicated value. This method ignores uses of other values defined by this 7309 /// operation. 
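// Illustrative sketch, not part of the original source (Ld is assumed to be a
// LoadSDNode* already in scope): the result number selects which value is
// counted, e.g. result 1 of a load is its chain:
//   bool ChainUsedOnce = Ld->hasNUsesOfValue(1, 1);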
7310 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { 7311 assert(Value < getNumValues() && "Bad value!"); 7312 7313 // TODO: Only iterate over uses of a given value of the node 7314 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { 7315 if (UI.getUse().getResNo() == Value) { 7316 if (NUses == 0) 7317 return false; 7318 --NUses; 7319 } 7320 } 7321 7322 // Found exactly the right number of uses? 7323 return NUses == 0; 7324 } 7325 7326 7327 /// hasAnyUseOfValue - Return true if there are any use of the indicated 7328 /// value. This method ignores uses of other values defined by this operation. 7329 bool SDNode::hasAnyUseOfValue(unsigned Value) const { 7330 assert(Value < getNumValues() && "Bad value!"); 7331 7332 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) 7333 if (UI.getUse().getResNo() == Value) 7334 return true; 7335 7336 return false; 7337 } 7338 7339 7340 /// isOnlyUserOf - Return true if this node is the only use of N. 7341 /// 7342 bool SDNode::isOnlyUserOf(const SDNode *N) const { 7343 bool Seen = false; 7344 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 7345 SDNode *User = *I; 7346 if (User == this) 7347 Seen = true; 7348 else 7349 return false; 7350 } 7351 7352 return Seen; 7353 } 7354 7355 /// Return true if the only users of N are contained in Nodes. 7356 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) { 7357 bool Seen = false; 7358 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 7359 SDNode *User = *I; 7360 if (llvm::any_of(Nodes, 7361 [&User](const SDNode *Node) { return User == Node; })) 7362 Seen = true; 7363 else 7364 return false; 7365 } 7366 7367 return Seen; 7368 } 7369 7370 /// isOperand - Return true if this node is an operand of N. 7371 /// 7372 bool SDValue::isOperandOf(const SDNode *N) const { 7373 for (const SDValue &Op : N->op_values()) 7374 if (*this == Op) 7375 return true; 7376 return false; 7377 } 7378 7379 bool SDNode::isOperandOf(const SDNode *N) const { 7380 for (const SDValue &Op : N->op_values()) 7381 if (this == Op.getNode()) 7382 return true; 7383 return false; 7384 } 7385 7386 /// reachesChainWithoutSideEffects - Return true if this operand (which must 7387 /// be a chain) reaches the specified operand without crossing any 7388 /// side-effecting instructions on any chain path. In practice, this looks 7389 /// through token factors and non-volatile loads. In order to remain efficient, 7390 /// this only looks a couple of nodes in, it does not do an exhaustive search. 7391 /// 7392 /// Note that we only need to examine chains when we're searching for 7393 /// side-effects; SelectionDAG requires that all side-effects are represented 7394 /// by chains, even if another operand would force a specific ordering. This 7395 /// constraint is necessary to allow transformations like splitting loads. 7396 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, 7397 unsigned Depth) const { 7398 if (*this == Dest) return true; 7399 7400 // Don't search too deeply, we just want to be able to see through 7401 // TokenFactor's etc. 7402 if (Depth == 0) return false; 7403 7404 // If this is a token factor, all inputs to the TF happen in parallel. 7405 if (getOpcode() == ISD::TokenFactor) { 7406 // First, try a shallow search. 7407 if (is_contained((*this)->ops(), Dest)) { 7408 // We found the chain we want as an operand of this TokenFactor. 
7409 // Essentially, we reach the chain without side-effects if we could 7410 // serialize the TokenFactor into a simple chain of operations with 7411 // Dest as the last operation. This is automatically true if the 7412 // chain has one use: there are no other ordering constraints. 7413 // If the chain has more than one use, we give up: some other 7414 // use of Dest might force a side-effect between Dest and the current 7415 // node. 7416 if (Dest.hasOneUse()) 7417 return true; 7418 } 7419 // Next, try a deep search: check whether every operand of the TokenFactor 7420 // reaches Dest. 7421 return all_of((*this)->ops(), [=](SDValue Op) { 7422 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 7423 }); 7424 } 7425 7426 // Loads don't have side effects, look through them. 7427 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 7428 if (!Ld->isVolatile()) 7429 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 7430 } 7431 return false; 7432 } 7433 7434 bool SDNode::hasPredecessor(const SDNode *N) const { 7435 SmallPtrSet<const SDNode *, 32> Visited; 7436 SmallVector<const SDNode *, 16> Worklist; 7437 Worklist.push_back(this); 7438 return hasPredecessorHelper(N, Visited, Worklist); 7439 } 7440 7441 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 7442 this->Flags.intersectWith(Flags); 7443 } 7444 7445 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 7446 assert(N->getNumValues() == 1 && 7447 "Can't unroll a vector with multiple results!"); 7448 7449 EVT VT = N->getValueType(0); 7450 unsigned NE = VT.getVectorNumElements(); 7451 EVT EltVT = VT.getVectorElementType(); 7452 SDLoc dl(N); 7453 7454 SmallVector<SDValue, 8> Scalars; 7455 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 7456 7457 // If ResNE is 0, fully unroll the vector op. 7458 if (ResNE == 0) 7459 ResNE = NE; 7460 else if (NE > ResNE) 7461 NE = ResNE; 7462 7463 unsigned i; 7464 for (i= 0; i != NE; ++i) { 7465 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 7466 SDValue Operand = N->getOperand(j); 7467 EVT OperandVT = Operand.getValueType(); 7468 if (OperandVT.isVector()) { 7469 // A vector operand; extract a single element. 7470 EVT OperandEltVT = OperandVT.getVectorElementType(); 7471 Operands[j] = 7472 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand, 7473 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout()))); 7474 } else { 7475 // A scalar operand; just use it as is. 
7476 Operands[j] = Operand; 7477 } 7478 } 7479 7480 switch (N->getOpcode()) { 7481 default: { 7482 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 7483 N->getFlags())); 7484 break; 7485 } 7486 case ISD::VSELECT: 7487 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 7488 break; 7489 case ISD::SHL: 7490 case ISD::SRA: 7491 case ISD::SRL: 7492 case ISD::ROTL: 7493 case ISD::ROTR: 7494 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 7495 getShiftAmountOperand(Operands[0].getValueType(), 7496 Operands[1]))); 7497 break; 7498 case ISD::SIGN_EXTEND_INREG: 7499 case ISD::FP_ROUND_INREG: { 7500 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 7501 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 7502 Operands[0], 7503 getValueType(ExtVT))); 7504 } 7505 } 7506 } 7507 7508 for (; i < ResNE; ++i) 7509 Scalars.push_back(getUNDEF(EltVT)); 7510 7511 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 7512 return getBuildVector(VecVT, dl, Scalars); 7513 } 7514 7515 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 7516 LoadSDNode *Base, 7517 unsigned Bytes, 7518 int Dist) const { 7519 if (LD->isVolatile() || Base->isVolatile()) 7520 return false; 7521 if (LD->isIndexed() || Base->isIndexed()) 7522 return false; 7523 if (LD->getChain() != Base->getChain()) 7524 return false; 7525 EVT VT = LD->getValueType(0); 7526 if (VT.getSizeInBits() / 8 != Bytes) 7527 return false; 7528 7529 SDValue Loc = LD->getOperand(1); 7530 SDValue BaseLoc = Base->getOperand(1); 7531 if (Loc.getOpcode() == ISD::FrameIndex) { 7532 if (BaseLoc.getOpcode() != ISD::FrameIndex) 7533 return false; 7534 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 7535 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 7536 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 7537 int FS = MFI.getObjectSize(FI); 7538 int BFS = MFI.getObjectSize(BFI); 7539 if (FS != BFS || FS != (int)Bytes) return false; 7540 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes); 7541 } 7542 7543 // Handle X + C. 7544 if (isBaseWithConstantOffset(Loc)) { 7545 int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue(); 7546 if (Loc.getOperand(0) == BaseLoc) { 7547 // If the base location is a simple address with no offset itself, then 7548 // the second load's first add operand should be the base address. 7549 if (LocOffset == Dist * (int)Bytes) 7550 return true; 7551 } else if (isBaseWithConstantOffset(BaseLoc)) { 7552 // The base location itself has an offset, so subtract that value from the 7553 // second load's offset before comparing to distance * size. 7554 int64_t BOffset = 7555 cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue(); 7556 if (Loc.getOperand(0) == BaseLoc.getOperand(0)) { 7557 if ((LocOffset - BOffset) == Dist * (int)Bytes) 7558 return true; 7559 } 7560 } 7561 } 7562 const GlobalValue *GV1 = nullptr; 7563 const GlobalValue *GV2 = nullptr; 7564 int64_t Offset1 = 0; 7565 int64_t Offset2 = 0; 7566 bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1); 7567 bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 7568 if (isGA1 && isGA2 && GV1 == GV2) 7569 return Offset1 == (Offset2 + Dist*Bytes); 7570 return false; 7571 } 7572 7573 7574 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if 7575 /// it cannot be inferred. 7576 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { 7577 // If this is a GlobalAddress + cst, return the alignment. 
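// The global's alignment is recovered from the known-zero low bits of its
// address (via computeKnownBits) and then combined with the constant offset
// through MinAlign so the result stays conservative.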
7578 const GlobalValue *GV; 7579 int64_t GVOffset = 0; 7580 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { 7581 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); 7582 KnownBits Known(PtrWidth); 7583 llvm::computeKnownBits(GV, Known, getDataLayout()); 7584 unsigned AlignBits = Known.countMinTrailingZeros(); 7585 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; 7586 if (Align) 7587 return MinAlign(Align, GVOffset); 7588 } 7589 7590 // If this is a direct reference to a stack slot, use information about the 7591 // stack slot's alignment. 7592 int FrameIdx = 1 << 31; 7593 int64_t FrameOffset = 0; 7594 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) { 7595 FrameIdx = FI->getIndex(); 7596 } else if (isBaseWithConstantOffset(Ptr) && 7597 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 7598 // Handle FI+Cst 7599 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 7600 FrameOffset = Ptr.getConstantOperandVal(1); 7601 } 7602 7603 if (FrameIdx != (1 << 31)) { 7604 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 7605 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx), 7606 FrameOffset); 7607 return FIInfoAlign; 7608 } 7609 7610 return 0; 7611 } 7612 7613 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type 7614 /// which is split (or expanded) into two not necessarily identical pieces. 7615 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const { 7616 // Currently all types are split in half. 7617 EVT LoVT, HiVT; 7618 if (!VT.isVector()) 7619 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); 7620 else 7621 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext()); 7622 7623 return std::make_pair(LoVT, HiVT); 7624 } 7625 7626 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the 7627 /// low/high part. 7628 std::pair<SDValue, SDValue> 7629 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, 7630 const EVT &HiVT) { 7631 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <= 7632 N.getValueType().getVectorNumElements() && 7633 "More vector elements requested than available!"); 7634 SDValue Lo, Hi; 7635 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, 7636 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout()))); 7637 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, 7638 getConstant(LoVT.getVectorNumElements(), DL, 7639 TLI->getVectorIdxTy(getDataLayout()))); 7640 return std::make_pair(Lo, Hi); 7641 } 7642 7643 void SelectionDAG::ExtractVectorElements(SDValue Op, 7644 SmallVectorImpl<SDValue> &Args, 7645 unsigned Start, unsigned Count) { 7646 EVT VT = Op.getValueType(); 7647 if (Count == 0) 7648 Count = VT.getVectorNumElements(); 7649 7650 EVT EltVT = VT.getVectorElementType(); 7651 EVT IdxTy = TLI->getVectorIdxTy(getDataLayout()); 7652 SDLoc SL(Op); 7653 for (unsigned i = Start, e = Start + Count; i != e; ++i) { 7654 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 7655 Op, getConstant(i, SL, IdxTy))); 7656 } 7657 } 7658 7659 // getAddressSpace - Return the address space this GlobalAddress belongs to. 
7660 unsigned GlobalAddressSDNode::getAddressSpace() const { 7661 return getGlobal()->getType()->getAddressSpace(); 7662 } 7663 7664 7665 Type *ConstantPoolSDNode::getType() const { 7666 if (isMachineConstantPoolEntry()) 7667 return Val.MachineCPVal->getType(); 7668 return Val.ConstVal->getType(); 7669 } 7670 7671 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef, 7672 unsigned &SplatBitSize, 7673 bool &HasAnyUndefs, 7674 unsigned MinSplatBits, 7675 bool IsBigEndian) const { 7676 EVT VT = getValueType(0); 7677 assert(VT.isVector() && "Expected a vector type"); 7678 unsigned VecWidth = VT.getSizeInBits(); 7679 if (MinSplatBits > VecWidth) 7680 return false; 7681 7682 // FIXME: The widths are based on this node's type, but build vectors can 7683 // truncate their operands. 7684 SplatValue = APInt(VecWidth, 0); 7685 SplatUndef = APInt(VecWidth, 0); 7686 7687 // Get the bits. Bits with undefined values (when the corresponding element 7688 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared 7689 // in SplatValue. If any of the values are not constant, give up and return 7690 // false. 7691 unsigned int NumOps = getNumOperands(); 7692 assert(NumOps > 0 && "isConstantSplat has 0-size build vector"); 7693 unsigned EltWidth = VT.getScalarSizeInBits(); 7694 7695 for (unsigned j = 0; j < NumOps; ++j) { 7696 unsigned i = IsBigEndian ? NumOps - 1 - j : j; 7697 SDValue OpVal = getOperand(i); 7698 unsigned BitPos = j * EltWidth; 7699 7700 if (OpVal.isUndef()) 7701 SplatUndef.setBits(BitPos, BitPos + EltWidth); 7702 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal)) 7703 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos); 7704 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal)) 7705 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos); 7706 else 7707 return false; 7708 } 7709 7710 // The build_vector is all constants or undefs. Find the smallest element 7711 // size that splats the vector. 7712 HasAnyUndefs = (SplatUndef != 0); 7713 7714 // FIXME: This does not work for vectors with elements less than 8 bits. 7715 while (VecWidth > 8) { 7716 unsigned HalfSize = VecWidth / 2; 7717 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize); 7718 APInt LowValue = SplatValue.trunc(HalfSize); 7719 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize); 7720 APInt LowUndef = SplatUndef.trunc(HalfSize); 7721 7722 // If the two halves do not match (ignoring undef bits), stop here. 
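// For a concrete illustration of the halving: a <4 x i8> build_vector of four
// 0xAA constants enters the loop as the 32-bit pattern 0xAAAAAAAA; the 16-bit
// halves agree, then the 8-bit halves agree, so SplatBitSize narrows to 8.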
7723 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) || 7724 MinSplatBits > HalfSize) 7725 break; 7726 7727 SplatValue = HighValue | LowValue; 7728 SplatUndef = HighUndef & LowUndef; 7729 7730 VecWidth = HalfSize; 7731 } 7732 7733 SplatBitSize = VecWidth; 7734 return true; 7735 } 7736 7737 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { 7738 if (UndefElements) { 7739 UndefElements->clear(); 7740 UndefElements->resize(getNumOperands()); 7741 } 7742 SDValue Splatted; 7743 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 7744 SDValue Op = getOperand(i); 7745 if (Op.isUndef()) { 7746 if (UndefElements) 7747 (*UndefElements)[i] = true; 7748 } else if (!Splatted) { 7749 Splatted = Op; 7750 } else if (Splatted != Op) { 7751 return SDValue(); 7752 } 7753 } 7754 7755 if (!Splatted) { 7756 assert(getOperand(0).isUndef() && 7757 "Can only have a splat without a constant for all undefs."); 7758 return getOperand(0); 7759 } 7760 7761 return Splatted; 7762 } 7763 7764 ConstantSDNode * 7765 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const { 7766 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements)); 7767 } 7768 7769 ConstantFPSDNode * 7770 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const { 7771 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements)); 7772 } 7773 7774 int32_t 7775 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, 7776 uint32_t BitWidth) const { 7777 if (ConstantFPSDNode *CN = 7778 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) { 7779 bool IsExact; 7780 APSInt IntVal(BitWidth); 7781 const APFloat &APF = CN->getValueAPF(); 7782 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) != 7783 APFloat::opOK || 7784 !IsExact) 7785 return -1; 7786 7787 return IntVal.exactLogBase2(); 7788 } 7789 return -1; 7790 } 7791 7792 bool BuildVectorSDNode::isConstant() const { 7793 for (const SDValue &Op : op_values()) { 7794 unsigned Opc = Op.getOpcode(); 7795 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP) 7796 return false; 7797 } 7798 return true; 7799 } 7800 7801 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { 7802 // Find the first non-undef value in the shuffle mask. 7803 unsigned i, e; 7804 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i) 7805 /* search */; 7806 7807 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!"); 7808 7809 // Make sure all remaining elements are either undef or the same as the first 7810 // non-undef value. 7811 for (int Idx = Mask[i]; i != e; ++i) 7812 if (Mask[i] >= 0 && Mask[i] != Idx) 7813 return false; 7814 return true; 7815 } 7816 7817 // \brief Returns the SDNode if it is a constant integer BuildVector 7818 // or constant integer. 7819 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) { 7820 if (isa<ConstantSDNode>(N)) 7821 return N.getNode(); 7822 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) 7823 return N.getNode(); 7824 // Treat a GlobalAddress supporting constant offset folding as a 7825 // constant integer. 
7826 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N)) 7827 if (GA->getOpcode() == ISD::GlobalAddress && 7828 TLI->isOffsetFoldingLegal(GA)) 7829 return GA; 7830 return nullptr; 7831 } 7832 7833 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) { 7834 if (isa<ConstantFPSDNode>(N)) 7835 return N.getNode(); 7836 7837 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode())) 7838 return N.getNode(); 7839 7840 return nullptr; 7841 } 7842 7843 #ifndef NDEBUG 7844 static void checkForCyclesHelper(const SDNode *N, 7845 SmallPtrSetImpl<const SDNode*> &Visited, 7846 SmallPtrSetImpl<const SDNode*> &Checked, 7847 const llvm::SelectionDAG *DAG) { 7848 // If this node has already been checked, don't check it again. 7849 if (Checked.count(N)) 7850 return; 7851 7852 // If a node has already been visited on this depth-first walk, reject it as 7853 // a cycle. 7854 if (!Visited.insert(N).second) { 7855 errs() << "Detected cycle in SelectionDAG\n"; 7856 dbgs() << "Offending node:\n"; 7857 N->dumprFull(DAG); dbgs() << "\n"; 7858 abort(); 7859 } 7860 7861 for (const SDValue &Op : N->op_values()) 7862 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG); 7863 7864 Checked.insert(N); 7865 Visited.erase(N); 7866 } 7867 #endif 7868 7869 void llvm::checkForCycles(const llvm::SDNode *N, 7870 const llvm::SelectionDAG *DAG, 7871 bool force) { 7872 #ifndef NDEBUG 7873 bool check = force; 7874 #ifdef EXPENSIVE_CHECKS 7875 check = true; 7876 #endif // EXPENSIVE_CHECKS 7877 if (check) { 7878 assert(N && "Checking nonexistent SDNode"); 7879 SmallPtrSet<const SDNode*, 32> visited; 7880 SmallPtrSet<const SDNode*, 32> checked; 7881 checkForCyclesHelper(N, visited, checked, DAG); 7882 } 7883 #endif // !NDEBUG 7884 } 7885 7886 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) { 7887 checkForCycles(DAG->getRoot().getNode(), DAG, force); 7888 } 7889
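// Illustrative sketch, not part of the original source (DAG is assumed to be
// a SelectionDAG* already in scope): the cycle check can be forced on while
// debugging a suspicious DAG mutation, independent of EXPENSIVE_CHECKS:
//   llvm::checkForCycles(DAG, /*force=*/true);
// In builds compiled with NDEBUG the helper compiles down to a no-op.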