//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
#include <utility>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                          ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
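// Usage sketch (illustrative, not a new API): isValueValidForType answers
// whether a value survives a round trip through VT. For example,
// APFloat(0.5) is valid for MVT::f32, while APFloat(1.0E100) is not,
// because the conversion to single precision loses information.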
//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  EVT EltVT = N->getValueType(0).getVectorElementType();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs) &&
         EltVT.getSizeInBits() >= SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target
  // and a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}
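// Illustrative note: the countTrailingOnes() checks above are what make the
// promoted-element case work. A v8i8 build_vector whose operands were
// promoted to i32 constants of value 0xFF is still recognized as all-ones,
// because only the low 8 bits of each element are required to be set.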
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have
    // non-0 elements. We have to be a bit careful here, as the type of the
    // constant may not be the same as the type of the vector elements due to
    // type legalization (the elements are promoted to a legal type for the
    // target and a vector of a type may be legal when the base element type
    // is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the
    // individual constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (!Op.isUndef())
      return false;

  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}
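// A note on the bit tricks below (a sketch of the ISD::CondCode encoding,
// see ISD::CondCode in ISDOpcodes.h): E is bit 0, G is bit 1, L is bit 2,
// U is bit 3, and N (bit 4) marks integer comparisons, which do not care
// about ordering. For example, SETOLT sets only L, SETULE sets U|L|E, and
// SETNE is N|L|G ("less or greater").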
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if it is an unsigned comparison. Return zero if the operation does
/// not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when the operands are unordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the N bit.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
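// Illustrative identities for the helpers above (integer condition codes):
//   getSetCCSwappedOperands(ISD::SETLT)                  == ISD::SETGT
//   getSetCCInverse(ISD::SETLT, /*isInteger=*/true)      == ISD::SETGE
//   getSetCCOrOperation(ISD::SETLT, ISD::SETGT, true)    == ISD::SETNE
//   getSetCCAndOperation(ISD::SETLE, ISD::SETGE, true)   == ISD::SETEQ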
//===----------------------------------------------------------------------===//
//                         SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent
/// them solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID
/// data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID
/// data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}
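// Note: these profile routines are what drive CSE. Two nodes that hash the
// same opcode, value-type list, operands and custom payload are treated as
// identical, so requesting such a node again returns the existing node
// instead of allocating a duplicate.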
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
//                          SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}
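// Why glue blocks CSE: a Glue result ties a node to a specific neighboring
// node in the final schedule, so two glue-producing nodes are not
// interchangeable even when they are structurally identical.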
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a
  // reference to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there
    // are no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it
      // left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}
#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// \brief Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has
  // a flag result (which cannot be CSE'd) or is one of the special cases
  // that are not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}
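// Sketch of the merge cascade that AddModifiedNodeToCSEMaps (below) handles:
// if a node is mutated in place into a form that already exists, all of its
// users are redirected to the existing node; a user that thereby becomes
// structurally identical to yet another node is merged in turn.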
/// AddModifiedNodeToCSEMaps - The specified node has been removed from the
/// CSE maps and modified in place. Add it back to the CSE maps, unless an
/// identical node already exists, in which case transfer all its users to
/// the existing node. This transfer can potentially trigger recursive
/// merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    if (const SDNodeFlags *Flags = N->getFlags())
      Node->intersectFlagsWith(Flags);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    if (const SDNodeFlags *Flags = N->getFlags())
      Node->intersectFlagsWith(Flags);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    if (const SDNodeFlags *Flags = N->getFlags())
      Node->intersectFlagsWith(Flags);
  return Node;
}
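// Note: when an existing node is found, its flags are intersected with those
// of the modified node, so CSE only keeps optimization flags (nuw/nsw,
// fast-math bits) that both nodes carried.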
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
      UpdateListeners(nullptr) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf) {
  MF = &mf;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, const SDLoc &DL,
                                      SDVTList VTs, SDValue N1, SDValue N2,
                                      const SDNodeFlags *Flags) {
  SDValue Ops[] = {N1, N2};

  if (isBinOpWithFlags(Opcode)) {
    // If no flags were passed in, use a default flags object.
    SDNodeFlags F;
    if (Flags == nullptr)
      Flags = &F;

    auto *FN = newSDNode<BinaryWithFlagsSDNode>(Opcode, DL.getIROrder(),
                                                DL.getDebugLoc(), VTs, *Flags);
    createOperands(FN, Ops);

    return FN;
  }

  auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
  return N;
}
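// Two FindNodeOrInsertPos overloads follow. The plain overload refuses
// Constant/ConstantFP queries because those need the debug-location merging
// implemented by the SDLoc-taking overload below.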
SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}
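// Usage sketch: get{Any,S,Z}ExtOrTrunc extend when VT is wider than Op's
// type and truncate when it is narrower; when the widths are equal, the
// TRUNCATE path is taken and getNode is expected to fold the no-op truncate
// back to Op.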
SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                              EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
                                               EVT VT) {
  assert(VT.isVector() && "This DAG node is restricted to vector types.");
  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
         "The sizes of the input and result must match in order to perform the "
         "extend in-register.");
  assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
         "The destination vector type must have fewer lanes than the input.");
  return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
}
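// Note on getZeroExtendInReg above: zero-extending an i16 "in register"
// inside an i32 value produces (and Op, 0x0000FFFF) -- the mask from
// getLowBitsSet(32, 16) keeps the low VT-sized bits and clears the rest.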
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue TrueValue;
  switch (TLI->getBooleanContents(VT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    TrueValue = getConstant(1, DL, VT);
    break;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
                            VT);
    break;
  }
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}
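// Design note: getNOT flips every bit, while getLogicalNOT flips only the
// boolean value, XOR'ing with the target's "true" representation (1 for
// zero-or-one booleans, all-ones for zero-or-negative-one booleans).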
SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
         (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal
  // and needs to be promoted, for example v8i8 on ARM. In this case, promote
  // the inserted value (the type does not need to match the vector element
  // type). Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
           TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                           .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
    return getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  return Result;
}
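// Sketch of the expansion path above for a v2i64 splat on a 32-bit target:
// each i64 element is split into two i32 parts (low part first, reversed on
// big-endian targets), a v4i32 BUILD_VECTOR of the repeated parts is
// created, and the result is bitcast back to v2i64.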
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0,
  // and we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}
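// Note: the double-taking overload above is exact for f64, rounds via a
// plain (float) cast for f32, and rounds with rmNearestTiesToEven through
// APFloat::convert for f16, f80, f128 and ppcf128.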
SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
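// The same CSE pattern repeats for the remaining leaf nodes below: build a
// FoldingSetNodeID from the opcode, value type and payload, return the
// existing node if one is found, otherwise allocate and insert a new one.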
SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = MF->getFunction()->optForSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M
/// that point at N1 to point at N2 and indices that point at N2 to point at
/// N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}
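// getVectorShuffle below canonicalizes aggressively before creating a node:
// undef inputs fold away, "shuffle v, v" becomes "shuffle v, undef", splat
// inputs are blended, and identity masks return the input directly, so many
// shuffles never materialize as VECTOR_SHUFFLE nodes.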
SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(all_of(Mask, [&](int M) { return M < (NElts * 2); }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  // If shuffling a splat, try to blend the splat instead. We do this here so
  // that even when this arises during lowering we don't have to re-handle it.
  auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
    BitVector UndefElements;
    SDValue Splat = BV->getSplatValue(&UndefElements);
    if (!Splat)
      return;

    for (int i = 0; i < NElts; ++i) {
      if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
        continue;

      // If this input comes from undef, mark it as such.
      if (UndefElements[MaskVec[i] - Offset]) {
        MaskVec[i] = -1;
        continue;
      }

      // If we can blend a non-undef lane, use that instead.
      if (!UndefElements[i])
        MaskVec[i] = i + Offset;
    }
  };
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    BlendSplat(N1BV, 0);
  if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
    BlendSplat(N2BV, NElts);

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts. We check that these don't change the number
    // (and size) of elements and just change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
        V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements match or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is
  // released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc);

  auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
                                           dl.getDebugLoc(), MaskAlloc);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
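// Usage sketch for the commuted form below: getCommutedVectorShuffle turns
//   shuffle A, B, <0, 5, 2, 7>   into   shuffle B, A, <4, 1, 6, 3>
// i.e. it swaps the operands and remaps every mask index across the operand
// boundary (undef (-1) entries stay undef).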
SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
  MVT VT = SV.getSimpleValueType(0);
  SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
  ShuffleVectorSDNode::commuteMask(MaskVec);

  SDValue Op0 = SV.getOperand(0);
  SDValue Op1 = SV.getOperand(1);
  return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
  ID.AddInteger(RegNo);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
  ID.AddPointer(RegMask);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
1610 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1611 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc); 1612 1613 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1614 dl.getDebugLoc(), MaskAlloc); 1615 createOperands(N, Ops); 1616 1617 CSEMap.InsertNode(N, IP); 1618 InsertNode(N); 1619 return SDValue(N, 0); 1620 } 1621 1622 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1623 MVT VT = SV.getSimpleValueType(0); 1624 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1625 ShuffleVectorSDNode::commuteMask(MaskVec); 1626 1627 SDValue Op0 = SV.getOperand(0); 1628 SDValue Op1 = SV.getOperand(1); 1629 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1630 } 1631 1632 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1633 FoldingSetNodeID ID; 1634 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1635 ID.AddInteger(RegNo); 1636 void *IP = nullptr; 1637 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1638 return SDValue(E, 0); 1639 1640 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1641 CSEMap.InsertNode(N, IP); 1642 InsertNode(N); 1643 return SDValue(N, 0); 1644 } 1645 1646 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1647 FoldingSetNodeID ID; 1648 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1649 ID.AddPointer(RegMask); 1650 void *IP = nullptr; 1651 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1652 return SDValue(E, 0); 1653 1654 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1655 CSEMap.InsertNode(N, IP); 1656 InsertNode(N); 1657 return SDValue(N, 0); 1658 } 1659 1660 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1661 MCSymbol *Label) { 1662 FoldingSetNodeID ID; 1663 SDValue Ops[] = { Root }; 1664 AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops); 1665 ID.AddPointer(Label); 1666 void *IP = nullptr; 1667 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1668 return SDValue(E, 0); 1669 1670 auto *N = newSDNode<EHLabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label); 1671 createOperands(N, Ops); 1672 1673 CSEMap.InsertNode(N, IP); 1674 InsertNode(N); 1675 return SDValue(N, 0); 1676 } 1677 1678 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1679 int64_t Offset, 1680 bool isTarget, 1681 unsigned char TargetFlags) { 1682 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1683 1684 FoldingSetNodeID ID; 1685 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1686 ID.AddPointer(BA); 1687 ID.AddInteger(Offset); 1688 ID.AddInteger(TargetFlags); 1689 void *IP = nullptr; 1690 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1691 return SDValue(E, 0); 1692 1693 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1694 CSEMap.InsertNode(N, IP); 1695 InsertNode(N); 1696 return SDValue(N, 0); 1697 } 1698 1699 SDValue SelectionDAG::getSrcValue(const Value *V) { 1700 assert((!V || V->getType()->isPointerTy()) && 1701 "SrcValue is not a pointer?"); 1702 1703 FoldingSetNodeID ID; 1704 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1705 ID.AddPointer(V); 1706 1707 void *IP = nullptr; 1708 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1709 return SDValue(E, 0); 1710 1711 auto *N = newSDNode<SrcValueSDNode>(V); 1712 CSEMap.InsertNode(N, IP); 1713 InsertNode(N); 1714 return SDValue(N, 0); 1715 } 1716 1717 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1718 FoldingSetNodeID ID; 1719 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1720 ID.AddPointer(MD); 1721 1722 void *IP = nullptr; 1723 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1724 return SDValue(E, 0); 1725 1726 auto *N = newSDNode<MDNodeSDNode>(MD); 1727 CSEMap.InsertNode(N, IP); 1728 InsertNode(N); 1729 return SDValue(N, 0); 1730 } 1731 1732 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1733 if (VT == V.getValueType()) 1734 return V; 1735 1736 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1737 } 1738 1739 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1740 unsigned SrcAS, unsigned DestAS) { 1741 SDValue Ops[] = {Ptr}; 1742 FoldingSetNodeID ID; 1743 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1744 ID.AddInteger(SrcAS); 1745 ID.AddInteger(DestAS); 1746 1747 void *IP = nullptr; 1748 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1749 return SDValue(E, 0); 1750 1751 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1752 VT, SrcAS, DestAS); 1753 createOperands(N, Ops); 1754 1755 CSEMap.InsertNode(N, IP); 1756 InsertNode(N); 1757 return SDValue(N, 0); 1758 } 1759 1760 /// getShiftAmountOperand - Return the specified value casted to 1761 /// the target's desired shift amount type. 
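/// For example, assuming a target whose shift amount type for i64 shifts is
/// i32 (illustrative; the actual type comes from TLI->getShiftAmountTy):
///   Amt = DAG.getShiftAmountOperand(MVT::i64, Amt8);  // i8 amount: zext
///   Amt = DAG.getShiftAmountOperand(MVT::i64, Amt64); // i64 amount: trunc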
1762 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1763 EVT OpTy = Op.getValueType(); 1764 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1765 if (OpTy == ShTy || OpTy.isVector()) return Op; 1766 1767 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1768 } 1769 1770 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1771 SDLoc dl(Node); 1772 const TargetLowering &TLI = getTargetLoweringInfo(); 1773 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1774 EVT VT = Node->getValueType(0); 1775 SDValue Tmp1 = Node->getOperand(0); 1776 SDValue Tmp2 = Node->getOperand(1); 1777 unsigned Align = Node->getConstantOperandVal(3); 1778 1779 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1780 Tmp2, MachinePointerInfo(V)); 1781 SDValue VAList = VAListLoad; 1782 1783 if (Align > TLI.getMinStackArgumentAlignment()) { 1784 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 1785 1786 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1787 getConstant(Align - 1, dl, VAList.getValueType())); 1788 1789 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1790 getConstant(-(int64_t)Align, dl, VAList.getValueType())); 1791 } 1792 1793 // Increment the pointer, VAList, to the next vaarg 1794 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1795 getConstant(getDataLayout().getTypeAllocSize( 1796 VT.getTypeForEVT(*getContext())), 1797 dl, VAList.getValueType())); 1798 // Store the incremented VAList to the legalized pointer 1799 Tmp1 = 1800 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1801 // Load the actual argument out of the pointer VAList 1802 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1803 } 1804 1805 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1806 SDLoc dl(Node); 1807 const TargetLowering &TLI = getTargetLoweringInfo(); 1808 // This defaults to loading a pointer from the input and storing it to the 1809 // output, returning the chain. 
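  // In DAG terms, va_copy(Dst, Src) becomes (a sketch):
  //   t1: ch,ptr = load Src        ; read the live va_list value
  //   t2: ch     = store t1, Dst   ; write it to the destination slot
  // and t2's chain is the result.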
1810 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1811 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1812 SDValue Tmp1 = 1813 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1814 Node->getOperand(2), MachinePointerInfo(VS)); 1815 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1816 MachinePointerInfo(VD)); 1817 } 1818 1819 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1820 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1821 unsigned ByteSize = VT.getStoreSize(); 1822 Type *Ty = VT.getTypeForEVT(*getContext()); 1823 unsigned StackAlign = 1824 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1825 1826 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1827 return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout())); 1828 } 1829 1830 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1831 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 1832 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1833 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1834 const DataLayout &DL = getDataLayout(); 1835 unsigned Align = 1836 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1837 1838 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1839 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 1840 return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout())); 1841 } 1842 1843 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 1844 ISD::CondCode Cond, const SDLoc &dl) { 1845 // These setcc operations always fold. 1846 switch (Cond) { 1847 default: break; 1848 case ISD::SETFALSE: 1849 case ISD::SETFALSE2: return getConstant(0, dl, VT); 1850 case ISD::SETTRUE: 1851 case ISD::SETTRUE2: { 1852 TargetLowering::BooleanContent Cnt = 1853 TLI->getBooleanContents(N1->getValueType(0)); 1854 return getConstant( 1855 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? 
-1ULL : 1, dl, 1856 VT); 1857 } 1858 1859 case ISD::SETOEQ: 1860 case ISD::SETOGT: 1861 case ISD::SETOGE: 1862 case ISD::SETOLT: 1863 case ISD::SETOLE: 1864 case ISD::SETONE: 1865 case ISD::SETO: 1866 case ISD::SETUO: 1867 case ISD::SETUEQ: 1868 case ISD::SETUNE: 1869 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!"); 1870 break; 1871 } 1872 1873 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 1874 const APInt &C2 = N2C->getAPIntValue(); 1875 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 1876 const APInt &C1 = N1C->getAPIntValue(); 1877 1878 switch (Cond) { 1879 default: llvm_unreachable("Unknown integer setcc!"); 1880 case ISD::SETEQ: return getConstant(C1 == C2, dl, VT); 1881 case ISD::SETNE: return getConstant(C1 != C2, dl, VT); 1882 case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT); 1883 case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT); 1884 case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT); 1885 case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT); 1886 case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT); 1887 case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT); 1888 case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT); 1889 case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT); 1890 } 1891 } 1892 } 1893 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) { 1894 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) { 1895 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); 1896 switch (Cond) { 1897 default: break; 1898 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 1899 return getUNDEF(VT); 1900 LLVM_FALLTHROUGH; 1901 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT); 1902 case ISD::SETNE: if (R==APFloat::cmpUnordered) 1903 return getUNDEF(VT); 1904 LLVM_FALLTHROUGH; 1905 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan || 1906 R==APFloat::cmpLessThan, dl, VT); 1907 case ISD::SETLT: if (R==APFloat::cmpUnordered) 1908 return getUNDEF(VT); 1909 LLVM_FALLTHROUGH; 1910 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT); 1911 case ISD::SETGT: if (R==APFloat::cmpUnordered) 1912 return getUNDEF(VT); 1913 LLVM_FALLTHROUGH; 1914 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT); 1915 case ISD::SETLE: if (R==APFloat::cmpUnordered) 1916 return getUNDEF(VT); 1917 LLVM_FALLTHROUGH; 1918 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan || 1919 R==APFloat::cmpEqual, dl, VT); 1920 case ISD::SETGE: if (R==APFloat::cmpUnordered) 1921 return getUNDEF(VT); 1922 LLVM_FALLTHROUGH; 1923 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan || 1924 R==APFloat::cmpEqual, dl, VT); 1925 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT); 1926 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT); 1927 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered || 1928 R==APFloat::cmpEqual, dl, VT); 1929 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT); 1930 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered || 1931 R==APFloat::cmpLessThan, dl, VT); 1932 case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan || 1933 R==APFloat::cmpUnordered, dl, VT); 1934 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT); 1935 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT); 1936 } 1937 } else { 1938 // Ensure that the constant occurs on the RHS. 
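    // E.g. (setcc 4.0, %x, setolt) is rewritten as (setcc %x, 4.0, setogt),
    // but only when the swapped condition is legal for the comparison type.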
1939 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 1940 MVT CompVT = N1.getValueType().getSimpleVT(); 1941 if (!TLI->isCondCodeLegal(SwappedCond, CompVT)) 1942 return SDValue(); 1943 1944 return getSetCC(dl, VT, N2, N1, SwappedCond); 1945 } 1946 } 1947 1948 // Could not fold it. 1949 return SDValue(); 1950 } 1951 1952 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 1953 /// use this predicate to simplify operations downstream. 1954 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 1955 unsigned BitWidth = Op.getScalarValueSizeInBits(); 1956 return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth); 1957 } 1958 1959 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 1960 /// this predicate to simplify operations downstream. Mask is known to be zero 1961 /// for bits that V cannot have. 1962 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 1963 unsigned Depth) const { 1964 APInt KnownZero, KnownOne; 1965 computeKnownBits(Op, KnownZero, KnownOne, Depth); 1966 return (KnownZero & Mask) == Mask; 1967 } 1968 1969 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 1970 /// is less than the element bit-width of the shift node, return it. 1971 static const APInt *getValidShiftAmountConstant(SDValue V) { 1972 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 1973 // Shifting more than the bitwidth is not valid. 1974 const APInt &ShAmt = SA->getAPIntValue(); 1975 if (ShAmt.ult(V.getScalarValueSizeInBits())) 1976 return &ShAmt; 1977 } 1978 return nullptr; 1979 } 1980 1981 /// Determine which bits of Op are known to be either zero or one and return 1982 /// them in the KnownZero/KnownOne bitsets. For vectors, the known bits are 1983 /// those that are shared by every vector element. 1984 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, 1985 APInt &KnownOne, unsigned Depth) const { 1986 EVT VT = Op.getValueType(); 1987 APInt DemandedElts = VT.isVector() 1988 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 1989 : APInt(1, 1); 1990 computeKnownBits(Op, KnownZero, KnownOne, DemandedElts, Depth); 1991 } 1992 1993 /// Determine which bits of Op are known to be either zero or one and return 1994 /// them in the KnownZero/KnownOne bitsets. The DemandedElts argument allows 1995 /// us to only collect the known bits that are shared by the requested vector 1996 /// elements. 1997 /// TODO: We only support DemandedElts on a few opcodes so far, the remainder 1998 /// should be added when they become necessary. 1999 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, 2000 APInt &KnownOne, const APInt &DemandedElts, 2001 unsigned Depth) const { 2002 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2003 2004 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything. 2005 if (Depth == 6) 2006 return; // Limit search depth. 2007 2008 APInt KnownZero2, KnownOne2; 2009 unsigned NumElts = DemandedElts.getBitWidth(); 2010 2011 if (!DemandedElts) 2012 return; // No demanded elts, better to assume we don't know anything. 2013 2014 unsigned Opcode = Op.getOpcode(); 2015 switch (Opcode) { 2016 case ISD::Constant: 2017 // We know all of the bits for a constant! 2018 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue(); 2019 KnownZero = ~KnownOne; 2020 break; 2021 case ISD::BUILD_VECTOR: 2022 // Collect the known bits that are shared by every demanded vector element. 
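    // E.g. for (build_vector i8 1, i8 3) with both elements demanded:
    //   KnownOne  = 0x01 & 0x03 = 0x01  (bit 0 known one)
    //   KnownZero = 0xFE & 0xFC = 0xFC  (bits 2-7 known zero; bit 1 unknown)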
2023 assert(NumElts == Op.getValueType().getVectorNumElements() && 2024 "Unexpected vector size"); 2025 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth); 2026 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2027 if (!DemandedElts[i]) 2028 continue; 2029 2030 SDValue SrcOp = Op.getOperand(i); 2031 computeKnownBits(SrcOp, KnownZero2, KnownOne2, Depth + 1); 2032 2033 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2034 if (SrcOp.getValueSizeInBits() != BitWidth) { 2035 assert(SrcOp.getValueSizeInBits() > BitWidth && 2036 "Expected BUILD_VECTOR implicit truncation"); 2037 KnownOne2 = KnownOne2.trunc(BitWidth); 2038 KnownZero2 = KnownZero2.trunc(BitWidth); 2039 } 2040 2041 // Known bits are the values that are shared by every demanded element. 2042 KnownOne &= KnownOne2; 2043 KnownZero &= KnownZero2; 2044 2045 // If we don't know any bits, early out. 2046 if (!KnownOne && !KnownZero) 2047 break; 2048 } 2049 break; 2050 case ISD::VECTOR_SHUFFLE: { 2051 // Collect the known bits that are shared by every vector element referenced 2052 // by the shuffle. 2053 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2054 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth); 2055 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2056 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2057 for (unsigned i = 0; i != NumElts; ++i) { 2058 if (!DemandedElts[i]) 2059 continue; 2060 2061 int M = SVN->getMaskElt(i); 2062 if (M < 0) { 2063 // For UNDEF elements, we don't know anything about the common state of 2064 // the shuffle result. 2065 KnownOne.clearAllBits(); 2066 KnownZero.clearAllBits(); 2067 DemandedLHS.clearAllBits(); 2068 DemandedRHS.clearAllBits(); 2069 break; 2070 } 2071 2072 if ((unsigned)M < NumElts) 2073 DemandedLHS.setBit((unsigned)M % NumElts); 2074 else 2075 DemandedRHS.setBit((unsigned)M % NumElts); 2076 } 2077 // Known bits are the values that are shared by every demanded element. 2078 if (!!DemandedLHS) { 2079 SDValue LHS = Op.getOperand(0); 2080 computeKnownBits(LHS, KnownZero2, KnownOne2, DemandedLHS, Depth + 1); 2081 KnownOne &= KnownOne2; 2082 KnownZero &= KnownZero2; 2083 } 2084 // If we don't know any bits, early out. 2085 if (!KnownOne && !KnownZero) 2086 break; 2087 if (!!DemandedRHS) { 2088 SDValue RHS = Op.getOperand(1); 2089 computeKnownBits(RHS, KnownZero2, KnownOne2, DemandedRHS, Depth + 1); 2090 KnownOne &= KnownOne2; 2091 KnownZero &= KnownZero2; 2092 } 2093 break; 2094 } 2095 case ISD::CONCAT_VECTORS: { 2096 // Split DemandedElts and test each of the demanded subvectors. 2097 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth); 2098 EVT SubVectorVT = Op.getOperand(0).getValueType(); 2099 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 2100 unsigned NumSubVectors = Op.getNumOperands(); 2101 for (unsigned i = 0; i != NumSubVectors; ++i) { 2102 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 2103 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 2104 if (!!DemandedSub) { 2105 SDValue Sub = Op.getOperand(i); 2106 computeKnownBits(Sub, KnownZero2, KnownOne2, DemandedSub, Depth + 1); 2107 KnownOne &= KnownOne2; 2108 KnownZero &= KnownZero2; 2109 } 2110 // If we don't know any bits, early out. 2111 if (!KnownOne && !KnownZero) 2112 break; 2113 } 2114 break; 2115 } 2116 case ISD::EXTRACT_SUBVECTOR: { 2117 // If we know the element index, just demand that subvector elements, 2118 // otherwise demand them all. 
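    // E.g. extracting <2 x i32> at index 2 from a <4 x i32> source: the
    // demanded mask 0b11 is zero-extended to four bits and shifted left by
    // the index, demanding source elements 0b1100.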
2119 SDValue Src = Op.getOperand(0); 2120 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2121 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2122 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2123 // Offset the demanded elts by the subvector index. 2124 uint64_t Idx = SubIdx->getZExtValue(); 2125 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx); 2126 computeKnownBits(Src, KnownZero, KnownOne, DemandedSrc, Depth + 1); 2127 } else { 2128 computeKnownBits(Src, KnownZero, KnownOne, Depth + 1); 2129 } 2130 break; 2131 } 2132 case ISD::BITCAST: { 2133 SDValue N0 = Op.getOperand(0); 2134 unsigned SubBitWidth = N0.getScalarValueSizeInBits(); 2135 2136 // Ignore bitcasts from floating point. 2137 if (!N0.getValueType().isInteger()) 2138 break; 2139 2140 // Fast handling of 'identity' bitcasts. 2141 if (BitWidth == SubBitWidth) { 2142 computeKnownBits(N0, KnownZero, KnownOne, DemandedElts, Depth + 1); 2143 break; 2144 } 2145 2146 // Support big-endian targets when it becomes useful. 2147 bool IsLE = getDataLayout().isLittleEndian(); 2148 if (!IsLE) 2149 break; 2150 2151 // Bitcast 'small element' vector to 'large element' scalar/vector. 2152 if ((BitWidth % SubBitWidth) == 0) { 2153 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2154 2155 // Collect known bits for the (larger) output by collecting the known 2156 // bits from each set of sub elements and shift these into place. 2157 // We need to separately call computeKnownBits for each set of 2158 // sub elements as the knownbits for each is likely to be different. 2159 unsigned SubScale = BitWidth / SubBitWidth; 2160 APInt SubDemandedElts(NumElts * SubScale, 0); 2161 for (unsigned i = 0; i != NumElts; ++i) 2162 if (DemandedElts[i]) 2163 SubDemandedElts.setBit(i * SubScale); 2164 2165 for (unsigned i = 0; i != SubScale; ++i) { 2166 computeKnownBits(N0, KnownZero2, KnownOne2, SubDemandedElts.shl(i), 2167 Depth + 1); 2168 KnownOne |= KnownOne2.zext(BitWidth).shl(SubBitWidth * i); 2169 KnownZero |= KnownZero2.zext(BitWidth).shl(SubBitWidth * i); 2170 } 2171 } 2172 2173 // Bitcast 'large element' scalar/vector to 'small element' vector. 2174 if ((SubBitWidth % BitWidth) == 0) { 2175 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 2176 2177 // Collect known bits for the (smaller) output by collecting the known 2178 // bits from the overlapping larger input elements and extracting the 2179 // sub sections we actually care about. 2180 unsigned SubScale = SubBitWidth / BitWidth; 2181 APInt SubDemandedElts(NumElts / SubScale, 0); 2182 for (unsigned i = 0; i != NumElts; ++i) 2183 if (DemandedElts[i]) 2184 SubDemandedElts.setBit(i / SubScale); 2185 2186 computeKnownBits(N0, KnownZero2, KnownOne2, SubDemandedElts, Depth + 1); 2187 2188 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth); 2189 for (unsigned i = 0; i != NumElts; ++i) 2190 if (DemandedElts[i]) { 2191 unsigned Offset = (i % SubScale) * BitWidth; 2192 KnownOne &= KnownOne2.lshr(Offset).trunc(BitWidth); 2193 KnownZero &= KnownZero2.lshr(Offset).trunc(BitWidth); 2194 // If we don't know any bits, early out. 2195 if (!KnownOne && !KnownZero) 2196 break; 2197 } 2198 } 2199 break; 2200 } 2201 case ISD::AND: 2202 // If either the LHS or the RHS are Zero, the result is zero. 
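    // E.g. (and %x, 0xFF00): the constant contributes KnownZero = 0x00FF, so
    // the low byte of the result is known zero whatever %x is.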
2203 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts, 2204 Depth + 1); 2205 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts, 2206 Depth + 1); 2207 2208 // Output known-1 bits are only known if set in both the LHS & RHS. 2209 KnownOne &= KnownOne2; 2210 // Output known-0 are known to be clear if zero in either the LHS | RHS. 2211 KnownZero |= KnownZero2; 2212 break; 2213 case ISD::OR: 2214 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts, 2215 Depth + 1); 2216 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts, 2217 Depth + 1); 2218 2219 // Output known-0 bits are only known if clear in both the LHS & RHS. 2220 KnownZero &= KnownZero2; 2221 // Output known-1 are known to be set if set in either the LHS | RHS. 2222 KnownOne |= KnownOne2; 2223 break; 2224 case ISD::XOR: { 2225 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts, 2226 Depth + 1); 2227 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts, 2228 Depth + 1); 2229 2230 // Output known-0 bits are known if clear or set in both the LHS & RHS. 2231 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); 2232 // Output known-1 are known to be set if set in only one of the LHS, RHS. 2233 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); 2234 KnownZero = KnownZeroOut; 2235 break; 2236 } 2237 case ISD::MUL: { 2238 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts, 2239 Depth + 1); 2240 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts, 2241 Depth + 1); 2242 2243 // If low bits are zero in either operand, output low known-0 bits. 2244 // Also compute a conservative estimate for high known-0 bits. 2245 // More trickiness is possible, but this is sufficient for the 2246 // interesting case of alignment computation. 2247 KnownOne.clearAllBits(); 2248 unsigned TrailZ = KnownZero.countTrailingOnes() + 2249 KnownZero2.countTrailingOnes(); 2250 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() + 2251 KnownZero2.countLeadingOnes(), 2252 BitWidth) - BitWidth; 2253 2254 TrailZ = std::min(TrailZ, BitWidth); 2255 LeadZ = std::min(LeadZ, BitWidth); 2256 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) | 2257 APInt::getHighBitsSet(BitWidth, LeadZ); 2258 break; 2259 } 2260 case ISD::UDIV: { 2261 // For the purposes of computing leading zeros we can conservatively 2262 // treat a udiv as a logical right shift by the power of 2 known to 2263 // be less than the denominator. 2264 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts, 2265 Depth + 1); 2266 unsigned LeadZ = KnownZero2.countLeadingOnes(); 2267 2268 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts, 2269 Depth + 1); 2270 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros(); 2271 if (RHSUnknownLeadingOnes != BitWidth) 2272 LeadZ = std::min(BitWidth, 2273 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1); 2274 2275 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ); 2276 break; 2277 } 2278 case ISD::SELECT: 2279 computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1); 2280 // If we don't know any bits, early out. 2281 if (!KnownOne && !KnownZero) 2282 break; 2283 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1); 2284 2285 // Only known if known in both the LHS and RHS. 
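    // E.g. (select %c, i8 8, i8 12): bit 3 is one and bits 0-1 (and 4-7) are
    // zero in both arms, so those bits are known in the result; bit 2 differs
    // between the arms and stays unknown.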
2286 KnownOne &= KnownOne2; 2287 KnownZero &= KnownZero2; 2288 break; 2289 case ISD::SELECT_CC: 2290 computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1); 2291 // If we don't know any bits, early out. 2292 if (!KnownOne && !KnownZero) 2293 break; 2294 computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1); 2295 2296 // Only known if known in both the LHS and RHS. 2297 KnownOne &= KnownOne2; 2298 KnownZero &= KnownZero2; 2299 break; 2300 case ISD::SADDO: 2301 case ISD::UADDO: 2302 case ISD::SSUBO: 2303 case ISD::USUBO: 2304 case ISD::SMULO: 2305 case ISD::UMULO: 2306 if (Op.getResNo() != 1) 2307 break; 2308 // The boolean result conforms to getBooleanContents. 2309 // If we know the result of a setcc has the top bits zero, use this info. 2310 // We know that we have an integer-based boolean since these operations 2311 // are only available for integer. 2312 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2313 TargetLowering::ZeroOrOneBooleanContent && 2314 BitWidth > 1) 2315 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 2316 break; 2317 case ISD::SETCC: 2318 // If we know the result of a setcc has the top bits zero, use this info. 2319 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2320 TargetLowering::ZeroOrOneBooleanContent && 2321 BitWidth > 1) 2322 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 2323 break; 2324 case ISD::SHL: 2325 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2326 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts, 2327 Depth + 1); 2328 KnownZero = KnownZero << *ShAmt; 2329 KnownOne = KnownOne << *ShAmt; 2330 // Low bits are known zero. 2331 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt->getZExtValue()); 2332 } 2333 break; 2334 case ISD::SRL: 2335 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2336 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts, 2337 Depth + 1); 2338 KnownZero = KnownZero.lshr(*ShAmt); 2339 KnownOne = KnownOne.lshr(*ShAmt); 2340 // High bits are known zero. 2341 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt->getZExtValue()); 2342 KnownZero |= HighBits; 2343 } 2344 break; 2345 case ISD::SRA: 2346 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2347 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts, 2348 Depth + 1); 2349 KnownZero = KnownZero.lshr(*ShAmt); 2350 KnownOne = KnownOne.lshr(*ShAmt); 2351 // If we know the value of the sign bit, then we know it is copied across 2352 // the high bits by the shift amount. 2353 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt->getZExtValue()); 2354 APInt SignBit = APInt::getSignBit(BitWidth); 2355 SignBit = SignBit.lshr(*ShAmt); // Adjust to where it is now in the mask. 2356 if (KnownZero.intersects(SignBit)) { 2357 KnownZero |= HighBits; // New bits are known zero. 2358 } else if (KnownOne.intersects(SignBit)) { 2359 KnownOne |= HighBits; // New bits are known one. 2360 } 2361 } 2362 break; 2363 case ISD::SIGN_EXTEND_INREG: { 2364 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2365 unsigned EBits = EVT.getScalarSizeInBits(); 2366 2367 // Sign extension. Compute the demanded bits in the result that are not 2368 // present in the input. 
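    // E.g. sign_extend_inreg(i32 %x, i8): EBits is 8, NewBits covers bits
    // 8-31, and the input sign bit is bit 7; once bit 7 is known, bits 8-31
    // become known below as well.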
2369 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2370 2371 APInt InSignBit = APInt::getSignBit(EBits); 2372 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2373 2374 // If the sign extended bits are demanded, we know that the sign 2375 // bit is demanded. 2376 InSignBit = InSignBit.zext(BitWidth); 2377 if (NewBits.getBoolValue()) 2378 InputDemandedBits |= InSignBit; 2379 2380 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts, 2381 Depth + 1); 2382 KnownOne &= InputDemandedBits; 2383 KnownZero &= InputDemandedBits; 2384 2385 // If the sign bit of the input is known set or clear, then we know the 2386 // top bits of the result. 2387 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear 2388 KnownZero |= NewBits; 2389 KnownOne &= ~NewBits; 2390 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set 2391 KnownOne |= NewBits; 2392 KnownZero &= ~NewBits; 2393 } else { // Input sign bit unknown 2394 KnownZero &= ~NewBits; 2395 KnownOne &= ~NewBits; 2396 } 2397 break; 2398 } 2399 case ISD::CTTZ: 2400 case ISD::CTTZ_ZERO_UNDEF: 2401 case ISD::CTLZ: 2402 case ISD::CTLZ_ZERO_UNDEF: 2403 case ISD::CTPOP: { 2404 unsigned LowBits = Log2_32(BitWidth)+1; 2405 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits); 2406 KnownOne.clearAllBits(); 2407 break; 2408 } 2409 case ISD::LOAD: { 2410 LoadSDNode *LD = cast<LoadSDNode>(Op); 2411 // If this is a ZEXTLoad and we are looking at the loaded value. 2412 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 2413 EVT VT = LD->getMemoryVT(); 2414 unsigned MemBits = VT.getScalarSizeInBits(); 2415 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); 2416 } else if (const MDNode *Ranges = LD->getRanges()) { 2417 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 2418 computeKnownBitsFromRangeMetadata(*Ranges, KnownZero, KnownOne); 2419 } 2420 break; 2421 } 2422 case ISD::ZERO_EXTEND: { 2423 EVT InVT = Op.getOperand(0).getValueType(); 2424 unsigned InBits = InVT.getScalarSizeInBits(); 2425 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits); 2426 KnownZero = KnownZero.trunc(InBits); 2427 KnownOne = KnownOne.trunc(InBits); 2428 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts, 2429 Depth + 1); 2430 KnownZero = KnownZero.zext(BitWidth); 2431 KnownOne = KnownOne.zext(BitWidth); 2432 KnownZero |= NewBits; 2433 break; 2434 } 2435 case ISD::SIGN_EXTEND: { 2436 EVT InVT = Op.getOperand(0).getValueType(); 2437 unsigned InBits = InVT.getScalarSizeInBits(); 2438 2439 KnownZero = KnownZero.trunc(InBits); 2440 KnownOne = KnownOne.trunc(InBits); 2441 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts, 2442 Depth + 1); 2443 2444 // If the sign bit is known to be zero or one, then sext will extend 2445 // it to the top bits, else it will just zext. 
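    // E.g. (sext i8 %x to i32) with bit 7 of %x known zero: sign-extending
    // the KnownZero mask replicates that bit, so bits 8-31 of the result are
    // known zero as well.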
2446 KnownZero = KnownZero.sext(BitWidth); 2447 KnownOne = KnownOne.sext(BitWidth); 2448 break; 2449 } 2450 case ISD::ANY_EXTEND: { 2451 EVT InVT = Op.getOperand(0).getValueType(); 2452 unsigned InBits = InVT.getScalarSizeInBits(); 2453 KnownZero = KnownZero.trunc(InBits); 2454 KnownOne = KnownOne.trunc(InBits); 2455 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2456 KnownZero = KnownZero.zext(BitWidth); 2457 KnownOne = KnownOne.zext(BitWidth); 2458 break; 2459 } 2460 case ISD::TRUNCATE: { 2461 EVT InVT = Op.getOperand(0).getValueType(); 2462 unsigned InBits = InVT.getScalarSizeInBits(); 2463 KnownZero = KnownZero.zext(InBits); 2464 KnownOne = KnownOne.zext(InBits); 2465 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts, 2466 Depth + 1); 2467 KnownZero = KnownZero.trunc(BitWidth); 2468 KnownOne = KnownOne.trunc(BitWidth); 2469 break; 2470 } 2471 case ISD::AssertZext: { 2472 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2473 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 2474 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2475 KnownZero |= (~InMask); 2476 KnownOne &= (~KnownZero); 2477 break; 2478 } 2479 case ISD::FGETSIGN: 2480 // All bits are zero except the low bit. 2481 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1); 2482 break; 2483 2484 case ISD::SUB: { 2485 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) { 2486 // We know that the top bits of C-X are clear if X contains less bits 2487 // than C (i.e. no wrap-around can happen). For example, 20-X is 2488 // positive if we can prove that X is >= 0 and < 16. 2489 if (CLHS->getAPIntValue().isNonNegative()) { 2490 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros(); 2491 // NLZ can't be BitWidth with no sign bit 2492 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1); 2493 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts, 2494 Depth + 1); 2495 2496 // If all of the MaskV bits are known to be zero, then we know the 2497 // output top bits are zero, because we now know that the output is 2498 // from [0-C]. 2499 if ((KnownZero2 & MaskV) == MaskV) { 2500 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros(); 2501 // Top bits known zero. 2502 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2); 2503 } 2504 } 2505 } 2506 LLVM_FALLTHROUGH; 2507 } 2508 case ISD::ADD: 2509 case ISD::ADDE: { 2510 // Output known-0 bits are known if clear or set in both the low clear bits 2511 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the 2512 // low 3 bits clear. 2513 // Output known-0 bits are also known if the top bits of each input are 2514 // known to be clear. For example, if one input has the top 10 bits clear 2515 // and the other has the top 8 bits clear, we know the top 7 bits of the 2516 // output must be clear. 
2517     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2518                      Depth + 1);
2519     unsigned KnownZeroHigh = KnownZero2.countLeadingOnes();
2520     unsigned KnownZeroLow = KnownZero2.countTrailingOnes();
2521 
2522     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2523                      Depth + 1);
2524     KnownZeroHigh = std::min(KnownZeroHigh,
2525                              KnownZero2.countLeadingOnes());
2526     KnownZeroLow = std::min(KnownZeroLow,
2527                             KnownZero2.countTrailingOnes());
2528 
2529     if (Opcode == ISD::ADD) {
2530       KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroLow);
2531       if (KnownZeroHigh > 1)
2532         KnownZero |= APInt::getHighBitsSet(BitWidth, KnownZeroHigh - 1);
2533       break;
2534     }
2535 
2536     // With ADDE, a carry bit may be added in, so we can only use this
2537     // information if we know (at least) that the low two bits are clear. We
2538     // then return to the caller that the low bit is unknown but that other bits
2539     // are known zero.
2540     if (KnownZeroLow >= 2) // ADDE
2541       KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroLow);
2542     break;
2543   }
2544   case ISD::SREM:
2545     if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2546       const APInt &RA = Rem->getAPIntValue().abs();
2547       if (RA.isPowerOf2()) {
2548         APInt LowBits = RA - 1;
2549         computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2550                          Depth + 1);
2551 
2552         // The low bits of the first operand are unchanged by the srem.
2553         KnownZero = KnownZero2 & LowBits;
2554         KnownOne = KnownOne2 & LowBits;
2555 
2556         // If the first operand is non-negative or has all low bits zero, then
2557         // the upper bits are all zero.
2558         if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2559           KnownZero |= ~LowBits;
2560 
2561         // If the first operand is negative and not all low bits are zero, then
2562         // the upper bits are all one.
2563         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2564           KnownOne |= ~LowBits;
2565         assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2566       }
2567     }
2568     break;
2569   case ISD::UREM: {
2570     if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2571       const APInt &RA = Rem->getAPIntValue();
2572       if (RA.isPowerOf2()) {
2573         APInt LowBits = (RA - 1);
2574         computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2575                          Depth + 1);
2576 
2577         // The upper bits are all zero, the lower ones are unchanged.
2578         KnownZero = KnownZero2 | ~LowBits;
2579         KnownOne = KnownOne2 & LowBits;
2580         break;
2581       }
2582     }
2583 
2584     // Since the result is less than or equal to either operand, any leading
2585     // zero bits in either operand must also exist in the result.
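    // E.g. (urem i32 %x, %y) with the top 24 bits of %x known zero: the
    // result is <= %x, so its top 24 bits are known zero as well.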
2586     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2587                      Depth + 1);
2588     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2589                      Depth + 1);
2590 
2591     uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2592                                 KnownZero2.countLeadingOnes());
2593     KnownOne.clearAllBits();
2594     KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2595     break;
2596   }
2597   case ISD::EXTRACT_ELEMENT: {
2598     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2599     const unsigned Index = Op.getConstantOperandVal(1);
2600     const unsigned BitWidth = Op.getValueSizeInBits();
2601 
2602     // Remove low part of known bits mask
2603     KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth);
2604     KnownOne = KnownOne.getHiBits(KnownOne.getBitWidth() - Index * BitWidth);
2605 
2606     // Remove high part of known bit mask
2607     KnownZero = KnownZero.trunc(BitWidth);
2608     KnownOne = KnownOne.trunc(BitWidth);
2609     break;
2610   }
2611   case ISD::EXTRACT_VECTOR_ELT: {
2612     SDValue InVec = Op.getOperand(0);
2613     SDValue EltNo = Op.getOperand(1);
2614     EVT VecVT = InVec.getValueType();
2615     const unsigned BitWidth = Op.getValueSizeInBits();
2616     const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
2617     const unsigned NumSrcElts = VecVT.getVectorNumElements();
2618     // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
2619     // anything about the extended bits.
2620     if (BitWidth > EltBitWidth) {
2621       KnownZero = KnownZero.trunc(EltBitWidth);
2622       KnownOne = KnownOne.trunc(EltBitWidth);
2623     }
2624     ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
2625     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
2626       // If we know the element index, just demand that vector element.
2627       unsigned Idx = ConstEltNo->getZExtValue();
2628       APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
2629       computeKnownBits(InVec, KnownZero, KnownOne, DemandedElt, Depth + 1);
2630     } else {
2631       // Unknown element index, so ignore DemandedElts and demand them all.
2632       computeKnownBits(InVec, KnownZero, KnownOne, Depth + 1);
2633     }
2634     if (BitWidth > EltBitWidth) {
2635       KnownZero = KnownZero.zext(BitWidth);
2636       KnownOne = KnownOne.zext(BitWidth);
2637     }
2638     break;
2639   }
2640   case ISD::INSERT_VECTOR_ELT: {
2641     SDValue InVec = Op.getOperand(0);
2642     SDValue InVal = Op.getOperand(1);
2643     SDValue EltNo = Op.getOperand(2);
2644 
2645     ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
2646     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
2647       // If we know the element index, split the demand between the
2648       // source vector and the inserted element.
2649       KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2650       unsigned EltIdx = CEltNo->getZExtValue();
2651 
2652       // If we demand the inserted element then add its common known bits.
2653       if (DemandedElts[EltIdx]) {
2654         computeKnownBits(InVal, KnownZero2, KnownOne2, Depth + 1);
2655         KnownOne &= KnownOne2.zextOrTrunc(KnownOne.getBitWidth());
2656         KnownZero &= KnownZero2.zextOrTrunc(KnownZero.getBitWidth());
2657       }
2658 
2659       // If we demand the source vector then add its common known bits, ensuring
2660       // that we don't demand the inserted element.
2661       APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
2662       if (!!VectorElts) {
2663         computeKnownBits(InVec, KnownZero2, KnownOne2, VectorElts, Depth + 1);
2664         KnownOne &= KnownOne2;
2665         KnownZero &= KnownZero2;
2666       }
2667     } else {
2668       // Unknown element index, so ignore DemandedElts and demand them all.
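      // E.g. for (insert_vector_elt %v, %x, %i) with a variable index %i,
      // any lane of the result may hold either %x or the corresponding lane
      // of %v, so only bits common to %x and all lanes of %v stay known.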
2669       computeKnownBits(InVec, KnownZero, KnownOne, Depth + 1);
2670       computeKnownBits(InVal, KnownZero2, KnownOne2, Depth + 1);
2671       KnownOne &= KnownOne2.zextOrTrunc(KnownOne.getBitWidth());
2672       KnownZero &= KnownZero2.zextOrTrunc(KnownZero.getBitWidth());
2673     }
2674     break;
2675   }
2676   case ISD::BITREVERSE: {
2677     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2678                      Depth + 1);
2679     KnownZero = KnownZero2.reverseBits();
2680     KnownOne = KnownOne2.reverseBits();
2681     break;
2682   }
2683   case ISD::BSWAP: {
2684     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2685                      Depth + 1);
2686     KnownZero = KnownZero2.byteSwap();
2687     KnownOne = KnownOne2.byteSwap();
2688     break;
2689   }
2690   case ISD::UMIN: {
2691     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2692                      Depth + 1);
2693     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2694                      Depth + 1);
2695 
2696     // UMIN - we know that the result will have the maximum of the
2697     // known zero leading bits of the inputs.
2698     unsigned LeadZero = KnownZero.countLeadingOnes();
2699     LeadZero = std::max(LeadZero, KnownZero2.countLeadingOnes());
2700 
2701     KnownZero &= KnownZero2;
2702     KnownOne &= KnownOne2;
2703     KnownZero |= APInt::getHighBitsSet(BitWidth, LeadZero);
2704     break;
2705   }
2706   case ISD::UMAX: {
2707     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2708                      Depth + 1);
2709     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2710                      Depth + 1);
2711 
2712     // UMAX - we know that the result will have the maximum of the
2713     // known one leading bits of the inputs.
2714     unsigned LeadOne = KnownOne.countLeadingOnes();
2715     LeadOne = std::max(LeadOne, KnownOne2.countLeadingOnes());
2716 
2717     KnownZero &= KnownZero2;
2718     KnownOne &= KnownOne2;
2719     KnownOne |= APInt::getHighBitsSet(BitWidth, LeadOne);
2720     break;
2721   }
2722   case ISD::SMIN:
2723   case ISD::SMAX: {
2724     computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2725                      Depth + 1);
2726     // If we don't know any bits, early out.
2727     if (!KnownOne && !KnownZero)
2728       break;
2729     computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2730                      Depth + 1);
2731     KnownZero &= KnownZero2;
2732     KnownOne &= KnownOne2;
2733     break;
2734   }
2735   case ISD::FrameIndex:
2736   case ISD::TargetFrameIndex:
2737     if (unsigned Align = InferPtrAlignment(Op)) {
2738       // The low bits are known zero if the pointer is aligned.
2739       KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
2740       break;
2741     }
2742     break;
2743 
2744   default:
2745     if (Opcode < ISD::BUILTIN_OP_END)
2746       break;
2747     LLVM_FALLTHROUGH;
2748   case ISD::INTRINSIC_WO_CHAIN:
2749   case ISD::INTRINSIC_W_CHAIN:
2750   case ISD::INTRINSIC_VOID:
2751     // Allow the target to implement this method for its nodes.
2752     TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2753     break;
2754   }
2755 
2756   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2757 }
2758 
2759 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
2760   EVT OpVT = Val.getValueType();
2761   unsigned BitWidth = OpVT.getScalarSizeInBits();
2762 
2763   // Is the constant a known power of 2?
2764   if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
2765     return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
2766 
2767   // A left-shift of a constant one will have exactly one bit set because
2768   // shifting the bit off the end is undefined.
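  // E.g. (shl i32 1, %n) is 1 << %n: for any defined %n it has exactly one
  // set bit and is therefore a power of two.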
2769 if (Val.getOpcode() == ISD::SHL) { 2770 auto *C = dyn_cast<ConstantSDNode>(Val.getOperand(0)); 2771 if (C && C->getAPIntValue() == 1) 2772 return true; 2773 } 2774 2775 // Similarly, a logical right-shift of a constant sign-bit will have exactly 2776 // one bit set. 2777 if (Val.getOpcode() == ISD::SRL) { 2778 auto *C = dyn_cast<ConstantSDNode>(Val.getOperand(0)); 2779 if (C && C->getAPIntValue().isSignBit()) 2780 return true; 2781 } 2782 2783 // Are all operands of a build vector constant powers of two? 2784 if (Val.getOpcode() == ISD::BUILD_VECTOR) 2785 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 2786 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 2787 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 2788 return false; 2789 })) 2790 return true; 2791 2792 // More could be done here, though the above checks are enough 2793 // to handle some common cases. 2794 2795 // Fall back to computeKnownBits to catch other known cases. 2796 APInt KnownZero, KnownOne; 2797 computeKnownBits(Val, KnownZero, KnownOne); 2798 return (KnownZero.countPopulation() == BitWidth - 1) && 2799 (KnownOne.countPopulation() == 1); 2800 } 2801 2802 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 2803 EVT VT = Op.getValueType(); 2804 assert(VT.isInteger() && "Invalid VT!"); 2805 unsigned VTBits = VT.getScalarSizeInBits(); 2806 unsigned Tmp, Tmp2; 2807 unsigned FirstAnswer = 1; 2808 2809 if (Depth == 6) 2810 return 1; // Limit search depth. 2811 2812 switch (Op.getOpcode()) { 2813 default: break; 2814 case ISD::AssertSext: 2815 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2816 return VTBits-Tmp+1; 2817 case ISD::AssertZext: 2818 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2819 return VTBits-Tmp; 2820 2821 case ISD::Constant: { 2822 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue(); 2823 return Val.getNumSignBits(); 2824 } 2825 2826 case ISD::SIGN_EXTEND: 2827 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 2828 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp; 2829 2830 case ISD::SIGN_EXTEND_INREG: 2831 // Max of the input and what this extends. 2832 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 2833 Tmp = VTBits-Tmp+1; 2834 2835 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2836 return std::max(Tmp, Tmp2); 2837 2838 case ISD::SRA: 2839 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2840 // SRA X, C -> adds C sign bits. 2841 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) { 2842 APInt ShiftVal = C->getAPIntValue(); 2843 ShiftVal += Tmp; 2844 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); 2845 } 2846 return Tmp; 2847 case ISD::SHL: 2848 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) { 2849 // shl destroys sign bits. 2850 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2851 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 2852 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 2853 return Tmp - C->getZExtValue(); 2854 } 2855 break; 2856 case ISD::AND: 2857 case ISD::OR: 2858 case ISD::XOR: // NOT is handled here. 2859 // Logical binary ops preserve the number of sign bits at the worst. 2860 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2861 if (Tmp != 1) { 2862 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2863 FirstAnswer = std::min(Tmp, Tmp2); 2864 // We computed what we know about the sign bits as our first 2865 // answer. 
Now proceed to the generic code that uses 2866 // computeKnownBits, and pick whichever answer is better. 2867 } 2868 break; 2869 2870 case ISD::SELECT: 2871 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2872 if (Tmp == 1) return 1; // Early out. 2873 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1); 2874 return std::min(Tmp, Tmp2); 2875 case ISD::SELECT_CC: 2876 Tmp = ComputeNumSignBits(Op.getOperand(2), Depth+1); 2877 if (Tmp == 1) return 1; // Early out. 2878 Tmp2 = ComputeNumSignBits(Op.getOperand(3), Depth+1); 2879 return std::min(Tmp, Tmp2); 2880 case ISD::SMIN: 2881 case ISD::SMAX: 2882 case ISD::UMIN: 2883 case ISD::UMAX: 2884 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 2885 if (Tmp == 1) 2886 return 1; // Early out. 2887 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 2888 return std::min(Tmp, Tmp2); 2889 case ISD::SADDO: 2890 case ISD::UADDO: 2891 case ISD::SSUBO: 2892 case ISD::USUBO: 2893 case ISD::SMULO: 2894 case ISD::UMULO: 2895 if (Op.getResNo() != 1) 2896 break; 2897 // The boolean result conforms to getBooleanContents. Fall through. 2898 // If setcc returns 0/-1, all bits are sign bits. 2899 // We know that we have an integer-based boolean since these operations 2900 // are only available for integer. 2901 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2902 TargetLowering::ZeroOrNegativeOneBooleanContent) 2903 return VTBits; 2904 break; 2905 case ISD::SETCC: 2906 // If setcc returns 0/-1, all bits are sign bits. 2907 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2908 TargetLowering::ZeroOrNegativeOneBooleanContent) 2909 return VTBits; 2910 break; 2911 case ISD::ROTL: 2912 case ISD::ROTR: 2913 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2914 unsigned RotAmt = C->getZExtValue() & (VTBits-1); 2915 2916 // Handle rotate right by N like a rotate left by 32-N. 2917 if (Op.getOpcode() == ISD::ROTR) 2918 RotAmt = (VTBits-RotAmt) & (VTBits-1); 2919 2920 // If we aren't rotating out all of the known-in sign bits, return the 2921 // number that are left. This handles rotl(sext(x), 1) for example. 2922 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2923 if (Tmp > RotAmt+1) return Tmp-RotAmt; 2924 } 2925 break; 2926 case ISD::ADD: 2927 // Add can have at most one carry bit. Thus we know that the output 2928 // is, at worst, one more bit than the inputs. 2929 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2930 if (Tmp == 1) return 1; // Early out. 2931 2932 // Special case decrementing a value (ADD X, -1): 2933 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 2934 if (CRHS->isAllOnesValue()) { 2935 APInt KnownZero, KnownOne; 2936 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 2937 2938 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2939 // sign bits set. 2940 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue()) 2941 return VTBits; 2942 2943 // If we are subtracting one from a positive number, there is no carry 2944 // out of the result. 2945 if (KnownZero.isNegative()) 2946 return Tmp; 2947 } 2948 2949 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2950 if (Tmp2 == 1) return 1; 2951 return std::min(Tmp, Tmp2)-1; 2952 2953 case ISD::SUB: 2954 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 2955 if (Tmp2 == 1) return 1; 2956 2957 // Handle NEG. 
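  // E.g. (sub i8 0, %x) with %x known to be 0 or 1: the result is 0 or -1,
  // so all eight bits are sign bits.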
2958 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) 2959 if (CLHS->isNullValue()) { 2960 APInt KnownZero, KnownOne; 2961 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1); 2962 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2963 // sign bits set. 2964 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue()) 2965 return VTBits; 2966 2967 // If the input is known to be positive (the sign bit is known clear), 2968 // the output of the NEG has the same number of sign bits as the input. 2969 if (KnownZero.isNegative()) 2970 return Tmp2; 2971 2972 // Otherwise, we treat this like a SUB. 2973 } 2974 2975 // Sub can have at most one carry bit. Thus we know that the output 2976 // is, at worst, one more bit than the inputs. 2977 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2978 if (Tmp == 1) return 1; // Early out. 2979 return std::min(Tmp, Tmp2)-1; 2980 case ISD::TRUNCATE: { 2981 // Check if the sign bits of source go down as far as the truncated value. 2982 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); 2983 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 2984 if (NumSrcSignBits > (NumSrcBits - VTBits)) 2985 return NumSrcSignBits - (NumSrcBits - VTBits); 2986 break; 2987 } 2988 case ISD::EXTRACT_ELEMENT: { 2989 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); 2990 const int BitWidth = Op.getValueSizeInBits(); 2991 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; 2992 2993 // Get reverse index (starting from 1), Op1 value indexes elements from 2994 // little end. Sign starts at big end. 2995 const int rIndex = Items - 1 - Op.getConstantOperandVal(1); 2996 2997 // If the sign portion ends in our element the subtraction gives correct 2998 // result. Otherwise it gives either negative or > bitwidth result 2999 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); 3000 } 3001 case ISD::EXTRACT_VECTOR_ELT: { 3002 // At the moment we keep this simple and skip tracking the specific 3003 // element. This way we get the lowest common denominator for all elements 3004 // of the vector. 3005 // TODO: get information for given vector element 3006 const unsigned BitWidth = Op.getValueSizeInBits(); 3007 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); 3008 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know 3009 // anything about sign bits. But if the sizes match we can derive knowledge 3010 // about sign bits from the vector operand. 3011 if (BitWidth == EltBitWidth) 3012 return ComputeNumSignBits(Op.getOperand(0), Depth+1); 3013 break; 3014 } 3015 case ISD::EXTRACT_SUBVECTOR: 3016 return ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3017 case ISD::CONCAT_VECTORS: 3018 // Determine the minimum number of sign bits across all input vectors. 3019 // Early out if the result is already 1. 3020 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3021 for (unsigned i = 1, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) 3022 Tmp = std::min(Tmp, ComputeNumSignBits(Op.getOperand(i), Depth + 1)); 3023 return Tmp; 3024 } 3025 3026 // If we are looking at the loaded value of the SDNode. 3027 if (Op.getResNo() == 0) { 3028 // Handle LOADX separately here. EXTLOAD case will fallthrough. 
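  // E.g. for an i32 result, a sextload of i16 has 32 - 16 + 1 = 17 known
  // sign bits, while a zextload of i16 has 32 - 16 = 16 (the zero-extended
  // top half is all copies of a zero sign bit).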
3029     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3030       unsigned ExtType = LD->getExtensionType();
3031       switch (ExtType) {
3032       default: break;
3033       case ISD::SEXTLOAD:    // '17' bits known
3034         Tmp = LD->getMemoryVT().getScalarSizeInBits();
3035         return VTBits-Tmp+1;
3036       case ISD::ZEXTLOAD:    // '16' bits known
3037         Tmp = LD->getMemoryVT().getScalarSizeInBits();
3038         return VTBits-Tmp;
3039       }
3040     }
3041   }
3042 
3043   // Allow the target to implement this method for its nodes.
3044   if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3045       Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3046       Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3047       Op.getOpcode() == ISD::INTRINSIC_VOID) {
3048     unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
3049     if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
3050   }
3051 
3052   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3053   // use this information.
3054   APInt KnownZero, KnownOne;
3055   computeKnownBits(Op, KnownZero, KnownOne, Depth);
3056 
3057   APInt Mask;
3058   if (KnownZero.isNegative()) {        // sign bit is 0
3059     Mask = KnownZero;
3060   } else if (KnownOne.isNegative()) {  // sign bit is 1
3061     Mask = KnownOne;
3062   } else {
3063     // Nothing known.
3064     return FirstAnswer;
3065   }
3066 
3067   // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3068   // the number of identical bits in the top of the input value.
3069   Mask = ~Mask;
3070   Mask <<= Mask.getBitWidth()-VTBits;
3071   // Return # leading zeros. We use 'min' here in case Val was zero before
3072   // shifting. We don't want to return '64' as for an i32 "0".
3073   return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3074 }
3075 
3076 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3077   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3078       !isa<ConstantSDNode>(Op.getOperand(1)))
3079     return false;
3080 
3081   if (Op.getOpcode() == ISD::OR &&
3082       !MaskedValueIsZero(Op.getOperand(0),
3083                          cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3084     return false;
3085 
3086   return true;
3087 }
3088 
3089 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
3090   // If we're told that NaNs won't happen, assume they won't.
3091   if (getTarget().Options.NoNaNsFPMath)
3092     return true;
3093 
3094   if (const BinaryWithFlagsSDNode *BF = dyn_cast<BinaryWithFlagsSDNode>(Op))
3095     return BF->Flags.hasNoNaNs();
3096 
3097   // If the value is a constant, we can obviously see if it is a NaN or not.
3098   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3099     return !C->getValueAPF().isNaN();
3100 
3101   // TODO: Recognize more cases here.
3102 
3103   return false;
3104 }
3105 
3106 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3107   // If the value is a constant, we can obviously see if it is a zero or not.
3108   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3109     return !C->isZero();
3110 
3111   // TODO: Recognize more cases here.
3112   switch (Op.getOpcode()) {
3113   default: break;
3114   case ISD::OR:
3115     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3116       return !C->isNullValue();
3117     break;
3118   }
3119 
3120   return false;
3121 }
3122 
3123 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3124   // Check the obvious case.
3125   if (A == B) return true;
3126 
3127   // Check for negative and positive zero.
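  // E.g. isEqualTo(+0.0, -0.0) returns true here even though the two
  // constants are distinct nodes with different bit patterns.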
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}

bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
  assert(A.getValueType() == B.getValueType() &&
         "Values must have the same type");
  APInt AZero, AOne;
  APInt BZero, BOne;
  computeKnownBits(A, AZero, AOne);
  computeKnownBits(B, BZero, BOne);
  return (AZero | BZero).isAllOnesValue();
}

static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
                                  ArrayRef<SDValue> Ops,
                                  llvm::SelectionDAG &DAG) {
  assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
  assert(llvm::all_of(Ops,
                      [Ops](SDValue Op) {
                        return Ops[0].getValueType() == Op.getValueType();
                      }) &&
         "Concatenation of vectors with inconsistent value types!");
  assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
         VT.getVectorNumElements() &&
         "Incorrect element count in vector concatenation!");

  if (Ops.size() == 1)
    return Ops[0];

  // Concat of UNDEFs is UNDEF.
  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
    return DAG.getUNDEF(VT);

  // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
  // simplified to one big BUILD_VECTOR.
  // FIXME: Add support for SCALAR_TO_VECTOR as well.
  EVT SVT = VT.getScalarType();
  SmallVector<SDValue, 16> Elts;
  for (SDValue Op : Ops) {
    EVT OpVT = Op.getValueType();
    if (Op.isUndef())
      Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
    else if (Op.getOpcode() == ISD::BUILD_VECTOR)
      Elts.append(Op->op_begin(), Op->op_end());
    else
      return SDValue();
  }

  // BUILD_VECTOR requires all inputs to be of the same type; find the
  // maximum type and extend them all.
  for (SDValue Op : Elts)
    SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);

  if (SVT.bitsGT(VT.getScalarType()))
    for (SDValue &Op : Elts)
      Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
               ? DAG.getZExtOrTrunc(Op, DL, SVT)
               : DAG.getSExtOrTrunc(Op, DL, SVT);

  return DAG.getBuildVector(VT, DL, Elts);
}

/// Gets or creates the specified node.
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, getVTList(VT), None);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
                              getVTList(VT));
  CSEMap.InsertNode(N, IP);

  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue Operand) {
  // Constant fold unary operations with an integer constant operand. Even
  // opaque constants will be folded, because the folding of unary operations
  // doesn't create new constants with different values. Nevertheless, the
  // opaque flag is preserved during folding to prevent future folding with
  // other constants.
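  // A minimal caller-side sketch of the effect (types and values are
  // arbitrary, chosen only for illustration):
  //   SDValue C = DAG.getConstant(0xFFFF, DL, MVT::i16);
  //   SDValue Z = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, C);
  //   // Z is the i32 constant 0x0000FFFF; no ZERO_EXTEND node is created.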
3218 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 3219 const APInt &Val = C->getAPIntValue(); 3220 switch (Opcode) { 3221 default: break; 3222 case ISD::SIGN_EXTEND: 3223 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 3224 C->isTargetOpcode(), C->isOpaque()); 3225 case ISD::ANY_EXTEND: 3226 case ISD::ZERO_EXTEND: 3227 case ISD::TRUNCATE: 3228 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 3229 C->isTargetOpcode(), C->isOpaque()); 3230 case ISD::UINT_TO_FP: 3231 case ISD::SINT_TO_FP: { 3232 APFloat apf(EVTToAPFloatSemantics(VT), 3233 APInt::getNullValue(VT.getSizeInBits())); 3234 (void)apf.convertFromAPInt(Val, 3235 Opcode==ISD::SINT_TO_FP, 3236 APFloat::rmNearestTiesToEven); 3237 return getConstantFP(apf, DL, VT); 3238 } 3239 case ISD::BITCAST: 3240 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 3241 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 3242 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 3243 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 3244 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 3245 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 3246 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 3247 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 3248 break; 3249 case ISD::BITREVERSE: 3250 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 3251 C->isOpaque()); 3252 case ISD::BSWAP: 3253 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 3254 C->isOpaque()); 3255 case ISD::CTPOP: 3256 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 3257 C->isOpaque()); 3258 case ISD::CTLZ: 3259 case ISD::CTLZ_ZERO_UNDEF: 3260 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 3261 C->isOpaque()); 3262 case ISD::CTTZ: 3263 case ISD::CTTZ_ZERO_UNDEF: 3264 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 3265 C->isOpaque()); 3266 } 3267 } 3268 3269 // Constant fold unary operations with a floating point constant operand. 3270 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 3271 APFloat V = C->getValueAPF(); // make copy 3272 switch (Opcode) { 3273 case ISD::FNEG: 3274 V.changeSign(); 3275 return getConstantFP(V, DL, VT); 3276 case ISD::FABS: 3277 V.clearSign(); 3278 return getConstantFP(V, DL, VT); 3279 case ISD::FCEIL: { 3280 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 3281 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3282 return getConstantFP(V, DL, VT); 3283 break; 3284 } 3285 case ISD::FTRUNC: { 3286 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 3287 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3288 return getConstantFP(V, DL, VT); 3289 break; 3290 } 3291 case ISD::FFLOOR: { 3292 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 3293 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3294 return getConstantFP(V, DL, VT); 3295 break; 3296 } 3297 case ISD::FP_EXTEND: { 3298 bool ignored; 3299 // This can return overflow, underflow, or inexact; we don't care. 3300 // FIXME need to be more flexible about rounding mode. 
3301 (void)V.convert(EVTToAPFloatSemantics(VT), 3302 APFloat::rmNearestTiesToEven, &ignored); 3303 return getConstantFP(V, DL, VT); 3304 } 3305 case ISD::FP_TO_SINT: 3306 case ISD::FP_TO_UINT: { 3307 integerPart x[2]; 3308 bool ignored; 3309 static_assert(integerPartWidth >= 64, "APFloat parts too small!"); 3310 // FIXME need to be more flexible about rounding mode. 3311 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(), 3312 Opcode==ISD::FP_TO_SINT, 3313 APFloat::rmTowardZero, &ignored); 3314 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual 3315 break; 3316 APInt api(VT.getSizeInBits(), x); 3317 return getConstant(api, DL, VT); 3318 } 3319 case ISD::BITCAST: 3320 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 3321 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3322 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 3323 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3324 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 3325 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 3326 break; 3327 } 3328 } 3329 3330 // Constant fold unary operations with a vector integer or float operand. 3331 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 3332 if (BV->isConstant()) { 3333 switch (Opcode) { 3334 default: 3335 // FIXME: Entirely reasonable to perform folding of other unary 3336 // operations here as the need arises. 3337 break; 3338 case ISD::FNEG: 3339 case ISD::FABS: 3340 case ISD::FCEIL: 3341 case ISD::FTRUNC: 3342 case ISD::FFLOOR: 3343 case ISD::FP_EXTEND: 3344 case ISD::FP_TO_SINT: 3345 case ISD::FP_TO_UINT: 3346 case ISD::TRUNCATE: 3347 case ISD::UINT_TO_FP: 3348 case ISD::SINT_TO_FP: 3349 case ISD::BITREVERSE: 3350 case ISD::BSWAP: 3351 case ISD::CTLZ: 3352 case ISD::CTLZ_ZERO_UNDEF: 3353 case ISD::CTTZ: 3354 case ISD::CTTZ_ZERO_UNDEF: 3355 case ISD::CTPOP: { 3356 SDValue Ops = { Operand }; 3357 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 3358 return Fold; 3359 } 3360 } 3361 } 3362 } 3363 3364 unsigned OpOpcode = Operand.getNode()->getOpcode(); 3365 switch (Opcode) { 3366 case ISD::TokenFactor: 3367 case ISD::MERGE_VALUES: 3368 case ISD::CONCAT_VECTORS: 3369 return Operand; // Factor, merge or concat of one node? No need. 3370 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 3371 case ISD::FP_EXTEND: 3372 assert(VT.isFloatingPoint() && 3373 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 3374 if (Operand.getValueType() == VT) return Operand; // noop conversion. 
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Operand.getValueType().bitsLT(VT) &&
           "Invalid fpext node, dst < src!");
    if (Operand.isUndef())
      return getUNDEF(VT);
    break;
  case ISD::SIGN_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Operand.getValueType().bitsLT(VT) &&
           "Invalid sext node, dst < src!");
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // sext(undef) = 0, because the top bits will all be the same.
      return getConstant(0, DL, VT);
    break;
  case ISD::ZERO_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ZERO_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Operand.getValueType().bitsLT(VT) &&
           "Invalid zext node, dst < src!");
    if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
      return getNode(ISD::ZERO_EXTEND, DL, VT,
                     Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // zext(undef) = 0, because the top bits will be zero.
      return getConstant(0, DL, VT);
    break;
  case ISD::ANY_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ANY_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Operand.getValueType().bitsLT(VT) &&
           "Invalid anyext node, dst < src!");

    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND)
      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    // (ext (trunc x)) -> x
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = Operand.getNode()->getOperand(0);
      if (OpOp.getValueType() == VT)
        return OpOp;
    }
    break;
  case ISD::TRUNCATE:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid TRUNCATE!");
    if (Operand.getValueType() == VT) return Operand;   // noop truncate
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Operand.getValueType().bitsGT(VT) &&
           "Invalid truncate node, src < dst!");
    if (OpOpcode == ISD::TRUNCATE)
      return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
      // If the source is smaller than the dest, we still need an extend.
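      // Concretely (widths are illustrative): for trunc(i64 (sext i16 x))
      // to i32, the i16 source is still narrower than i32, so a sext of x
      // to i32 remains; if the inner source is wider than i32, only a
      // truncate of it is needed; and if it is exactly i32, x itself is
      // returned.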
3457 if (Operand.getNode()->getOperand(0).getValueType().getScalarType() 3458 .bitsLT(VT.getScalarType())) 3459 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0)); 3460 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT)) 3461 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0)); 3462 return Operand.getNode()->getOperand(0); 3463 } 3464 if (OpOpcode == ISD::UNDEF) 3465 return getUNDEF(VT); 3466 break; 3467 case ISD::BSWAP: 3468 assert(VT.isInteger() && VT == Operand.getValueType() && 3469 "Invalid BSWAP!"); 3470 assert((VT.getScalarSizeInBits() % 16 == 0) && 3471 "BSWAP types must be a multiple of 16 bits!"); 3472 if (OpOpcode == ISD::UNDEF) 3473 return getUNDEF(VT); 3474 break; 3475 case ISD::BITREVERSE: 3476 assert(VT.isInteger() && VT == Operand.getValueType() && 3477 "Invalid BITREVERSE!"); 3478 if (OpOpcode == ISD::UNDEF) 3479 return getUNDEF(VT); 3480 break; 3481 case ISD::BITCAST: 3482 // Basic sanity checking. 3483 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 3484 "Cannot BITCAST between types of different sizes!"); 3485 if (VT == Operand.getValueType()) return Operand; // noop conversion. 3486 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 3487 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 3488 if (OpOpcode == ISD::UNDEF) 3489 return getUNDEF(VT); 3490 break; 3491 case ISD::SCALAR_TO_VECTOR: 3492 assert(VT.isVector() && !Operand.getValueType().isVector() && 3493 (VT.getVectorElementType() == Operand.getValueType() || 3494 (VT.getVectorElementType().isInteger() && 3495 Operand.getValueType().isInteger() && 3496 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 3497 "Illegal SCALAR_TO_VECTOR node!"); 3498 if (OpOpcode == ISD::UNDEF) 3499 return getUNDEF(VT); 3500 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 3501 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 3502 isa<ConstantSDNode>(Operand.getOperand(1)) && 3503 Operand.getConstantOperandVal(1) == 0 && 3504 Operand.getOperand(0).getValueType() == VT) 3505 return Operand.getOperand(0); 3506 break; 3507 case ISD::FNEG: 3508 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 3509 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) 3510 // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags? 
3511 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1), 3512 Operand.getNode()->getOperand(0), 3513 &cast<BinaryWithFlagsSDNode>(Operand.getNode())->Flags); 3514 if (OpOpcode == ISD::FNEG) // --X -> X 3515 return Operand.getNode()->getOperand(0); 3516 break; 3517 case ISD::FABS: 3518 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 3519 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0)); 3520 break; 3521 } 3522 3523 SDNode *N; 3524 SDVTList VTs = getVTList(VT); 3525 SDValue Ops[] = {Operand}; 3526 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 3527 FoldingSetNodeID ID; 3528 AddNodeIDNode(ID, Opcode, VTs, Ops); 3529 void *IP = nullptr; 3530 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 3531 return SDValue(E, 0); 3532 3533 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3534 createOperands(N, Ops); 3535 CSEMap.InsertNode(N, IP); 3536 } else { 3537 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3538 createOperands(N, Ops); 3539 } 3540 3541 InsertNode(N); 3542 return SDValue(N, 0); 3543 } 3544 3545 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 3546 const APInt &C2) { 3547 switch (Opcode) { 3548 case ISD::ADD: return std::make_pair(C1 + C2, true); 3549 case ISD::SUB: return std::make_pair(C1 - C2, true); 3550 case ISD::MUL: return std::make_pair(C1 * C2, true); 3551 case ISD::AND: return std::make_pair(C1 & C2, true); 3552 case ISD::OR: return std::make_pair(C1 | C2, true); 3553 case ISD::XOR: return std::make_pair(C1 ^ C2, true); 3554 case ISD::SHL: return std::make_pair(C1 << C2, true); 3555 case ISD::SRL: return std::make_pair(C1.lshr(C2), true); 3556 case ISD::SRA: return std::make_pair(C1.ashr(C2), true); 3557 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true); 3558 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true); 3559 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true); 3560 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true); 3561 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true); 3562 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? 
                                                  C1 : C2, true);
  case ISD::UDIV:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.udiv(C2), true);
  case ISD::UREM:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.urem(C2), true);
  case ISD::SDIV:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.sdiv(C2), true);
  case ISD::SREM:
    if (!C2.getBoolValue())
      break;
    return std::make_pair(C1.srem(C2), true);
  }
  return std::make_pair(APInt(1, 0), false);
}

SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
                                             EVT VT, const ConstantSDNode *Cst1,
                                             const ConstantSDNode *Cst2) {
  if (Cst1->isOpaque() || Cst2->isOpaque())
    return SDValue();

  std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
                                            Cst2->getAPIntValue());
  if (!Folded.second)
    return SDValue();
  return getConstant(Folded.first, DL, VT);
}

SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
                                       const GlobalAddressSDNode *GA,
                                       const SDNode *N2) {
  if (GA->getOpcode() != ISD::GlobalAddress)
    return SDValue();
  if (!TLI->isOffsetFoldingLegal(GA))
    return SDValue();
  const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2);
  if (!Cst2)
    return SDValue();
  int64_t Offset = Cst2->getSExtValue();
  switch (Opcode) {
  case ISD::ADD: break;
  case ISD::SUB: Offset = -uint64_t(Offset); break;
  default: return SDValue();
  }
  return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT,
                          GA->getOffset() + uint64_t(Offset));
}

SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
                                             EVT VT, SDNode *Cst1,
                                             SDNode *Cst2) {
  // If the opcode is a target-specific ISD node, there's nothing we can
  // do here and the operand rules may not line up with those below, so
  // bail early.
  if (Opcode >= ISD::BUILTIN_OP_END)
    return SDValue();

  // Handle the case of two scalars.
  if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
    if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
      SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
      assert((!Folded || !VT.isVector()) &&
             "Can't fold vector ops with scalar operands");
      return Folded;
    }
  }

  // fold (add Sym, c) -> Sym+c
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
    return FoldSymbolOffset(Opcode, VT, GA, Cst2);
  if (isCommutativeBinOp(Opcode))
    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
      return FoldSymbolOffset(Opcode, VT, GA, Cst1);

  // For vectors, extract each constant element into Inputs so we can constant
  // fold them individually.
  BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
  BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
  if (!BV1 || !BV2)
    return SDValue();

  assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");

  EVT SVT = VT.getScalarType();
  SmallVector<SDValue, 4> Outputs;
  for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
    SDValue V1 = BV1->getOperand(I);
    SDValue V2 = BV2->getOperand(I);

    // Avoid BUILD_VECTOR nodes that perform implicit truncation.
    // FIXME: This is valid and could be handled by truncation.
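    // (Illustrative: after type legalization a v4i8 build vector can carry
    // i32 operands whose high bits are implicitly dropped; folding such
    // lanes as full i32 values could produce wrong bits, so bail out.)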
    if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
      return SDValue();

    // Fold one vector element.
    SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);

    // Scalar folding only succeeded if the result is a constant or UNDEF.
    if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
        ScalarResult.getOpcode() != ISD::ConstantFP)
      return SDValue();
    Outputs.push_back(ScalarResult);
  }

  assert(VT.getVectorNumElements() == Outputs.size() &&
         "Vector size mismatch!");

  // We may have a vector type but a scalar result. Create a splat.
  Outputs.resize(VT.getVectorNumElements(), Outputs.back());

  // Build a big vector out of the scalar elements we generated.
  return getBuildVector(VT, SDLoc(), Outputs);
}

SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
                                                   const SDLoc &DL, EVT VT,
                                                   ArrayRef<SDValue> Ops,
                                                   const SDNodeFlags *Flags) {
  // If the opcode is a target-specific ISD node, there's nothing we can
  // do here and the operand rules may not line up with those below, so
  // bail early.
  if (Opcode >= ISD::BUILTIN_OP_END)
    return SDValue();

  // We can only fold vectors - maybe merge with FoldConstantArithmetic
  // someday?
  if (!VT.isVector())
    return SDValue();

  unsigned NumElts = VT.getVectorNumElements();

  auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
    return !Op.getValueType().isVector() ||
           Op.getValueType().getVectorNumElements() == NumElts;
  };

  auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
    BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
    return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
           (BV && BV->isConstant());
  };

  // All operands must be vector types with the same number of elements as
  // the result type and must be either UNDEF or a build vector of constant
  // or UNDEF scalars.
  if (!all_of(Ops, IsConstantBuildVectorOrUndef) ||
      !all_of(Ops, IsScalarOrSameVectorSize))
    return SDValue();

  // If we are comparing vectors, then the result needs to be an i1 boolean
  // that is then sign-extended back to the legal result type.
  EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());

  // Find a legal integer scalar type for constant promotion and
  // ensure that its scalar size is at least as large as the source.
  EVT LegalSVT = VT.getScalarType();
  if (LegalSVT.isInteger()) {
    LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
    if (LegalSVT.bitsLT(VT.getScalarType()))
      return SDValue();
  }

  // Constant fold each scalar lane separately.
  SmallVector<SDValue, 4> ScalarResults;
  for (unsigned i = 0; i != NumElts; i++) {
    SmallVector<SDValue, 4> ScalarOps;
    for (SDValue Op : Ops) {
      EVT InSVT = Op.getValueType().getScalarType();
      BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
      if (!InBV) {
        // We've checked that this is UNDEF or a constant of some kind.
        if (Op.isUndef())
          ScalarOps.push_back(getUNDEF(InSVT));
        else
          ScalarOps.push_back(Op);
        continue;
      }

      SDValue ScalarOp = InBV->getOperand(i);
      EVT ScalarVT = ScalarOp.getValueType();

      // Build vector (integer) scalar operands may need implicit
      // truncation - do this before constant folding.
3750 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 3751 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 3752 3753 ScalarOps.push_back(ScalarOp); 3754 } 3755 3756 // Constant fold the scalar operands. 3757 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 3758 3759 // Legalize the (integer) scalar constant if necessary. 3760 if (LegalSVT != SVT) 3761 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 3762 3763 // Scalar folding only succeeded if the result is a constant or UNDEF. 3764 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 3765 ScalarResult.getOpcode() != ISD::ConstantFP) 3766 return SDValue(); 3767 ScalarResults.push_back(ScalarResult); 3768 } 3769 3770 return getBuildVector(VT, DL, ScalarResults); 3771 } 3772 3773 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 3774 SDValue N1, SDValue N2, 3775 const SDNodeFlags *Flags) { 3776 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 3777 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 3778 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 3779 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 3780 3781 // Canonicalize constant to RHS if commutative. 3782 if (isCommutativeBinOp(Opcode)) { 3783 if (N1C && !N2C) { 3784 std::swap(N1C, N2C); 3785 std::swap(N1, N2); 3786 } else if (N1CFP && !N2CFP) { 3787 std::swap(N1CFP, N2CFP); 3788 std::swap(N1, N2); 3789 } 3790 } 3791 3792 switch (Opcode) { 3793 default: break; 3794 case ISD::TokenFactor: 3795 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 3796 N2.getValueType() == MVT::Other && "Invalid token factor!"); 3797 // Fold trivial token factors. 3798 if (N1.getOpcode() == ISD::EntryToken) return N2; 3799 if (N2.getOpcode() == ISD::EntryToken) return N1; 3800 if (N1 == N2) return N1; 3801 break; 3802 case ISD::CONCAT_VECTORS: { 3803 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 3804 SDValue Ops[] = {N1, N2}; 3805 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 3806 return V; 3807 break; 3808 } 3809 case ISD::AND: 3810 assert(VT.isInteger() && "This operator does not apply to FP types!"); 3811 assert(N1.getValueType() == N2.getValueType() && 3812 N1.getValueType() == VT && "Binary operator types must match!"); 3813 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 3814 // worth handling here. 3815 if (N2C && N2C->isNullValue()) 3816 return N2; 3817 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 3818 return N1; 3819 break; 3820 case ISD::OR: 3821 case ISD::XOR: 3822 case ISD::ADD: 3823 case ISD::SUB: 3824 assert(VT.isInteger() && "This operator does not apply to FP types!"); 3825 assert(N1.getValueType() == N2.getValueType() && 3826 N1.getValueType() == VT && "Binary operator types must match!"); 3827 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 3828 // it's worth handling here. 
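    // e.g. splitting an i64 value into lo/hi halves on a 32-bit target
    // frequently materializes (add x, 0) or (or x, 0) nodes, which this
    // check folds away on creation.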
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::SREM:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    if (getTarget().Options.UnsafeFPMath) {
      if (Opcode == ISD::FADD) {
        // x+0 --> x
        if (N2CFP && N2CFP->getValueAPF().isZero())
          return N1;
      } else if (Opcode == ISD::FSUB) {
        // x-0 --> x
        if (N2CFP && N2CFP->getValueAPF().isZero())
          return N1;
      } else if (Opcode == ISD::FMUL) {
        // x*0 --> 0
        if (N2CFP && N2CFP->isZero())
          return N2;
        // x*1 --> x
        if (N2CFP && N2CFP->isExactlyValue(1.0))
          return N1;
      }
    }
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FCOPYSIGN:   // N1 and result must match. N1/N2 need not match.
    assert(N1.getValueType() == VT &&
           N1.getValueType().isFloatingPoint() &&
           N2.getValueType().isFloatingPoint() &&
           "Invalid FCOPYSIGN!");
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    assert(VT == N1.getValueType() &&
           "Shift operators' return type must be the same as their first arg");
    assert(VT.isInteger() && N2.getValueType().isInteger() &&
           "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must have the same type as their first arg");
    // Verify that the shift amount VT is big enough to hold valid shift
    // amounts. This catches things like trying to shift an i1024 value by an
    // i8, which is easy to fall into in generic code that uses
    // TLI.getShiftAmountTy().
    assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
           "Invalid use of small shift amount with oversized value!");

    // Always fold shifts of i1 values so the code generator doesn't need to
    // handle them. Since we know the size of the shift has to be less than the
    // size of the value, the shift/rotate count is guaranteed to be zero.
    if (VT == MVT::i1)
      return N1;
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::FP_ROUND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg round!");
    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
           "Cannot FP_ROUND_INREG integer types");
    assert(EVT.isVector() == VT.isVector() &&
           "FP_ROUND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in FP_ROUND_INREG");
    assert(EVT.bitsLE(VT) && "Not rounding down!");
    (void)EVT;
    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
3920 break; 3921 } 3922 case ISD::FP_ROUND: 3923 assert(VT.isFloatingPoint() && 3924 N1.getValueType().isFloatingPoint() && 3925 VT.bitsLE(N1.getValueType()) && 3926 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && 3927 "Invalid FP_ROUND!"); 3928 if (N1.getValueType() == VT) return N1; // noop conversion. 3929 break; 3930 case ISD::AssertSext: 3931 case ISD::AssertZext: { 3932 EVT EVT = cast<VTSDNode>(N2)->getVT(); 3933 assert(VT == N1.getValueType() && "Not an inreg extend!"); 3934 assert(VT.isInteger() && EVT.isInteger() && 3935 "Cannot *_EXTEND_INREG FP types"); 3936 assert(!EVT.isVector() && 3937 "AssertSExt/AssertZExt type should be the vector element type " 3938 "rather than the vector type!"); 3939 assert(EVT.bitsLE(VT) && "Not extending!"); 3940 if (VT == EVT) return N1; // noop assertion. 3941 break; 3942 } 3943 case ISD::SIGN_EXTEND_INREG: { 3944 EVT EVT = cast<VTSDNode>(N2)->getVT(); 3945 assert(VT == N1.getValueType() && "Not an inreg extend!"); 3946 assert(VT.isInteger() && EVT.isInteger() && 3947 "Cannot *_EXTEND_INREG FP types"); 3948 assert(EVT.isVector() == VT.isVector() && 3949 "SIGN_EXTEND_INREG type should be vector iff the operand " 3950 "type is vector!"); 3951 assert((!EVT.isVector() || 3952 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 3953 "Vector element counts must match in SIGN_EXTEND_INREG"); 3954 assert(EVT.bitsLE(VT) && "Not extending!"); 3955 if (EVT == VT) return N1; // Not actually extending 3956 3957 auto SignExtendInReg = [&](APInt Val) { 3958 unsigned FromBits = EVT.getScalarSizeInBits(); 3959 Val <<= Val.getBitWidth() - FromBits; 3960 Val = Val.ashr(Val.getBitWidth() - FromBits); 3961 return getConstant(Val, DL, VT.getScalarType()); 3962 }; 3963 3964 if (N1C) { 3965 const APInt &Val = N1C->getAPIntValue(); 3966 return SignExtendInReg(Val); 3967 } 3968 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 3969 SmallVector<SDValue, 8> Ops; 3970 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 3971 SDValue Op = N1.getOperand(i); 3972 if (Op.isUndef()) { 3973 Ops.push_back(getUNDEF(VT.getScalarType())); 3974 continue; 3975 } 3976 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 3977 APInt Val = C->getAPIntValue(); 3978 Val = Val.zextOrTrunc(VT.getScalarSizeInBits()); 3979 Ops.push_back(SignExtendInReg(Val)); 3980 continue; 3981 } 3982 break; 3983 } 3984 if (Ops.size() == VT.getVectorNumElements()) 3985 return getBuildVector(VT, DL, Ops); 3986 } 3987 break; 3988 } 3989 case ISD::EXTRACT_VECTOR_ELT: 3990 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 3991 if (N1.isUndef()) 3992 return getUNDEF(VT); 3993 3994 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 3995 if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 3996 return getUNDEF(VT); 3997 3998 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 3999 // expanding copies of large vectors from registers. 4000 if (N2C && 4001 N1.getOpcode() == ISD::CONCAT_VECTORS && 4002 N1.getNumOperands() > 0) { 4003 unsigned Factor = 4004 N1.getOperand(0).getValueType().getVectorNumElements(); 4005 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 4006 N1.getOperand(N2C->getZExtValue() / Factor), 4007 getConstant(N2C->getZExtValue() % Factor, DL, 4008 N2.getValueType())); 4009 } 4010 4011 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 4012 // expanding large vector constants. 
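    // For instance (illustrative), if a v4i16 build vector carries
    // promoted i32 operands, extracting lane 2 with result type i16
    // returns a truncate of that i32 operand rather than the raw operand,
    // making the implicit truncation explicit.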
    if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue Elt = N1.getOperand(N2C->getZExtValue());

      if (VT != Elt.getValueType())
        // If the vector element type is not legal, the BUILD_VECTOR operands
        // are promoted and implicitly truncated, and the result implicitly
        // extended. Make that explicit here.
        Elt = getAnyExtOrTrunc(Elt, DL, VT);

      return Elt;
    }

    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
    // operations are lowered to scalars.
    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      // If the indices are the same, return the inserted element; if the
      // indices are known to differ, extract the element from the original
      // vector.
      SDValue N1Op2 = N1.getOperand(2);
      ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);

      if (N1Op2C && N2C) {
        if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
          if (VT == N1.getOperand(1).getValueType())
            return N1.getOperand(1);
          else
            return getSExtOrTrunc(N1.getOperand(1), DL, VT);
        }

        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
      }
    }
    break;
  case ISD::EXTRACT_ELEMENT:
    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
    assert(!N1.getValueType().isVector() && !VT.isVector() &&
           (N1.getValueType().isInteger() == VT.isInteger()) &&
           N1.getValueType() != VT &&
           "Wrong types for EXTRACT_ELEMENT!");

    // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
    // 64-bit integers into 32-bit parts. Instead of building the extract of
    // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
    if (N1.getOpcode() == ISD::BUILD_PAIR)
      return N1.getOperand(N2C->getZExtValue());

    // EXTRACT_ELEMENT of a constant int is also very common.
    if (N1C) {
      unsigned ElementSize = VT.getSizeInBits();
      unsigned Shift = ElementSize * N2C->getZExtValue();
      APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
      return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
    }
    break;
  case ISD::EXTRACT_SUBVECTOR:
    if (VT.isSimple() && N1.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             "Extract subvector VTs must be vectors!");
      assert(VT.getVectorElementType() ==
             N1.getValueType().getVectorElementType() &&
             "Extract subvector VTs must have the same element type!");
      assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
             "Extract subvector must be from larger vector to smaller vector!");

      if (N2C) {
        assert((VT.getVectorNumElements() + N2C->getZExtValue()
                <= N1.getValueType().getVectorNumElements())
               && "Extract subvector overflow!");
      }

      // Trivial extraction.
      if (VT.getSimpleVT() == N1.getSimpleValueType())
        return N1;

      // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
      if (N1.isUndef())
        return getUNDEF(VT);

      // EXTRACT_SUBVECTOR of CONCAT_VECTORS can be simplified if the pieces
      // of the concat have the same type as the extract.
      if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
          N1.getNumOperands() > 0 &&
          VT == N1.getOperand(0).getValueType()) {
        unsigned Factor = VT.getVectorNumElements();
        return N1.getOperand(N2C->getZExtValue() / Factor);
      }

      // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
      // during shuffle legalization.
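      // i.e. (extract_subvector (insert_subvector V, X, Idx), Idx) folds
      // straight to X when the extracted type matches X's type, as the
      // check below verifies.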
4102 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 4103 VT == N1.getOperand(1).getValueType()) 4104 return N1.getOperand(1); 4105 } 4106 break; 4107 } 4108 4109 // Perform trivial constant folding. 4110 if (SDValue SV = 4111 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode())) 4112 return SV; 4113 4114 // Constant fold FP operations. 4115 bool HasFPExceptions = TLI->hasFloatingPointExceptions(); 4116 if (N1CFP) { 4117 if (N2CFP) { 4118 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF(); 4119 APFloat::opStatus s; 4120 switch (Opcode) { 4121 case ISD::FADD: 4122 s = V1.add(V2, APFloat::rmNearestTiesToEven); 4123 if (!HasFPExceptions || s != APFloat::opInvalidOp) 4124 return getConstantFP(V1, DL, VT); 4125 break; 4126 case ISD::FSUB: 4127 s = V1.subtract(V2, APFloat::rmNearestTiesToEven); 4128 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 4129 return getConstantFP(V1, DL, VT); 4130 break; 4131 case ISD::FMUL: 4132 s = V1.multiply(V2, APFloat::rmNearestTiesToEven); 4133 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 4134 return getConstantFP(V1, DL, VT); 4135 break; 4136 case ISD::FDIV: 4137 s = V1.divide(V2, APFloat::rmNearestTiesToEven); 4138 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 4139 s!=APFloat::opDivByZero)) { 4140 return getConstantFP(V1, DL, VT); 4141 } 4142 break; 4143 case ISD::FREM : 4144 s = V1.mod(V2); 4145 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 4146 s!=APFloat::opDivByZero)) { 4147 return getConstantFP(V1, DL, VT); 4148 } 4149 break; 4150 case ISD::FCOPYSIGN: 4151 V1.copySign(V2); 4152 return getConstantFP(V1, DL, VT); 4153 default: break; 4154 } 4155 } 4156 4157 if (Opcode == ISD::FP_ROUND) { 4158 APFloat V = N1CFP->getValueAPF(); // make copy 4159 bool ignored; 4160 // This can return overflow, underflow, or inexact; we don't care. 4161 // FIXME need to be more flexible about rounding mode. 4162 (void)V.convert(EVTToAPFloatSemantics(VT), 4163 APFloat::rmNearestTiesToEven, &ignored); 4164 return getConstantFP(V, DL, VT); 4165 } 4166 } 4167 4168 // Canonicalize an UNDEF to the RHS, even over a constant. 4169 if (N1.isUndef()) { 4170 if (isCommutativeBinOp(Opcode)) { 4171 std::swap(N1, N2); 4172 } else { 4173 switch (Opcode) { 4174 case ISD::FP_ROUND_INREG: 4175 case ISD::SIGN_EXTEND_INREG: 4176 case ISD::SUB: 4177 case ISD::FSUB: 4178 case ISD::FDIV: 4179 case ISD::FREM: 4180 case ISD::SRA: 4181 return N1; // fold op(undef, arg2) -> undef 4182 case ISD::UDIV: 4183 case ISD::SDIV: 4184 case ISD::UREM: 4185 case ISD::SREM: 4186 case ISD::SRL: 4187 case ISD::SHL: 4188 if (!VT.isVector()) 4189 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 4190 // For vectors, we can't easily build an all zero vector, just return 4191 // the LHS. 4192 return N2; 4193 } 4194 } 4195 } 4196 4197 // Fold a bunch of operators when the RHS is undef. 4198 if (N2.isUndef()) { 4199 switch (Opcode) { 4200 case ISD::XOR: 4201 if (N1.isUndef()) 4202 // Handle undef ^ undef -> 0 special case. This is a common 4203 // idiom (misuse). 
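      // (Programmers sometimes XOR a register with itself to produce zero;
      // folding undef ^ undef to 0 preserves that intent.)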
4204 return getConstant(0, DL, VT); 4205 LLVM_FALLTHROUGH; 4206 case ISD::ADD: 4207 case ISD::ADDC: 4208 case ISD::ADDE: 4209 case ISD::SUB: 4210 case ISD::UDIV: 4211 case ISD::SDIV: 4212 case ISD::UREM: 4213 case ISD::SREM: 4214 return N2; // fold op(arg1, undef) -> undef 4215 case ISD::FADD: 4216 case ISD::FSUB: 4217 case ISD::FMUL: 4218 case ISD::FDIV: 4219 case ISD::FREM: 4220 if (getTarget().Options.UnsafeFPMath) 4221 return N2; 4222 break; 4223 case ISD::MUL: 4224 case ISD::AND: 4225 case ISD::SRL: 4226 case ISD::SHL: 4227 if (!VT.isVector()) 4228 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 4229 // For vectors, we can't easily build an all zero vector, just return 4230 // the LHS. 4231 return N1; 4232 case ISD::OR: 4233 if (!VT.isVector()) 4234 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT); 4235 // For vectors, we can't easily build an all one vector, just return 4236 // the LHS. 4237 return N1; 4238 case ISD::SRA: 4239 return N1; 4240 } 4241 } 4242 4243 // Memoize this node if possible. 4244 SDNode *N; 4245 SDVTList VTs = getVTList(VT); 4246 if (VT != MVT::Glue) { 4247 SDValue Ops[] = {N1, N2}; 4248 FoldingSetNodeID ID; 4249 AddNodeIDNode(ID, Opcode, VTs, Ops); 4250 void *IP = nullptr; 4251 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4252 if (Flags) 4253 E->intersectFlagsWith(Flags); 4254 return SDValue(E, 0); 4255 } 4256 4257 N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags); 4258 CSEMap.InsertNode(N, IP); 4259 } else { 4260 N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags); 4261 } 4262 4263 InsertNode(N); 4264 return SDValue(N, 0); 4265 } 4266 4267 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4268 SDValue N1, SDValue N2, SDValue N3) { 4269 // Perform various simplifications. 4270 switch (Opcode) { 4271 case ISD::FMA: { 4272 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 4273 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 4274 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 4275 if (N1CFP && N2CFP && N3CFP) { 4276 APFloat V1 = N1CFP->getValueAPF(); 4277 const APFloat &V2 = N2CFP->getValueAPF(); 4278 const APFloat &V3 = N3CFP->getValueAPF(); 4279 APFloat::opStatus s = 4280 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 4281 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp) 4282 return getConstantFP(V1, DL, VT); 4283 } 4284 break; 4285 } 4286 case ISD::CONCAT_VECTORS: { 4287 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 4288 SDValue Ops[] = {N1, N2, N3}; 4289 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 4290 return V; 4291 break; 4292 } 4293 case ISD::SETCC: { 4294 // Use FoldSetCC to simplify SETCC's. 4295 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 4296 return V; 4297 // Vector constant folding. 
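    // Note that for a vector setcc the per-lane fold below produces an i1
    // that FoldConstantVectorArithmetic sign-extends back to the legal
    // result type, giving the usual all-zeros/all-ones lane encoding.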
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
      return V;
    break;
  }
  case ISD::SELECT:
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
      if (N1C->getZExtValue())
        return N2;             // select true, X, Y -> X
      return N3;               // select false, X, Y -> Y
    }

    if (N2 == N3) return N2;   // select C, X, X -> X
    break;
  case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
  case ISD::INSERT_VECTOR_ELT: {
    ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
    // INSERT_VECTOR_ELT into an out-of-bounds element is an UNDEF.
    if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
      return getUNDEF(VT);
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue Index = N3;
    if (VT.isSimple() && N1.getValueType().isSimple()
        && N2.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             N2.getValueType().isVector() &&
             "Insert subvector VTs must be vectors");
      assert(VT == N1.getValueType() &&
             "Dest and insert subvector source types must match!");
      assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
             "Insert subvector must be from smaller vector to larger vector!");
      if (isa<ConstantSDNode>(Index)) {
        assert((N2.getValueType().getVectorNumElements() +
                cast<ConstantSDNode>(Index)->getZExtValue()
                <= VT.getVectorNumElements())
               && "Insert subvector overflow!");
      }

      // Trivial insertion.
      if (VT.getSimpleVT() == N2.getSimpleValueType())
        return N2;
    }
    break;
  }
  case ISD::BITCAST:
    // Fold bit_convert nodes from a type to themselves.
    if (N1.getValueType() == VT)
      return N1;
    break;
  }

  // Memoize node if it doesn't produce a flag.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {N1, N2, N3};
  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VT, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VT, Ops);
}

/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
/// the incoming stack arguments to be loaded from the stack.
SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
  SmallVector<SDValue, 8> ArgChains;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument.
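  // Incoming stack arguments live in fixed frame objects, which are given
  // negative frame indices; the FI->getIndex() < 0 test below keys on that,
  // so loads from locals (non-negative indices) are deliberately skipped.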
4399 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 4400 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 4401 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 4402 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 4403 if (FI->getIndex() < 0) 4404 ArgChains.push_back(SDValue(L, 1)); 4405 4406 // Build a tokenfactor for all the chains. 4407 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 4408 } 4409 4410 /// getMemsetValue - Vectorized representation of the memset value 4411 /// operand. 4412 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 4413 const SDLoc &dl) { 4414 assert(!Value.isUndef()); 4415 4416 unsigned NumBits = VT.getScalarSizeInBits(); 4417 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 4418 assert(C->getAPIntValue().getBitWidth() == 8); 4419 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 4420 if (VT.isInteger()) 4421 return DAG.getConstant(Val, dl, VT); 4422 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 4423 VT); 4424 } 4425 4426 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 4427 EVT IntVT = VT.getScalarType(); 4428 if (!IntVT.isInteger()) 4429 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 4430 4431 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 4432 if (NumBits > 8) { 4433 // Use a multiplication with 0x010101... to extend the input to the 4434 // required length. 4435 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 4436 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 4437 DAG.getConstant(Magic, dl, IntVT)); 4438 } 4439 4440 if (VT != Value.getValueType() && !VT.isInteger()) 4441 Value = DAG.getBitcast(VT.getScalarType(), Value); 4442 if (VT != Value.getValueType()) 4443 Value = DAG.getSplatBuildVector(VT, dl, Value); 4444 4445 return Value; 4446 } 4447 4448 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 4449 /// used when a memcpy is turned into a memset when the source is a constant 4450 /// string ptr. 4451 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 4452 const TargetLowering &TLI, StringRef Str) { 4453 // Handle vector with all elements zero. 4454 if (Str.empty()) { 4455 if (VT.isInteger()) 4456 return DAG.getConstant(0, dl, VT); 4457 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 4458 return DAG.getConstantFP(0.0, dl, VT); 4459 else if (VT.isVector()) { 4460 unsigned NumElts = VT.getVectorNumElements(); 4461 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; 4462 return DAG.getNode(ISD::BITCAST, dl, VT, 4463 DAG.getConstant(0, dl, 4464 EVT::getVectorVT(*DAG.getContext(), 4465 EltVT, NumElts))); 4466 } else 4467 llvm_unreachable("Expected type!"); 4468 } 4469 4470 assert(!VT.isVector() && "Can't handle vector type here!"); 4471 unsigned NumVTBits = VT.getSizeInBits(); 4472 unsigned NumVTBytes = NumVTBits / 8; 4473 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size())); 4474 4475 APInt Val(NumVTBits, 0); 4476 if (DAG.getDataLayout().isLittleEndian()) { 4477 for (unsigned i = 0; i != NumBytes; ++i) 4478 Val |= (uint64_t)(unsigned char)Str[i] << i*8; 4479 } else { 4480 for (unsigned i = 0; i != NumBytes; ++i) 4481 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8; 4482 } 4483 4484 // If the "cost" of materializing the integer immediate is less than the cost 4485 // of a load, then it is cost effective to turn the load into the immediate. 
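  // e.g. (illustrative) a 4-byte copy from the literal "abc" builds the
  // little-endian immediate 0x00636261, which many targets can materialize
  // more cheaply than a load from the string's storage.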
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
    return DAG.getConstant(Val, dl, VT);
  return SDValue(nullptr, 0);
}

SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
                                           const SDLoc &DL) {
  EVT VT = Base.getValueType();
  return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
}

/// isMemSrcFromString - Returns true if the memcpy source is a string
/// constant.
static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
  uint64_t SrcDelta = 0;
  GlobalAddressSDNode *G = nullptr;
  if (Src.getOpcode() == ISD::GlobalAddress)
    G = cast<GlobalAddressSDNode>(Src);
  else if (Src.getOpcode() == ISD::ADD &&
           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
           Src.getOperand(1).getOpcode() == ISD::Constant) {
    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
  }
  if (!G)
    return false;

  return getConstantStringInfo(G->getGlobal(), Str,
                               SrcDelta + G->getOffset(), false);
}

/// Determines the optimal series of memory ops to replace the memset / memcpy.
/// Return true if the number of memory ops is below the threshold (Limit).
/// It returns the types of the sequence of memory ops to perform
/// memset / memcpy by reference.
static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
                                     unsigned Limit, uint64_t Size,
                                     unsigned DstAlign, unsigned SrcAlign,
                                     bool IsMemset,
                                     bool ZeroMemset,
                                     bool MemcpyStrSrc,
                                     bool AllowOverlap,
                                     unsigned DstAS, unsigned SrcAS,
                                     SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
         "Expecting memcpy / memset source to meet alignment requirement!");
  // If 'SrcAlign' is zero, that means the memory operation does not need to
  // load the value, i.e. memset or memcpy from constant string. Otherwise,
  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
  // is the specified alignment of the memory operation. If it is zero, that
  // means it's possible to change the alignment of the destination.
  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  // not need to be loaded.
  EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
                                   IsMemset, ZeroMemset, MemcpyStrSrc,
                                   DAG.getMachineFunction());

  if (VT == MVT::Other) {
    if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(DstAS) ||
        TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign)) {
      VT = TLI.getPointerTy(DAG.getDataLayout(), DstAS);
    } else {
      switch (DstAlign & 7) {
      case 0:  VT = MVT::i64; break;
      case 4:  VT = MVT::i32; break;
      case 2:  VT = MVT::i16; break;
      default: VT = MVT::i8;  break;
      }
    }

    MVT LVT = MVT::i64;
    while (!TLI.isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  while (Size != 0) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
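      // e.g. (illustrative) a 14-byte memcpy lowered with i64 ops leaves a
      // 6-byte tail, which this loop retries as an i32 store followed by an
      // i16 store.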
4572 EVT NewVT = VT; 4573 unsigned NewVTSize; 4574 4575 bool Found = false; 4576 if (VT.isVector() || VT.isFloatingPoint()) { 4577 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32; 4578 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) && 4579 TLI.isSafeMemOpType(NewVT.getSimpleVT())) 4580 Found = true; 4581 else if (NewVT == MVT::i64 && 4582 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) && 4583 TLI.isSafeMemOpType(MVT::f64)) { 4584 // i64 is usually not legal on 32-bit targets, but f64 may be. 4585 NewVT = MVT::f64; 4586 Found = true; 4587 } 4588 } 4589 4590 if (!Found) { 4591 do { 4592 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1); 4593 if (NewVT == MVT::i8) 4594 break; 4595 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT())); 4596 } 4597 NewVTSize = NewVT.getSizeInBits() / 8; 4598 4599 // If the new VT cannot cover all of the remaining bits, then consider 4600 // issuing a (or a pair of) unaligned and overlapping load / store. 4601 // FIXME: Only does this for 64-bit or more since we don't have proper 4602 // cost model for unaligned load / store. 4603 bool Fast; 4604 if (NumMemOps && AllowOverlap && 4605 VTSize >= 8 && NewVTSize < Size && 4606 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast) 4607 VTSize = Size; 4608 else { 4609 VT = NewVT; 4610 VTSize = NewVTSize; 4611 } 4612 } 4613 4614 if (++NumMemOps > Limit) 4615 return false; 4616 4617 MemOps.push_back(VT); 4618 Size -= VTSize; 4619 } 4620 4621 return true; 4622 } 4623 4624 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { 4625 // On Darwin, -Os means optimize for size without hurting performance, so 4626 // only really optimize for size when -Oz (MinSize) is used. 4627 if (MF.getTarget().getTargetTriple().isOSDarwin()) 4628 return MF.getFunction()->optForMinSize(); 4629 return MF.getFunction()->optForSize(); 4630 } 4631 4632 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 4633 SDValue Chain, SDValue Dst, SDValue Src, 4634 uint64_t Size, unsigned Align, 4635 bool isVol, bool AlwaysInline, 4636 MachinePointerInfo DstPtrInfo, 4637 MachinePointerInfo SrcPtrInfo) { 4638 // Turn a memcpy of undef to nop. 4639 if (Src.isUndef()) 4640 return Chain; 4641 4642 // Expand memcpy to a series of load and store ops if the size operand falls 4643 // below a certain threshold. 4644 // TODO: In the AlwaysInline case, if the size is big then generate a loop 4645 // rather than maybe a humongous number of loads and stores. 4646 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4647 std::vector<EVT> MemOps; 4648 bool DstAlignCanChange = false; 4649 MachineFunction &MF = DAG.getMachineFunction(); 4650 MachineFrameInfo &MFI = MF.getFrameInfo(); 4651 bool OptSize = shouldLowerMemFuncForSize(MF); 4652 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 4653 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 4654 DstAlignCanChange = true; 4655 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 4656 if (Align > SrcAlign) 4657 SrcAlign = Align; 4658 StringRef Str; 4659 bool CopyFromStr = isMemSrcFromString(Src, Str); 4660 bool isZeroStr = CopyFromStr && Str.empty(); 4661 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 4662 4663 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 4664 (DstAlignCanChange ? 0 : Align), 4665 (isZeroStr ? 
0 : SrcAlign), 4666 false, false, CopyFromStr, true, 4667 DstPtrInfo.getAddrSpace(), 4668 SrcPtrInfo.getAddrSpace(), 4669 DAG, TLI)) 4670 return SDValue(); 4671 4672 if (DstAlignCanChange) { 4673 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 4674 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 4675 4676 // Don't promote to an alignment that would require dynamic stack 4677 // realignment. 4678 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 4679 if (!TRI->needsStackRealignment(MF)) 4680 while (NewAlign > Align && 4681 DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign)) 4682 NewAlign /= 2; 4683 4684 if (NewAlign > Align) { 4685 // Give the stack frame object a larger alignment if needed. 4686 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 4687 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 4688 Align = NewAlign; 4689 } 4690 } 4691 4692 MachineMemOperand::Flags MMOFlags = 4693 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 4694 SmallVector<SDValue, 8> OutChains; 4695 unsigned NumMemOps = MemOps.size(); 4696 uint64_t SrcOff = 0, DstOff = 0; 4697 for (unsigned i = 0; i != NumMemOps; ++i) { 4698 EVT VT = MemOps[i]; 4699 unsigned VTSize = VT.getSizeInBits() / 8; 4700 SDValue Value, Store; 4701 4702 if (VTSize > Size) { 4703 // Issuing an unaligned load / store pair that overlaps with the previous 4704 // pair. Adjust the offset accordingly. 4705 assert(i == NumMemOps-1 && i != 0); 4706 SrcOff -= VTSize - Size; 4707 DstOff -= VTSize - Size; 4708 } 4709 4710 if (CopyFromStr && 4711 (isZeroStr || (VT.isInteger() && !VT.isVector()))) { 4712 // It's unlikely a store of a vector immediate can be done in a single 4713 // instruction. It would require a load from a constantpool first. 4714 // We only handle zero vectors here. 4715 // FIXME: Handle other cases where store of vector immediate is done in 4716 // a single instruction. 4717 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff)); 4718 if (Value.getNode()) 4719 Store = DAG.getStore(Chain, dl, Value, 4720 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 4721 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags); 4722 } 4723 4724 if (!Store.getNode()) { 4725 // The type might not be legal for the target. This should only happen 4726 // if the type is smaller than a legal type, as on PPC, so the right 4727 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 4728 // to Load/Store if NVT==VT. 4729 // FIXME does the case above also need this? 4730 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 4731 assert(NVT.bitsGE(VT)); 4732 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 4733 DAG.getMemBasePlusOffset(Src, SrcOff, dl), 4734 SrcPtrInfo.getWithOffset(SrcOff), VT, 4735 MinAlign(SrcAlign, SrcOff), MMOFlags); 4736 OutChains.push_back(Value.getValue(1)); 4737 Store = DAG.getTruncStore( 4738 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 4739 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags); 4740 } 4741 OutChains.push_back(Store); 4742 SrcOff += VTSize; 4743 DstOff += VTSize; 4744 Size -= VTSize; 4745 } 4746 4747 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 4748 } 4749 4750 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 4751 SDValue Chain, SDValue Dst, SDValue Src, 4752 uint64_t Size, unsigned Align, 4753 bool isVol, bool AlwaysInline, 4754 MachinePointerInfo DstPtrInfo, 4755 MachinePointerInfo SrcPtrInfo) { 4756 // Turn a memmove of undef to nop. 
4757 if (Src.isUndef()) 4758 return Chain; 4759 4760 // Expand memmove to a series of load and store ops if the size operand falls 4761 // below a certain threshold. 4762 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4763 std::vector<EVT> MemOps; 4764 bool DstAlignCanChange = false; 4765 MachineFunction &MF = DAG.getMachineFunction(); 4766 MachineFrameInfo &MFI = MF.getFrameInfo(); 4767 bool OptSize = shouldLowerMemFuncForSize(MF); 4768 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 4769 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 4770 DstAlignCanChange = true; 4771 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 4772 if (Align > SrcAlign) 4773 SrcAlign = Align; 4774 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 4775 4776 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 4777 (DstAlignCanChange ? 0 : Align), SrcAlign, 4778 false, false, false, false, 4779 DstPtrInfo.getAddrSpace(), 4780 SrcPtrInfo.getAddrSpace(), 4781 DAG, TLI)) 4782 return SDValue(); 4783 4784 if (DstAlignCanChange) { 4785 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 4786 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 4787 if (NewAlign > Align) { 4788 // Give the stack frame object a larger alignment if needed. 4789 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 4790 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 4791 Align = NewAlign; 4792 } 4793 } 4794 4795 MachineMemOperand::Flags MMOFlags = 4796 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 4797 uint64_t SrcOff = 0, DstOff = 0; 4798 SmallVector<SDValue, 8> LoadValues; 4799 SmallVector<SDValue, 8> LoadChains; 4800 SmallVector<SDValue, 8> OutChains; 4801 unsigned NumMemOps = MemOps.size(); 4802 for (unsigned i = 0; i < NumMemOps; i++) { 4803 EVT VT = MemOps[i]; 4804 unsigned VTSize = VT.getSizeInBits() / 8; 4805 SDValue Value; 4806 4807 Value = 4808 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), 4809 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, MMOFlags); 4810 LoadValues.push_back(Value); 4811 LoadChains.push_back(Value.getValue(1)); 4812 SrcOff += VTSize; 4813 } 4814 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 4815 OutChains.clear(); 4816 for (unsigned i = 0; i < NumMemOps; i++) { 4817 EVT VT = MemOps[i]; 4818 unsigned VTSize = VT.getSizeInBits() / 8; 4819 SDValue Store; 4820 4821 Store = DAG.getStore(Chain, dl, LoadValues[i], 4822 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 4823 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags); 4824 OutChains.push_back(Store); 4825 DstOff += VTSize; 4826 } 4827 4828 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 4829 } 4830 4831 /// \brief Lower the call to 'memset' intrinsic function into a series of store 4832 /// operations. 4833 /// 4834 /// \param DAG Selection DAG where lowered code is placed. 4835 /// \param dl Link to corresponding IR location. 4836 /// \param Chain Control flow dependency. 4837 /// \param Dst Pointer to destination memory location. 4838 /// \param Src Value of byte to write into the memory. 4839 /// \param Size Number of bytes to write. 4840 /// \param Align Alignment of the destination in bytes. 4841 /// \param isVol True if destination is volatile. 4842 /// \param DstPtrInfo IR information on the memory pointer. 4843 /// \returns New head in the control flow, if lowering was successful, empty 4844 /// SDValue otherwise. 
4845 ///
4846 /// The function tries to replace the 'llvm.memset' intrinsic with several
4847 /// store operations and value calculation code. This is usually profitable
4848 /// for small memory sizes.
4849 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
4850                                SDValue Chain, SDValue Dst, SDValue Src,
4851                                uint64_t Size, unsigned Align, bool isVol,
4852                                MachinePointerInfo DstPtrInfo) {
4853   // Turn a memset of undef to nop.
4854   if (Src.isUndef())
4855     return Chain;
4856
4857   // Expand memset to a series of store ops if the size operand
4858   // falls below a certain threshold.
4859   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4860   std::vector<EVT> MemOps;
4861   bool DstAlignCanChange = false;
4862   MachineFunction &MF = DAG.getMachineFunction();
4863   MachineFrameInfo &MFI = MF.getFrameInfo();
4864   bool OptSize = shouldLowerMemFuncForSize(MF);
4865   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4866   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
4867     DstAlignCanChange = true;
4868   bool IsZeroVal =
4869     isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
4870   if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
4871                                 Size, (DstAlignCanChange ? 0 : Align), 0,
4872                                 true, IsZeroVal, false, true,
4873                                 DstPtrInfo.getAddrSpace(), ~0u,
4874                                 DAG, TLI))
4875     return SDValue();
4876
4877   if (DstAlignCanChange) {
4878     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4879     unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4880     if (NewAlign > Align) {
4881       // Give the stack frame object a larger alignment if needed.
4882       if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
4883         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
4884       Align = NewAlign;
4885     }
4886   }
4887
4888   SmallVector<SDValue, 8> OutChains;
4889   uint64_t DstOff = 0;
4890   unsigned NumMemOps = MemOps.size();
4891
4892   // Find the largest store and generate the bit pattern for it.
4893   EVT LargestVT = MemOps[0];
4894   for (unsigned i = 1; i < NumMemOps; i++)
4895     if (MemOps[i].bitsGT(LargestVT))
4896       LargestVT = MemOps[i];
4897   SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
4898
4899   for (unsigned i = 0; i < NumMemOps; i++) {
4900     EVT VT = MemOps[i];
4901     unsigned VTSize = VT.getSizeInBits() / 8;
4902     if (VTSize > Size) {
4903       // Issuing an unaligned store that overlaps with the previous store.
4904       // Adjust the offset accordingly.
4905       assert(i == NumMemOps-1 && i != 0);
4906       DstOff -= VTSize - Size;
4907     }
4908
4909     // If this store is smaller than the largest store, see whether we can get
4910     // the smaller value for free with a truncate.
4911     SDValue Value = MemSetValue;
4912     if (VT.bitsLT(LargestVT)) {
4913       if (!LargestVT.isVector() && !VT.isVector() &&
4914           TLI.isTruncateFree(LargestVT, VT))
4915         Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
4916       else
4917         Value = getMemsetValue(Src, VT, DAG, dl);
4918     }
4919     assert(Value.getValueType() == VT && "Value with wrong type.");
4920     SDValue Store = DAG.getStore(
4921         Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4922         DstPtrInfo.getWithOffset(DstOff), Align,
4923         isVol ?
MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 4924 OutChains.push_back(Store); 4925 DstOff += VT.getSizeInBits() / 8; 4926 Size -= VTSize; 4927 } 4928 4929 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 4930 } 4931 4932 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 4933 unsigned AS) { 4934 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 4935 // pointer operands can be losslessly bitcasted to pointers of address space 0 4936 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 4937 report_fatal_error("cannot lower memory intrinsic in address space " + 4938 Twine(AS)); 4939 } 4940 } 4941 4942 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 4943 SDValue Src, SDValue Size, unsigned Align, 4944 bool isVol, bool AlwaysInline, bool isTailCall, 4945 MachinePointerInfo DstPtrInfo, 4946 MachinePointerInfo SrcPtrInfo) { 4947 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 4948 4949 // Check to see if we should lower the memcpy to loads and stores first. 4950 // For cases within the target-specified limits, this is the best choice. 4951 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 4952 if (ConstantSize) { 4953 // Memcpy with size zero? Just return the original chain. 4954 if (ConstantSize->isNullValue()) 4955 return Chain; 4956 4957 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 4958 ConstantSize->getZExtValue(),Align, 4959 isVol, false, DstPtrInfo, SrcPtrInfo); 4960 if (Result.getNode()) 4961 return Result; 4962 } 4963 4964 // Then check to see if we should lower the memcpy with target-specific 4965 // code. If the target chooses to do this, this is the next best. 4966 if (TSI) { 4967 SDValue Result = TSI->EmitTargetCodeForMemcpy( 4968 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 4969 DstPtrInfo, SrcPtrInfo); 4970 if (Result.getNode()) 4971 return Result; 4972 } 4973 4974 // If we really need inline code and the target declined to provide it, 4975 // use a (potentially long) sequence of loads and stores. 4976 if (AlwaysInline) { 4977 assert(ConstantSize && "AlwaysInline requires a constant size!"); 4978 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 4979 ConstantSize->getZExtValue(), Align, isVol, 4980 true, DstPtrInfo, SrcPtrInfo); 4981 } 4982 4983 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 4984 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 4985 4986 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 4987 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 4988 // respect volatile, so they may do things like read or write memory 4989 // beyond the given memory regions. But fixing this isn't easy, and most 4990 // people don't care. 4991 4992 // Emit a library call. 
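  // The call emitted below matches the libc prototype
  //   void *memcpy(void *dst, const void *src, size_t n);
  // all three arguments are passed as intptr-typed values and the returned
  // pointer is discarded.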
4993 TargetLowering::ArgListTy Args; 4994 TargetLowering::ArgListEntry Entry; 4995 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 4996 Entry.Node = Dst; Args.push_back(Entry); 4997 Entry.Node = Src; Args.push_back(Entry); 4998 Entry.Node = Size; Args.push_back(Entry); 4999 // FIXME: pass in SDLoc 5000 TargetLowering::CallLoweringInfo CLI(*this); 5001 CLI.setDebugLoc(dl) 5002 .setChain(Chain) 5003 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 5004 Dst.getValueType().getTypeForEVT(*getContext()), 5005 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 5006 TLI->getPointerTy(getDataLayout())), 5007 std::move(Args)) 5008 .setDiscardResult() 5009 .setTailCall(isTailCall); 5010 5011 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5012 return CallResult.second; 5013 } 5014 5015 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 5016 SDValue Src, SDValue Size, unsigned Align, 5017 bool isVol, bool isTailCall, 5018 MachinePointerInfo DstPtrInfo, 5019 MachinePointerInfo SrcPtrInfo) { 5020 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5021 5022 // Check to see if we should lower the memmove to loads and stores first. 5023 // For cases within the target-specified limits, this is the best choice. 5024 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5025 if (ConstantSize) { 5026 // Memmove with size zero? Just return the original chain. 5027 if (ConstantSize->isNullValue()) 5028 return Chain; 5029 5030 SDValue Result = 5031 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 5032 ConstantSize->getZExtValue(), Align, isVol, 5033 false, DstPtrInfo, SrcPtrInfo); 5034 if (Result.getNode()) 5035 return Result; 5036 } 5037 5038 // Then check to see if we should lower the memmove with target-specific 5039 // code. If the target chooses to do this, this is the next best. 5040 if (TSI) { 5041 SDValue Result = TSI->EmitTargetCodeForMemmove( 5042 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 5043 if (Result.getNode()) 5044 return Result; 5045 } 5046 5047 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5048 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 5049 5050 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 5051 // not be safe. See memcpy above for more details. 5052 5053 // Emit a library call. 
5054 TargetLowering::ArgListTy Args; 5055 TargetLowering::ArgListEntry Entry; 5056 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 5057 Entry.Node = Dst; Args.push_back(Entry); 5058 Entry.Node = Src; Args.push_back(Entry); 5059 Entry.Node = Size; Args.push_back(Entry); 5060 // FIXME: pass in SDLoc 5061 TargetLowering::CallLoweringInfo CLI(*this); 5062 CLI.setDebugLoc(dl) 5063 .setChain(Chain) 5064 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 5065 Dst.getValueType().getTypeForEVT(*getContext()), 5066 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 5067 TLI->getPointerTy(getDataLayout())), 5068 std::move(Args)) 5069 .setDiscardResult() 5070 .setTailCall(isTailCall); 5071 5072 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5073 return CallResult.second; 5074 } 5075 5076 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 5077 SDValue Src, SDValue Size, unsigned Align, 5078 bool isVol, bool isTailCall, 5079 MachinePointerInfo DstPtrInfo) { 5080 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5081 5082 // Check to see if we should lower the memset to stores first. 5083 // For cases within the target-specified limits, this is the best choice. 5084 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5085 if (ConstantSize) { 5086 // Memset with size zero? Just return the original chain. 5087 if (ConstantSize->isNullValue()) 5088 return Chain; 5089 5090 SDValue Result = 5091 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 5092 Align, isVol, DstPtrInfo); 5093 5094 if (Result.getNode()) 5095 return Result; 5096 } 5097 5098 // Then check to see if we should lower the memset with target-specific 5099 // code. If the target chooses to do this, this is the next best. 5100 if (TSI) { 5101 SDValue Result = TSI->EmitTargetCodeForMemset( 5102 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); 5103 if (Result.getNode()) 5104 return Result; 5105 } 5106 5107 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5108 5109 // Emit a library call. 
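  // The call emitted below matches the libc prototype
  //   void *memset(void *dst, int value, size_t n);
  // unlike the memcpy / memmove paths above, the value operand keeps its own
  // type rather than being passed as an intptr-typed value.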
5110 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext()); 5111 TargetLowering::ArgListTy Args; 5112 TargetLowering::ArgListEntry Entry; 5113 Entry.Node = Dst; Entry.Ty = IntPtrTy; 5114 Args.push_back(Entry); 5115 Entry.Node = Src; 5116 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 5117 Args.push_back(Entry); 5118 Entry.Node = Size; 5119 Entry.Ty = IntPtrTy; 5120 Args.push_back(Entry); 5121 5122 // FIXME: pass in SDLoc 5123 TargetLowering::CallLoweringInfo CLI(*this); 5124 CLI.setDebugLoc(dl) 5125 .setChain(Chain) 5126 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 5127 Dst.getValueType().getTypeForEVT(*getContext()), 5128 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 5129 TLI->getPointerTy(getDataLayout())), 5130 std::move(Args)) 5131 .setDiscardResult() 5132 .setTailCall(isTailCall); 5133 5134 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5135 return CallResult.second; 5136 } 5137 5138 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5139 SDVTList VTList, ArrayRef<SDValue> Ops, 5140 MachineMemOperand *MMO) { 5141 FoldingSetNodeID ID; 5142 ID.AddInteger(MemVT.getRawBits()); 5143 AddNodeIDNode(ID, Opcode, VTList, Ops); 5144 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5145 void* IP = nullptr; 5146 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5147 cast<AtomicSDNode>(E)->refineAlignment(MMO); 5148 return SDValue(E, 0); 5149 } 5150 5151 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5152 VTList, MemVT, MMO); 5153 createOperands(N, Ops); 5154 5155 CSEMap.InsertNode(N, IP); 5156 InsertNode(N); 5157 return SDValue(N, 0); 5158 } 5159 5160 SDValue SelectionDAG::getAtomicCmpSwap( 5161 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, 5162 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, 5163 unsigned Alignment, AtomicOrdering SuccessOrdering, 5164 AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) { 5165 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 5166 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 5167 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 5168 5169 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5170 Alignment = getEVTAlignment(MemVT); 5171 5172 MachineFunction &MF = getMachineFunction(); 5173 5174 // FIXME: Volatile isn't really correct; we should keep track of atomic 5175 // orderings in the memoperand. 
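  // A compare-and-swap both reads and writes the memory location, so its
  // memory operand is tagged as both a load and a store below.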
5176 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad | 5177 MachineMemOperand::MOStore; 5178 MachineMemOperand *MMO = 5179 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, 5180 AAMDNodes(), nullptr, SynchScope, SuccessOrdering, 5181 FailureOrdering); 5182 5183 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO); 5184 } 5185 5186 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 5187 EVT MemVT, SDVTList VTs, SDValue Chain, 5188 SDValue Ptr, SDValue Cmp, SDValue Swp, 5189 MachineMemOperand *MMO) { 5190 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 5191 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 5192 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 5193 5194 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 5195 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5196 } 5197 5198 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5199 SDValue Chain, SDValue Ptr, SDValue Val, 5200 const Value *PtrVal, unsigned Alignment, 5201 AtomicOrdering Ordering, 5202 SynchronizationScope SynchScope) { 5203 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5204 Alignment = getEVTAlignment(MemVT); 5205 5206 MachineFunction &MF = getMachineFunction(); 5207 // An atomic store does not load. An atomic load does not store. 5208 // (An atomicrmw obviously both loads and stores.) 5209 // For now, atomics are considered to be volatile always, and they are 5210 // chained as such. 5211 // FIXME: Volatile isn't really correct; we should keep track of atomic 5212 // orderings in the memoperand. 5213 auto Flags = MachineMemOperand::MOVolatile; 5214 if (Opcode != ISD::ATOMIC_STORE) 5215 Flags |= MachineMemOperand::MOLoad; 5216 if (Opcode != ISD::ATOMIC_LOAD) 5217 Flags |= MachineMemOperand::MOStore; 5218 5219 MachineMemOperand *MMO = 5220 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 5221 MemVT.getStoreSize(), Alignment, AAMDNodes(), 5222 nullptr, SynchScope, Ordering); 5223 5224 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO); 5225 } 5226 5227 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5228 SDValue Chain, SDValue Ptr, SDValue Val, 5229 MachineMemOperand *MMO) { 5230 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 5231 Opcode == ISD::ATOMIC_LOAD_SUB || 5232 Opcode == ISD::ATOMIC_LOAD_AND || 5233 Opcode == ISD::ATOMIC_LOAD_OR || 5234 Opcode == ISD::ATOMIC_LOAD_XOR || 5235 Opcode == ISD::ATOMIC_LOAD_NAND || 5236 Opcode == ISD::ATOMIC_LOAD_MIN || 5237 Opcode == ISD::ATOMIC_LOAD_MAX || 5238 Opcode == ISD::ATOMIC_LOAD_UMIN || 5239 Opcode == ISD::ATOMIC_LOAD_UMAX || 5240 Opcode == ISD::ATOMIC_SWAP || 5241 Opcode == ISD::ATOMIC_STORE) && 5242 "Invalid Atomic Op"); 5243 5244 EVT VT = Val.getValueType(); 5245 5246 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : 5247 getVTList(VT, MVT::Other); 5248 SDValue Ops[] = {Chain, Ptr, Val}; 5249 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5250 } 5251 5252 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5253 EVT VT, SDValue Chain, SDValue Ptr, 5254 MachineMemOperand *MMO) { 5255 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 5256 5257 SDVTList VTs = getVTList(VT, MVT::Other); 5258 SDValue Ops[] = {Chain, Ptr}; 5259 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5260 } 5261 5262 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 
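/// For example (an illustrative sketch; Lo and Hi stand for previously
/// computed values), a lowering routine that has split a result in two can
/// rejoin the pieces:
/// \code
///   SDValue Parts[] = { Lo, Hi };
///   SDValue Merged = DAG.getMergeValues(Parts, dl);
/// \endcode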
5263 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 5264 if (Ops.size() == 1) 5265 return Ops[0]; 5266 5267 SmallVector<EVT, 4> VTs; 5268 VTs.reserve(Ops.size()); 5269 for (unsigned i = 0; i < Ops.size(); ++i) 5270 VTs.push_back(Ops[i].getValueType()); 5271 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 5272 } 5273 5274 SDValue SelectionDAG::getMemIntrinsicNode( 5275 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 5276 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, bool Vol, 5277 bool ReadMem, bool WriteMem, unsigned Size) { 5278 if (Align == 0) // Ensure that codegen never sees alignment 0 5279 Align = getEVTAlignment(MemVT); 5280 5281 MachineFunction &MF = getMachineFunction(); 5282 auto Flags = MachineMemOperand::MONone; 5283 if (WriteMem) 5284 Flags |= MachineMemOperand::MOStore; 5285 if (ReadMem) 5286 Flags |= MachineMemOperand::MOLoad; 5287 if (Vol) 5288 Flags |= MachineMemOperand::MOVolatile; 5289 if (!Size) 5290 Size = MemVT.getStoreSize(); 5291 MachineMemOperand *MMO = 5292 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align); 5293 5294 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 5295 } 5296 5297 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 5298 SDVTList VTList, 5299 ArrayRef<SDValue> Ops, EVT MemVT, 5300 MachineMemOperand *MMO) { 5301 assert((Opcode == ISD::INTRINSIC_VOID || 5302 Opcode == ISD::INTRINSIC_W_CHAIN || 5303 Opcode == ISD::PREFETCH || 5304 Opcode == ISD::LIFETIME_START || 5305 Opcode == ISD::LIFETIME_END || 5306 (Opcode <= INT_MAX && 5307 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 5308 "Opcode is not a memory-accessing opcode!"); 5309 5310 // Memoize the node unless it returns a flag. 5311 MemIntrinsicSDNode *N; 5312 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 5313 FoldingSetNodeID ID; 5314 AddNodeIDNode(ID, Opcode, VTList, Ops); 5315 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5316 void *IP = nullptr; 5317 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5318 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 5319 return SDValue(E, 0); 5320 } 5321 5322 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5323 VTList, MemVT, MMO); 5324 createOperands(N, Ops); 5325 5326 CSEMap.InsertNode(N, IP); 5327 } else { 5328 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5329 VTList, MemVT, MMO); 5330 createOperands(N, Ops); 5331 } 5332 InsertNode(N); 5333 return SDValue(N, 0); 5334 } 5335 5336 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 5337 /// MachinePointerInfo record from it. This is particularly useful because the 5338 /// code generator has many cases where it doesn't bother passing in a 5339 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 5340 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, 5341 int64_t Offset = 0) { 5342 // If this is FI+Offset, we can model it. 5343 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 5344 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 5345 FI->getIndex(), Offset); 5346 5347 // If this is (FI+Offset1)+Offset2, we can model it. 
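  // For instance (illustrative values), Ptr = (add FrameIndex:i64<1>,
  // Constant:i64<8>) yields the fixed-stack pointer info for frame index 1
  // at byte offset Offset + 8.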
5348 if (Ptr.getOpcode() != ISD::ADD || 5349 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 5350 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 5351 return MachinePointerInfo(); 5352 5353 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 5354 return MachinePointerInfo::getFixedStack( 5355 DAG.getMachineFunction(), FI, 5356 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 5357 } 5358 5359 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 5360 /// MachinePointerInfo record from it. This is particularly useful because the 5361 /// code generator has many cases where it doesn't bother passing in a 5362 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 5363 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, 5364 SDValue OffsetOp) { 5365 // If the 'Offset' value isn't a constant, we can't handle this. 5366 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 5367 return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue()); 5368 if (OffsetOp.isUndef()) 5369 return InferPointerInfo(DAG, Ptr); 5370 return MachinePointerInfo(); 5371 } 5372 5373 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 5374 EVT VT, const SDLoc &dl, SDValue Chain, 5375 SDValue Ptr, SDValue Offset, 5376 MachinePointerInfo PtrInfo, EVT MemVT, 5377 unsigned Alignment, 5378 MachineMemOperand::Flags MMOFlags, 5379 const AAMDNodes &AAInfo, const MDNode *Ranges) { 5380 assert(Chain.getValueType() == MVT::Other && 5381 "Invalid chain type"); 5382 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5383 Alignment = getEVTAlignment(MemVT); 5384 5385 MMOFlags |= MachineMemOperand::MOLoad; 5386 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 5387 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 5388 // clients. 5389 if (PtrInfo.V.isNull()) 5390 PtrInfo = InferPointerInfo(*this, Ptr, Offset); 5391 5392 MachineFunction &MF = getMachineFunction(); 5393 MachineMemOperand *MMO = MF.getMachineMemOperand( 5394 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges); 5395 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 5396 } 5397 5398 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 5399 EVT VT, const SDLoc &dl, SDValue Chain, 5400 SDValue Ptr, SDValue Offset, EVT MemVT, 5401 MachineMemOperand *MMO) { 5402 if (VT == MemVT) { 5403 ExtType = ISD::NON_EXTLOAD; 5404 } else if (ExtType == ISD::NON_EXTLOAD) { 5405 assert(VT == MemVT && "Non-extending load from different memory type!"); 5406 } else { 5407 // Extending load. 5408 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 5409 "Should only be an extending load, not truncating!"); 5410 assert(VT.isInteger() == MemVT.isInteger() && 5411 "Cannot convert from FP to Int or Int -> FP!"); 5412 assert(VT.isVector() == MemVT.isVector() && 5413 "Cannot use an ext load to convert to or from a vector!"); 5414 assert((!VT.isVector() || 5415 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 5416 "Cannot use an ext load to change the number of vector elements!"); 5417 } 5418 5419 bool Indexed = AM != ISD::UNINDEXED; 5420 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 5421 5422 SDVTList VTs = Indexed ? 
5423     getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
5424   SDValue Ops[] = { Chain, Ptr, Offset };
5425   FoldingSetNodeID ID;
5426   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
5427   ID.AddInteger(MemVT.getRawBits());
5428   ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
5429       dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
5430   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5431   void *IP = nullptr;
5432   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5433     cast<LoadSDNode>(E)->refineAlignment(MMO);
5434     return SDValue(E, 0);
5435   }
5436   auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
5437                                   ExtType, MemVT, MMO);
5438   createOperands(N, Ops);
5439
5440   CSEMap.InsertNode(N, IP);
5441   InsertNode(N);
5442   return SDValue(N, 0);
5443 }
5444
5445 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5446                               SDValue Ptr, MachinePointerInfo PtrInfo,
5447                               unsigned Alignment,
5448                               MachineMemOperand::Flags MMOFlags,
5449                               const AAMDNodes &AAInfo, const MDNode *Ranges) {
5450   SDValue Undef = getUNDEF(Ptr.getValueType());
5451   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5452                  PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
5453 }
5454
5455 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5456                               SDValue Ptr, MachineMemOperand *MMO) {
5457   SDValue Undef = getUNDEF(Ptr.getValueType());
5458   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5459                  VT, MMO);
5460 }
5461
5462 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5463                                  EVT VT, SDValue Chain, SDValue Ptr,
5464                                  MachinePointerInfo PtrInfo, EVT MemVT,
5465                                  unsigned Alignment,
5466                                  MachineMemOperand::Flags MMOFlags,
5467                                  const AAMDNodes &AAInfo) {
5468   SDValue Undef = getUNDEF(Ptr.getValueType());
5469   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
5470                  MemVT, Alignment, MMOFlags, AAInfo);
5471 }
5472
5473 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5474                                  EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
5475                                  MachineMemOperand *MMO) {
5476   SDValue Undef = getUNDEF(Ptr.getValueType());
5477   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
5478                  MemVT, MMO);
5479 }
5480
5481 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
5482                                      SDValue Base, SDValue Offset,
5483                                      ISD::MemIndexedMode AM) {
5484   LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
5485   assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
5486   // Don't propagate the invariant or dereferenceable flags.
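  // The indexed address is offset from the original one, and memory that was
  // known invariant or dereferenceable at the old address need not be so at
  // the new one.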
5487 auto MMOFlags = 5488 LD->getMemOperand()->getFlags() & 5489 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 5490 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 5491 LD->getChain(), Base, Offset, LD->getPointerInfo(), 5492 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 5493 LD->getAAInfo()); 5494 } 5495 5496 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5497 SDValue Ptr, MachinePointerInfo PtrInfo, 5498 unsigned Alignment, 5499 MachineMemOperand::Flags MMOFlags, 5500 const AAMDNodes &AAInfo) { 5501 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 5502 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5503 Alignment = getEVTAlignment(Val.getValueType()); 5504 5505 MMOFlags |= MachineMemOperand::MOStore; 5506 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 5507 5508 if (PtrInfo.V.isNull()) 5509 PtrInfo = InferPointerInfo(*this, Ptr); 5510 5511 MachineFunction &MF = getMachineFunction(); 5512 MachineMemOperand *MMO = MF.getMachineMemOperand( 5513 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo); 5514 return getStore(Chain, dl, Val, Ptr, MMO); 5515 } 5516 5517 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5518 SDValue Ptr, MachineMemOperand *MMO) { 5519 assert(Chain.getValueType() == MVT::Other && 5520 "Invalid chain type"); 5521 EVT VT = Val.getValueType(); 5522 SDVTList VTs = getVTList(MVT::Other); 5523 SDValue Undef = getUNDEF(Ptr.getValueType()); 5524 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 5525 FoldingSetNodeID ID; 5526 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 5527 ID.AddInteger(VT.getRawBits()); 5528 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 5529 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 5530 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5531 void *IP = nullptr; 5532 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5533 cast<StoreSDNode>(E)->refineAlignment(MMO); 5534 return SDValue(E, 0); 5535 } 5536 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5537 ISD::UNINDEXED, false, VT, MMO); 5538 createOperands(N, Ops); 5539 5540 CSEMap.InsertNode(N, IP); 5541 InsertNode(N); 5542 return SDValue(N, 0); 5543 } 5544 5545 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5546 SDValue Ptr, MachinePointerInfo PtrInfo, 5547 EVT SVT, unsigned Alignment, 5548 MachineMemOperand::Flags MMOFlags, 5549 const AAMDNodes &AAInfo) { 5550 assert(Chain.getValueType() == MVT::Other && 5551 "Invalid chain type"); 5552 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5553 Alignment = getEVTAlignment(SVT); 5554 5555 MMOFlags |= MachineMemOperand::MOStore; 5556 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 5557 5558 if (PtrInfo.V.isNull()) 5559 PtrInfo = InferPointerInfo(*this, Ptr); 5560 5561 MachineFunction &MF = getMachineFunction(); 5562 MachineMemOperand *MMO = MF.getMachineMemOperand( 5563 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 5564 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 5565 } 5566 5567 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5568 SDValue Ptr, EVT SVT, 5569 MachineMemOperand *MMO) { 5570 EVT VT = Val.getValueType(); 5571 5572 assert(Chain.getValueType() == MVT::Other && 5573 "Invalid chain type"); 5574 if (VT == SVT) 5575 return getStore(Chain, dl, Val, Ptr, MMO); 5576 5577 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 5578 
"Should only be a truncating store, not extending!"); 5579 assert(VT.isInteger() == SVT.isInteger() && 5580 "Can't do FP-INT conversion!"); 5581 assert(VT.isVector() == SVT.isVector() && 5582 "Cannot use trunc store to convert to or from a vector!"); 5583 assert((!VT.isVector() || 5584 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 5585 "Cannot use trunc store to change the number of vector elements!"); 5586 5587 SDVTList VTs = getVTList(MVT::Other); 5588 SDValue Undef = getUNDEF(Ptr.getValueType()); 5589 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 5590 FoldingSetNodeID ID; 5591 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 5592 ID.AddInteger(SVT.getRawBits()); 5593 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 5594 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 5595 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5596 void *IP = nullptr; 5597 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5598 cast<StoreSDNode>(E)->refineAlignment(MMO); 5599 return SDValue(E, 0); 5600 } 5601 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5602 ISD::UNINDEXED, true, SVT, MMO); 5603 createOperands(N, Ops); 5604 5605 CSEMap.InsertNode(N, IP); 5606 InsertNode(N); 5607 return SDValue(N, 0); 5608 } 5609 5610 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 5611 SDValue Base, SDValue Offset, 5612 ISD::MemIndexedMode AM) { 5613 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 5614 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 5615 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 5616 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 5617 FoldingSetNodeID ID; 5618 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 5619 ID.AddInteger(ST->getMemoryVT().getRawBits()); 5620 ID.AddInteger(ST->getRawSubclassData()); 5621 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 5622 void *IP = nullptr; 5623 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 5624 return SDValue(E, 0); 5625 5626 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 5627 ST->isTruncatingStore(), ST->getMemoryVT(), 5628 ST->getMemOperand()); 5629 createOperands(N, Ops); 5630 5631 CSEMap.InsertNode(N, IP); 5632 InsertNode(N); 5633 return SDValue(N, 0); 5634 } 5635 5636 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 5637 SDValue Ptr, SDValue Mask, SDValue Src0, 5638 EVT MemVT, MachineMemOperand *MMO, 5639 ISD::LoadExtType ExtTy, bool isExpanding) { 5640 5641 SDVTList VTs = getVTList(VT, MVT::Other); 5642 SDValue Ops[] = { Chain, Ptr, Mask, Src0 }; 5643 FoldingSetNodeID ID; 5644 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 5645 ID.AddInteger(VT.getRawBits()); 5646 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 5647 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO)); 5648 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5649 void *IP = nullptr; 5650 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5651 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 5652 return SDValue(E, 0); 5653 } 5654 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5655 ExtTy, isExpanding, MemVT, MMO); 5656 createOperands(N, Ops); 5657 5658 CSEMap.InsertNode(N, IP); 5659 InsertNode(N); 5660 return SDValue(N, 0); 5661 } 5662 5663 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 5664 SDValue Val, SDValue Ptr, SDValue Mask, 5665 EVT MemVT, MachineMemOperand *MMO, 5666 bool IsTruncating, bool IsCompressing) { 5667 
assert(Chain.getValueType() == MVT::Other && 5668 "Invalid chain type"); 5669 EVT VT = Val.getValueType(); 5670 SDVTList VTs = getVTList(MVT::Other); 5671 SDValue Ops[] = { Chain, Ptr, Mask, Val }; 5672 FoldingSetNodeID ID; 5673 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 5674 ID.AddInteger(VT.getRawBits()); 5675 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 5676 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO)); 5677 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5678 void *IP = nullptr; 5679 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5680 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 5681 return SDValue(E, 0); 5682 } 5683 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5684 IsTruncating, IsCompressing, MemVT, MMO); 5685 createOperands(N, Ops); 5686 5687 CSEMap.InsertNode(N, IP); 5688 InsertNode(N); 5689 return SDValue(N, 0); 5690 } 5691 5692 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 5693 ArrayRef<SDValue> Ops, 5694 MachineMemOperand *MMO) { 5695 assert(Ops.size() == 5 && "Incompatible number of operands"); 5696 5697 FoldingSetNodeID ID; 5698 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 5699 ID.AddInteger(VT.getRawBits()); 5700 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 5701 dl.getIROrder(), VTs, VT, MMO)); 5702 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5703 void *IP = nullptr; 5704 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5705 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 5706 return SDValue(E, 0); 5707 } 5708 5709 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 5710 VTs, VT, MMO); 5711 createOperands(N, Ops); 5712 5713 assert(N->getValue().getValueType() == N->getValueType(0) && 5714 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 5715 assert(N->getMask().getValueType().getVectorNumElements() == 5716 N->getValueType(0).getVectorNumElements() && 5717 "Vector width mismatch between mask and data"); 5718 assert(N->getIndex().getValueType().getVectorNumElements() == 5719 N->getValueType(0).getVectorNumElements() && 5720 "Vector width mismatch between index and data"); 5721 5722 CSEMap.InsertNode(N, IP); 5723 InsertNode(N); 5724 return SDValue(N, 0); 5725 } 5726 5727 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 5728 ArrayRef<SDValue> Ops, 5729 MachineMemOperand *MMO) { 5730 assert(Ops.size() == 5 && "Incompatible number of operands"); 5731 5732 FoldingSetNodeID ID; 5733 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 5734 ID.AddInteger(VT.getRawBits()); 5735 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 5736 dl.getIROrder(), VTs, VT, MMO)); 5737 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5738 void *IP = nullptr; 5739 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5740 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 5741 return SDValue(E, 0); 5742 } 5743 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), 5744 VTs, VT, MMO); 5745 createOperands(N, Ops); 5746 5747 assert(N->getMask().getValueType().getVectorNumElements() == 5748 N->getValue().getValueType().getVectorNumElements() && 5749 "Vector width mismatch between mask and data"); 5750 assert(N->getIndex().getValueType().getVectorNumElements() == 5751 N->getValue().getValueType().getVectorNumElements() && 5752 "Vector width mismatch between index and data"); 5753 5754 CSEMap.InsertNode(N, IP); 5755 InsertNode(N); 5756 return SDValue(N, 0); 5757 } 5758 
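/// getVAArg - Create a VAARG node, which reads an argument of type VT from
/// the va_list pointed to by Ptr, producing the argument value and the
/// updated chain. SV is the IR source value of the va_list (used for alias
/// information) and Align is encoded as a target-constant operand.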
5759 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 5760 SDValue Ptr, SDValue SV, unsigned Align) { 5761 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 5762 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 5763 } 5764 5765 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5766 ArrayRef<SDUse> Ops) { 5767 switch (Ops.size()) { 5768 case 0: return getNode(Opcode, DL, VT); 5769 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 5770 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 5771 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 5772 default: break; 5773 } 5774 5775 // Copy from an SDUse array into an SDValue array for use with 5776 // the regular getNode logic. 5777 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 5778 return getNode(Opcode, DL, VT, NewOps); 5779 } 5780 5781 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5782 ArrayRef<SDValue> Ops, const SDNodeFlags *Flags) { 5783 unsigned NumOps = Ops.size(); 5784 switch (NumOps) { 5785 case 0: return getNode(Opcode, DL, VT); 5786 case 1: return getNode(Opcode, DL, VT, Ops[0]); 5787 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 5788 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 5789 default: break; 5790 } 5791 5792 switch (Opcode) { 5793 default: break; 5794 case ISD::CONCAT_VECTORS: { 5795 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 5796 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 5797 return V; 5798 break; 5799 } 5800 case ISD::SELECT_CC: { 5801 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 5802 assert(Ops[0].getValueType() == Ops[1].getValueType() && 5803 "LHS and RHS of condition must have same type!"); 5804 assert(Ops[2].getValueType() == Ops[3].getValueType() && 5805 "True and False arms of SelectCC must have same type!"); 5806 assert(Ops[2].getValueType() == VT && 5807 "select_cc node must be of same type as true and false value!"); 5808 break; 5809 } 5810 case ISD::BR_CC: { 5811 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 5812 assert(Ops[2].getValueType() == Ops[3].getValueType() && 5813 "LHS/RHS of comparison should match types!"); 5814 break; 5815 } 5816 } 5817 5818 // Memoize nodes. 
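  // Nodes whose result type is MVT::Glue are kept out of the CSE maps below:
  // a glue result ties its producer to one particular consumer, so two
  // structurally identical glue-producing nodes must remain distinct.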
5819 SDNode *N; 5820 SDVTList VTs = getVTList(VT); 5821 5822 if (VT != MVT::Glue) { 5823 FoldingSetNodeID ID; 5824 AddNodeIDNode(ID, Opcode, VTs, Ops); 5825 void *IP = nullptr; 5826 5827 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 5828 return SDValue(E, 0); 5829 5830 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5831 createOperands(N, Ops); 5832 5833 CSEMap.InsertNode(N, IP); 5834 } else { 5835 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5836 createOperands(N, Ops); 5837 } 5838 5839 InsertNode(N); 5840 return SDValue(N, 0); 5841 } 5842 5843 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 5844 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 5845 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 5846 } 5847 5848 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 5849 ArrayRef<SDValue> Ops) { 5850 if (VTList.NumVTs == 1) 5851 return getNode(Opcode, DL, VTList.VTs[0], Ops); 5852 5853 #if 0 5854 switch (Opcode) { 5855 // FIXME: figure out how to safely handle things like 5856 // int foo(int x) { return 1 << (x & 255); } 5857 // int bar() { return foo(256); } 5858 case ISD::SRA_PARTS: 5859 case ISD::SRL_PARTS: 5860 case ISD::SHL_PARTS: 5861 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 5862 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 5863 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 5864 else if (N3.getOpcode() == ISD::AND) 5865 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 5866 // If the and is only masking out bits that cannot effect the shift, 5867 // eliminate the and. 5868 unsigned NumBits = VT.getScalarSizeInBits()*2; 5869 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 5870 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 5871 } 5872 break; 5873 } 5874 #endif 5875 5876 // Memoize the node unless it returns a flag. 
5877 SDNode *N; 5878 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 5879 FoldingSetNodeID ID; 5880 AddNodeIDNode(ID, Opcode, VTList, Ops); 5881 void *IP = nullptr; 5882 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 5883 return SDValue(E, 0); 5884 5885 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 5886 createOperands(N, Ops); 5887 CSEMap.InsertNode(N, IP); 5888 } else { 5889 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 5890 createOperands(N, Ops); 5891 } 5892 InsertNode(N); 5893 return SDValue(N, 0); 5894 } 5895 5896 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 5897 SDVTList VTList) { 5898 return getNode(Opcode, DL, VTList, None); 5899 } 5900 5901 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 5902 SDValue N1) { 5903 SDValue Ops[] = { N1 }; 5904 return getNode(Opcode, DL, VTList, Ops); 5905 } 5906 5907 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 5908 SDValue N1, SDValue N2) { 5909 SDValue Ops[] = { N1, N2 }; 5910 return getNode(Opcode, DL, VTList, Ops); 5911 } 5912 5913 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 5914 SDValue N1, SDValue N2, SDValue N3) { 5915 SDValue Ops[] = { N1, N2, N3 }; 5916 return getNode(Opcode, DL, VTList, Ops); 5917 } 5918 5919 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 5920 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 5921 SDValue Ops[] = { N1, N2, N3, N4 }; 5922 return getNode(Opcode, DL, VTList, Ops); 5923 } 5924 5925 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 5926 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 5927 SDValue N5) { 5928 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5929 return getNode(Opcode, DL, VTList, Ops); 5930 } 5931 5932 SDVTList SelectionDAG::getVTList(EVT VT) { 5933 return makeVTList(SDNode::getValueTypeList(VT), 1); 5934 } 5935 5936 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 5937 FoldingSetNodeID ID; 5938 ID.AddInteger(2U); 5939 ID.AddInteger(VT1.getRawBits()); 5940 ID.AddInteger(VT2.getRawBits()); 5941 5942 void *IP = nullptr; 5943 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5944 if (!Result) { 5945 EVT *Array = Allocator.Allocate<EVT>(2); 5946 Array[0] = VT1; 5947 Array[1] = VT2; 5948 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 5949 VTListMap.InsertNode(Result, IP); 5950 } 5951 return Result->getSDVTList(); 5952 } 5953 5954 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 5955 FoldingSetNodeID ID; 5956 ID.AddInteger(3U); 5957 ID.AddInteger(VT1.getRawBits()); 5958 ID.AddInteger(VT2.getRawBits()); 5959 ID.AddInteger(VT3.getRawBits()); 5960 5961 void *IP = nullptr; 5962 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5963 if (!Result) { 5964 EVT *Array = Allocator.Allocate<EVT>(3); 5965 Array[0] = VT1; 5966 Array[1] = VT2; 5967 Array[2] = VT3; 5968 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 5969 VTListMap.InsertNode(Result, IP); 5970 } 5971 return Result->getSDVTList(); 5972 } 5973 5974 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 5975 FoldingSetNodeID ID; 5976 ID.AddInteger(4U); 5977 ID.AddInteger(VT1.getRawBits()); 5978 ID.AddInteger(VT2.getRawBits()); 5979 ID.AddInteger(VT3.getRawBits()); 5980 ID.AddInteger(VT4.getRawBits()); 5981 5982 void *IP = nullptr; 5983 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 5984 if (!Result) { 
5985 EVT *Array = Allocator.Allocate<EVT>(4); 5986 Array[0] = VT1; 5987 Array[1] = VT2; 5988 Array[2] = VT3; 5989 Array[3] = VT4; 5990 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 5991 VTListMap.InsertNode(Result, IP); 5992 } 5993 return Result->getSDVTList(); 5994 } 5995 5996 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 5997 unsigned NumVTs = VTs.size(); 5998 FoldingSetNodeID ID; 5999 ID.AddInteger(NumVTs); 6000 for (unsigned index = 0; index < NumVTs; index++) { 6001 ID.AddInteger(VTs[index].getRawBits()); 6002 } 6003 6004 void *IP = nullptr; 6005 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6006 if (!Result) { 6007 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 6008 std::copy(VTs.begin(), VTs.end(), Array); 6009 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 6010 VTListMap.InsertNode(Result, IP); 6011 } 6012 return Result->getSDVTList(); 6013 } 6014 6015 6016 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 6017 /// specified operands. If the resultant node already exists in the DAG, 6018 /// this does not modify the specified node, instead it returns the node that 6019 /// already exists. If the resultant node does not exist in the DAG, the 6020 /// input node is returned. As a degenerate case, if you specify the same 6021 /// input operands as the node already has, the input node is returned. 6022 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 6023 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 6024 6025 // Check to see if there is no change. 6026 if (Op == N->getOperand(0)) return N; 6027 6028 // See if the modified node already exists. 6029 void *InsertPos = nullptr; 6030 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 6031 return Existing; 6032 6033 // Nope it doesn't. Remove the node from its current place in the maps. 6034 if (InsertPos) 6035 if (!RemoveNodeFromCSEMaps(N)) 6036 InsertPos = nullptr; 6037 6038 // Now we update the operands. 6039 N->OperandList[0].set(Op); 6040 6041 // If this gets put into a CSE map, add it. 6042 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6043 return N; 6044 } 6045 6046 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 6047 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 6048 6049 // Check to see if there is no change. 6050 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 6051 return N; // No operands changed, just return the input node. 6052 6053 // See if the modified node already exists. 6054 void *InsertPos = nullptr; 6055 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 6056 return Existing; 6057 6058 // Nope it doesn't. Remove the node from its current place in the maps. 6059 if (InsertPos) 6060 if (!RemoveNodeFromCSEMaps(N)) 6061 InsertPos = nullptr; 6062 6063 // Now we update the operands. 6064 if (N->OperandList[0] != Op1) 6065 N->OperandList[0].set(Op1); 6066 if (N->OperandList[1] != Op2) 6067 N->OperandList[1].set(Op2); 6068 6069 // If this gets put into a CSE map, add it. 
6070 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6071 return N; 6072 } 6073 6074 SDNode *SelectionDAG:: 6075 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 6076 SDValue Ops[] = { Op1, Op2, Op3 }; 6077 return UpdateNodeOperands(N, Ops); 6078 } 6079 6080 SDNode *SelectionDAG:: 6081 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 6082 SDValue Op3, SDValue Op4) { 6083 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 6084 return UpdateNodeOperands(N, Ops); 6085 } 6086 6087 SDNode *SelectionDAG:: 6088 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 6089 SDValue Op3, SDValue Op4, SDValue Op5) { 6090 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 6091 return UpdateNodeOperands(N, Ops); 6092 } 6093 6094 SDNode *SelectionDAG:: 6095 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 6096 unsigned NumOps = Ops.size(); 6097 assert(N->getNumOperands() == NumOps && 6098 "Update with wrong number of operands"); 6099 6100 // If no operands changed just return the input node. 6101 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 6102 return N; 6103 6104 // See if the modified node already exists. 6105 void *InsertPos = nullptr; 6106 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 6107 return Existing; 6108 6109 // Nope it doesn't. Remove the node from its current place in the maps. 6110 if (InsertPos) 6111 if (!RemoveNodeFromCSEMaps(N)) 6112 InsertPos = nullptr; 6113 6114 // Now we update the operands. 6115 for (unsigned i = 0; i != NumOps; ++i) 6116 if (N->OperandList[i] != Ops[i]) 6117 N->OperandList[i].set(Ops[i]); 6118 6119 // If this gets put into a CSE map, add it. 6120 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6121 return N; 6122 } 6123 6124 /// DropOperands - Release the operands and set this node to have 6125 /// zero operands. 6126 void SDNode::DropOperands() { 6127 // Unlike the code in MorphNodeTo that does this, we don't need to 6128 // watch for dead nodes here. 6129 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 6130 SDUse &Use = *I++; 6131 Use.set(SDValue()); 6132 } 6133 } 6134 6135 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 6136 /// machine opcode. 
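/// The machine opcode is handed to MorphNodeTo in complemented form
/// (~MachineOpc), which is how target machine opcodes are kept disjoint from
/// ISD opcodes in a node's opcode field.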
6137 /// 6138 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6139 EVT VT) { 6140 SDVTList VTs = getVTList(VT); 6141 return SelectNodeTo(N, MachineOpc, VTs, None); 6142 } 6143 6144 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6145 EVT VT, SDValue Op1) { 6146 SDVTList VTs = getVTList(VT); 6147 SDValue Ops[] = { Op1 }; 6148 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6149 } 6150 6151 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6152 EVT VT, SDValue Op1, 6153 SDValue Op2) { 6154 SDVTList VTs = getVTList(VT); 6155 SDValue Ops[] = { Op1, Op2 }; 6156 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6157 } 6158 6159 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6160 EVT VT, SDValue Op1, 6161 SDValue Op2, SDValue Op3) { 6162 SDVTList VTs = getVTList(VT); 6163 SDValue Ops[] = { Op1, Op2, Op3 }; 6164 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6165 } 6166 6167 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6168 EVT VT, ArrayRef<SDValue> Ops) { 6169 SDVTList VTs = getVTList(VT); 6170 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6171 } 6172 6173 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6174 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 6175 SDVTList VTs = getVTList(VT1, VT2); 6176 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6177 } 6178 6179 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6180 EVT VT1, EVT VT2) { 6181 SDVTList VTs = getVTList(VT1, VT2); 6182 return SelectNodeTo(N, MachineOpc, VTs, None); 6183 } 6184 6185 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6186 EVT VT1, EVT VT2, EVT VT3, 6187 ArrayRef<SDValue> Ops) { 6188 SDVTList VTs = getVTList(VT1, VT2, VT3); 6189 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6190 } 6191 6192 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6193 EVT VT1, EVT VT2, 6194 SDValue Op1, SDValue Op2) { 6195 SDVTList VTs = getVTList(VT1, VT2); 6196 SDValue Ops[] = { Op1, Op2 }; 6197 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6198 } 6199 6200 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6201 SDVTList VTs,ArrayRef<SDValue> Ops) { 6202 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 6203 // Reset the NodeID to -1. 6204 New->setNodeId(-1); 6205 if (New != N) { 6206 ReplaceAllUsesWith(N, New); 6207 RemoveDeadNode(N); 6208 } 6209 return New; 6210 } 6211 6212 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away 6213 /// the line number information on the merged node since it is not possible to 6214 /// preserve the information that operation is associated with multiple lines. 6215 /// This will make the debugger working better at -O0, were there is a higher 6216 /// probability having other instructions associated with that line. 6217 /// 6218 /// For IROrder, we keep the smaller of the two 6219 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { 6220 DebugLoc NLoc = N->getDebugLoc(); 6221 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) { 6222 N->setDebugLoc(DebugLoc()); 6223 } 6224 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); 6225 N->setIROrder(Order); 6226 return N; 6227 } 6228 6229 /// MorphNodeTo - This *mutates* the specified node to have the specified 6230 /// return type, opcode, and operands. 6231 /// 6232 /// Note that MorphNodeTo returns the resultant node. 
/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that operation is associated with multiple lines.
/// This will make the debugger work better at -O0, where there is a higher
/// probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node.  If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.  Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner
/// or the legalizer, which maintain worklists that would need to be updated
/// when deleting things.
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
      return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list.  Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  // For MachineNode, initialize the memory references information.
  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
    MN->setMemRefs(nullptr, nullptr);

  // Swap for an appropriately sized array from the recycler.
  removeOperands(N);
  createOperands(N, Ops);

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SDNode *N : DeadNodeSet)
      if (N->use_empty())
        DeadNodes.push_back(N);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP);   // Memoize the new node.
  return N;
}
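// A minimal sketch of the caveat above (assumed caller-side code, not part
// of this file): because MorphNodeTo may return a different, pre-existing
// node, callers must not keep using the old pointer:
//
//   SDNode *Morphed = DAG.MorphNodeTo(N, Opc, VTs, Ops);
//   if (Morphed != N) {
//     // N's identity was merged away; only Morphed is valid from here on.
//   }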
/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node.  If there is already
/// a node of the specified opcode and operands, it returns that node instead
/// of the current one.
MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            ArrayRef<EVT> ResultTys,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(ResultTys);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
                                            SDVTList VTs,
                                            ArrayRef<SDValue> Ops) {
  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = nullptr;

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, Ops);
    IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
    }
  }

  // Allocate a new MachineSDNode.
  N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);

  if (DoCSE)
    CSEMap.InsertNode(N, IP);

  InsertNode(N);
  return N;
}
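// Example (illustrative, with a made-up target opcode): selectors build
// MachineSDNodes directly when a pattern needs explicit construction:
//
//   SDValue Ops[] = { Base, Offset, Chain };
//   MachineSDNode *Load =
//       CurDAG->getMachineNode(TargetXYZ::LDri, DL,
//                              MVT::i32, MVT::Other, Ops);
//   // Result 0 is the loaded value, result 1 the output chain.
//
// TargetXYZ::LDri and the operand order are assumptions for illustration;
// real opcodes and operand orders come from the target's instruction defs.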
/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                             SDValue Operand) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                  VT, Operand, SRIdxVal);
  return SDValue(Subreg, 0);
}

/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                            SDValue Operand, SDValue Subreg) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                  VT, Operand, Subreg, SRIdxVal);
  return SDValue(Result, 0);
}

/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
                                      ArrayRef<SDValue> Ops,
                                      const SDNodeFlags *Flags) {
  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
      if (Flags)
        E->intersectFlagsWith(Flags);
      return E;
    }
  }
  return nullptr;
}

/// getDbgValue - Creates an SDDbgValue node.
///
/// SDNode
SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N,
                                      unsigned R, bool IsIndirect, uint64_t Off,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O);
}

/// Constant
SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr,
                                              const Value *C, uint64_t Off,
                                              const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, Off, DL, O);
}

/// FrameIndex
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr,
                                                unsigned FI, uint64_t Off,
                                                const DebugLoc &DL,
                                                unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, Off, DL, O);
}
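// A hedged usage sketch: builder-style code creates one of the three
// SDDbgValue flavors above and attaches it to the DAG, e.g.:
//
//   SDDbgValue *DV = DAG.getDbgValue(Variable, Expression, Val.getNode(),
//                                    Val.getResNo(), /*IsIndirect=*/false,
//                                    /*Off=*/0, dl, SDNodeOrder);
//   DAG.AddDbgValue(DV, Val.getNode(), /*isParameter=*/false);
//
// Variable, Expression, and SDNodeOrder are assumed caller-provided values;
// AddDbgValue is defined later in this file.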
namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses with self");

  // Preserve Debug Values.
  TransferDbgValues(FromN, To);

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // too. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}
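// Example (illustrative only): a DAG combine that folds (add x, 0) would
// redirect every user of the add to x and let the dead node be reclaimed:
//
//   if (isNullConstant(N->getOperand(1)))
//     DAG.ReplaceAllUsesWith(SDValue(N, 0), N->getOperand(0));
//
// isNullConstant is the helper defined later in this file; in-tree combines
// usually go through CombineTo/worklist plumbing rather than calling this
// directly.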
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Preserve Debug Info. Only do this if there's a use.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    if (From->hasAnyUseOfValue(i)) {
      assert((i < To->getNumValues()) && "Invalid To location");
      TransferDbgValues(SDValue(From, i), SDValue(To, i));
    }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values.  To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1)  // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Preserve Debug Info.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    TransferDbgValues(SDValue(From, i), To[i]);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(To[getRoot().getResNo()]);
}
/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.  The Deleted
/// vector is handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Preserve Debug Info.
  TransferDbgValues(From, To);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
    } while (UI != UE && *UI == User);

    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}

namespace {
  /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
  /// to record information about a use.
  struct UseMemo {
    SDNode *User;
    unsigned Index;
    SDUse *Use;
  };

  /// operator< - Sort Memos by User.
  bool operator<(const UseMemo &L, const UseMemo &R) {
    return (intptr_t)L.User < (intptr_t)R.User;
  }
}
/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.  The same value
/// may appear in both the From and To list.  The Deleted vector is
/// handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  TransferDbgValues(*From, *To);

  // Read up all the uses and make records of them. This helps
  // processing new uses that are introduced during the
  // replacement process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
         E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  std::sort(Uses.begin(), Uses.end());

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From.  If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps.  If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}
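// Illustrative sketch: replacing both results of a two-result node (for
// example a load's value and chain) in one call keeps the updates atomic
// with respect to CSE:
//
//   SDValue Froms[] = { SDValue(N, 0), SDValue(N, 1) };
//   SDValue Tos[]   = { NewVal, NewChain };
//   DAG.ReplaceAllUsesOfValuesWith(Froms, Tos, 2);
//
// NewVal and NewChain are assumed replacement values computed by the caller.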
/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order.  It returns the maximum id.
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count.  Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values.  After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = &*I++;
    checkForCycles(N, this);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q(N);
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (SDNode &Node : allnodes()) {
    SDNode *N = &Node;
    checkForCycles(N, this);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P->getIterator() != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (Node.getIterator() == SortedPos) {
#ifndef NDEBUG
      allnodes_iterator I(N);
      SDNode *S = &*++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull(this); dbgs() << "\n";
      dbgs() << "Checking if this is due to cycles\n";
      checkForCycles(this, true);
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}
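// A hedged sketch of how the ids assigned above are typically consumed:
// once AssignTopologicalOrder() has run, a pass can walk the node list
// knowing every operand is visited before its users:
//
//   DAG.AssignTopologicalOrder();
//   for (SDNode &N : DAG.allnodes()) {
//     // All of N's operands have smaller NodeIds than N at this point.
//   }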
/// AddDbgValue - Add a dbg_value SDNode.  If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  if (SD) {
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, SD, isParameter);
}

/// TransferDbgValues - Transfer SDDbgValues.  Called when nodes are replaced.
void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
  if (From == To || !From.getNode()->getHasDebugValue())
    return;
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
       I != E; ++I) {
    SDDbgValue *Dbg = *I;
    // Only add DbgValues attached to the same ResNo.
    if (Dbg->getKind() == SDDbgValue::SDNODE &&
        Dbg->getSDNode() == From.getNode() &&
        Dbg->getResNo() == From.getResNo() && !Dbg->isInvalidated()) {
      assert(FromNode != ToNode &&
             "Should not transfer Debug Values intranode");
      SDDbgValue *Clone =
          getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
                      To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
                      Dbg->getDebugLoc(), Dbg->getOrder());
      ClonedDVs.push_back(Clone);
      Dbg->setIsInvalidated();
    }
  }
  for (SDDbgValue *I : ClonedDVs)
    AddDbgValue(I, ToNode, false);
}

//===----------------------------------------------------------------------===//
// SDNode Class
//===----------------------------------------------------------------------===//

bool llvm::isNullConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

bool llvm::isNullFPConstant(SDValue V) {
  ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
  return Const != nullptr && Const->isZero() && !Const->isNegative();
}

bool llvm::isAllOnesConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isAllOnesValue();
}

bool llvm::isOneConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isOne();
}

bool llvm::isBitwiseNot(SDValue V) {
  return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1));
}

ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here.
    // FIXME: We blindly ignore splats which include undef which is overly
    // pessimistic.
    if (CN && UndefElements.none() &&
        CN->getValueType(0) == N.getValueType().getScalarType())
      return CN;
  }

  return nullptr;
}
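// Illustrative sketch: combines use this helper to treat scalars and splat
// vectors uniformly, e.g. to test for a shift by a known constant amount:
//
//   if (ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1)))
//     if (C->getAPIntValue() == 1) {
//       // Handles both an i32 shift amount and a <4 x i32> splat of 1.
//     }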
ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);

    if (CN && UndefElements.none())
      return CN;
  }

  return nullptr;
}

HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         const DebugLoc &DL,
                                         const GlobalValue *GA, EVT VT,
                                         int64_t o, unsigned char TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
                                         EVT VT, unsigned SrcAS,
                                         unsigned DestAS)
    : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
      SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  MemSDNodeBits.IsVolatile = MMO->isVolatile();
  MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
  MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
  MemSDNodeBits.IsInvariant = MMO->isInvariant();

  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {
  struct EVTArray {
    std::vector<EVT> VTs;

    EVTArray() {
      VTs.reserve(MVT::LAST_VALUETYPE);
      for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
        VTs.push_back(MVT((MVT::SimpleValueType)i));
    }
  };
}

static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true> > VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}
/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value.  This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there are any uses of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
///
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
///
bool SDValue::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (*this == Op)
      return true;
  return false;
}

bool SDNode::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (this == Op.getNode())
      return true;
  return false;
}
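// Illustrative sketch: combines often gate a rewrite on how a node's
// individual results are consumed, e.g. only folding a load if nothing else
// observes its loaded value:
//
//   if (Load->hasNUsesOfValue(1, 0) && N->isOnlyUserOf(Load.getNode())) {
//     // Safe to fold the load into N: no other user sees result 0.
//   }
//
// Load and N here are assumed SDValue/SDNode* locals of a hypothetical
// combine, not names defined in this file.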
/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path.  In practice, this looks
/// through token factors and non-volatile loads.  In order to remain
/// efficient, this only looks a couple of nodes in; it does not do an
/// exhaustive search.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply, we just want to be able to see through
  // TokenFactor's etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel. If any
  // of the operands of the TF does not reach dest, then we cannot do the xform.
  if (getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
        return false;
    return true;
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
  }
  return false;
}

bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}
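// A hedged sketch of the usual cycle check: before making node A an operand
// of node B, a combine must ensure A does not already (transitively) depend
// on B, or the rewrite would create a cycle in the DAG:
//
//   if (!A.getNode()->hasPredecessor(B.getNode())) {
//     // B does not feed A's operands; it is safe for B to use A.
//   }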
const SDNodeFlags *SDNode::getFlags() const {
  if (auto *FlagsNode = dyn_cast<BinaryWithFlagsSDNode>(this))
    return &FlagsNode->Flags;
  return nullptr;
}

void SDNode::intersectFlagsWith(const SDNodeFlags *Flags) {
  if (auto *FlagsNode = dyn_cast<BinaryWithFlagsSDNode>(this))
    FlagsNode->Flags.intersectWith(Flags);
}

SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
            getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                    getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}
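// Illustrative sketch: legalization uses this to scalarize an operation the
// target cannot perform on whole vectors, e.g. turning a <4 x i32> op into
// four scalar ops plus a BUILD_VECTOR of the results:
//
//   SDValue Unrolled = DAG.UnrollVectorOp(N);     // full unroll (ResNE == 0)
//   SDValue Widened  = DAG.UnrollVectorOp(N, 8);  // pad extra lanes w/ undef
//
// The second form produces ResNE result elements, filling any lanes beyond
// the source vector's width with undef.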
bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI.getObjectSize(FI);
    int BFS = MFI.getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
  }

  // Handle X + C.
  if (isBaseWithConstantOffset(Loc)) {
    int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
    if (Loc.getOperand(0) == BaseLoc) {
      // If the base location is a simple address with no offset itself, then
      // the second load's first add operand should be the base address.
      if (LocOffset == Dist * (int)Bytes)
        return true;
    } else if (isBaseWithConstantOffset(BaseLoc)) {
      // The base location itself has an offset, so subtract that value from
      // the second load's offset before comparing to distance * size.
      int64_t BOffset =
          cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
      if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
        if ((LocOffset - BOffset) == Dist * (int)Bytes)
          return true;
      }
    }
  }
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
    llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
                           getDataLayout());
    unsigned AlignBits = KnownZero.countTrailingOnes();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = 1 << 31;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != (1 << 31)) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector()) {
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  } else {
    unsigned NumElements = VT.getVectorNumElements();
    assert(!(NumElements & 1) && "Splitting vector, but not in half!");
    LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                   NumElements/2);
  }
  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
         N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}
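// Illustrative sketch: vector legalization splits an illegal wide vector
// into two legal halves with the two helpers above:
//
//   EVT LoVT, HiVT;
//   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N.getValueType());
//   SDValue Lo, Hi;
//   std::tie(Lo, Hi) = DAG.SplitVector(N, DL, LoVT, HiVT);
//   // Lo holds the first half of N's elements, Hi the second half.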
// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
                                        APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool isBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned sz = VT.getSizeInBits();
  if (MinSplatBits > sz)
    return false;

  SplatValue = APInt(sz, 0);
  SplatUndef = APInt(sz, 0);

  // Get the bits.  Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue.  If any of the values are not constant, give up and return
  // false.
  unsigned int nOps = getNumOperands();
  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltBitSize = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < nOps; ++j) {
    unsigned i = isBigEndian ? nOps-1-j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltBitSize;

    if (OpVal.isUndef())
      SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
                    zextOrTrunc(sz) << BitPos;
    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
    else
      return false;
  }

  // The build_vector is all constants or undefs.  Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);
  while (sz > 8) {
    unsigned HalfSize = sz / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    sz = HalfSize;
  }

  SplatBitSize = sz;
  return true;
}
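// Illustrative sketch of the out-parameter protocol above: querying whether
// a build_vector splats a repeating 8-bit pattern might look like
//
//   APInt SplatValue, SplatUndef;
//   unsigned SplatBitSize;
//   bool HasAnyUndefs;
//   if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
//                           HasAnyUndefs, /*MinSplatBits=*/8) &&
//       SplatBitSize == 8) {
//     // SplatValue's low 8 bits repeat across the whole vector.
//   }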
SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    assert(getOperand(0).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(0);
  }

  return Splatted;
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the
  // first non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}
/// \brief Returns the SDNode if it is a constant integer BuildVector
/// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it
  // as a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif  // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif  // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}