1 //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This implements the SelectionDAG class. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/CodeGen/SelectionDAG.h" 15 #include "SDNodeDbgValue.h" 16 #include "llvm/ADT/APFloat.h" 17 #include "llvm/ADT/APInt.h" 18 #include "llvm/ADT/APSInt.h" 19 #include "llvm/ADT/ArrayRef.h" 20 #include "llvm/ADT/BitVector.h" 21 #include "llvm/ADT/FoldingSet.h" 22 #include "llvm/ADT/None.h" 23 #include "llvm/ADT/STLExtras.h" 24 #include "llvm/ADT/SmallPtrSet.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/Triple.h" 27 #include "llvm/ADT/Twine.h" 28 #include "llvm/Analysis/ValueTracking.h" 29 #include "llvm/CodeGen/ISDOpcodes.h" 30 #include "llvm/CodeGen/MachineBasicBlock.h" 31 #include "llvm/CodeGen/MachineConstantPool.h" 32 #include "llvm/CodeGen/MachineFrameInfo.h" 33 #include "llvm/CodeGen/MachineFunction.h" 34 #include "llvm/CodeGen/MachineMemOperand.h" 35 #include "llvm/CodeGen/MachineValueType.h" 36 #include "llvm/CodeGen/RuntimeLibcalls.h" 37 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h" 38 #include "llvm/CodeGen/SelectionDAGNodes.h" 39 #include "llvm/CodeGen/SelectionDAGTargetInfo.h" 40 #include "llvm/CodeGen/ValueTypes.h" 41 #include "llvm/IR/Constant.h" 42 #include "llvm/IR/Constants.h" 43 #include "llvm/IR/DataLayout.h" 44 #include "llvm/IR/DebugInfoMetadata.h" 45 #include "llvm/IR/DebugLoc.h" 46 #include "llvm/IR/DerivedTypes.h" 47 #include "llvm/IR/Function.h" 48 #include "llvm/IR/GlobalValue.h" 49 #include "llvm/IR/Metadata.h" 50 #include "llvm/IR/Type.h" 51 #include "llvm/IR/Value.h" 52 #include "llvm/Support/Casting.h" 53 #include "llvm/Support/CodeGen.h" 54 #include "llvm/Support/Compiler.h" 55 #include "llvm/Support/Debug.h" 56 #include "llvm/Support/ErrorHandling.h" 57 #include "llvm/Support/KnownBits.h" 58 #include "llvm/Support/ManagedStatic.h" 59 #include "llvm/Support/MathExtras.h" 60 #include "llvm/Support/Mutex.h" 61 #include "llvm/Support/raw_ostream.h" 62 #include "llvm/Target/TargetLowering.h" 63 #include "llvm/Target/TargetMachine.h" 64 #include "llvm/Target/TargetOptions.h" 65 #include "llvm/Target/TargetRegisterInfo.h" 66 #include "llvm/Target/TargetSubtargetInfo.h" 67 #include <algorithm> 68 #include <cassert> 69 #include <cstdint> 70 #include <cstdlib> 71 #include <limits> 72 #include <set> 73 #include <string> 74 #include <utility> 75 #include <vector> 76 77 using namespace llvm; 78 79 /// makeVTList - Return an instance of the SDVTList struct initialized with the 80 /// specified members. 81 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) { 82 SDVTList Res = {VTs, NumVTs}; 83 return Res; 84 } 85 86 // Default null implementations of the callbacks. 
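// Concrete listeners (for example the RAUW update listener later in this file
// and DAGCombiner's worklist updater) override these hooks to keep external
// worklists and maps consistent while nodes are deleted or updated in place.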
87 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {} 88 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {} 89 90 #define DEBUG_TYPE "selectiondag" 91 92 static void NewSDValueDbgMsg(SDValue V, StringRef Msg) { 93 DEBUG( 94 dbgs() << Msg; 95 V.dump(); 96 ); 97 } 98 99 //===----------------------------------------------------------------------===// 100 // ConstantFPSDNode Class 101 //===----------------------------------------------------------------------===// 102 103 /// isExactlyValue - We don't rely on operator== working on double values, as 104 /// it returns true for things that are clearly not equal, like -0.0 and 0.0. 105 /// As such, this method can be used to do an exact bit-for-bit comparison of 106 /// two floating point values. 107 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const { 108 return getValueAPF().bitwiseIsEqual(V); 109 } 110 111 bool ConstantFPSDNode::isValueValidForType(EVT VT, 112 const APFloat& Val) { 113 assert(VT.isFloatingPoint() && "Can only convert between FP types"); 114 115 // convert modifies in place, so make a copy. 116 APFloat Val2 = APFloat(Val); 117 bool losesInfo; 118 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT), 119 APFloat::rmNearestTiesToEven, 120 &losesInfo); 121 return !losesInfo; 122 } 123 124 //===----------------------------------------------------------------------===// 125 // ISD Namespace 126 //===----------------------------------------------------------------------===// 127 128 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) { 129 auto *BV = dyn_cast<BuildVectorSDNode>(N); 130 if (!BV) 131 return false; 132 133 APInt SplatUndef; 134 unsigned SplatBitSize; 135 bool HasUndefs; 136 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits(); 137 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs, 138 EltSize) && 139 EltSize == SplatBitSize; 140 } 141 142 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be 143 // specializations of the more general isConstantSplatVector()? 144 145 bool ISD::isBuildVectorAllOnes(const SDNode *N) { 146 // Look through a bit convert. 147 while (N->getOpcode() == ISD::BITCAST) 148 N = N->getOperand(0).getNode(); 149 150 if (N->getOpcode() != ISD::BUILD_VECTOR) return false; 151 152 unsigned i = 0, e = N->getNumOperands(); 153 154 // Skip over all of the undef values. 155 while (i != e && N->getOperand(i).isUndef()) 156 ++i; 157 158 // Do not accept an all-undef vector. 159 if (i == e) return false; 160 161 // Do not accept build_vectors that aren't all constants or which have non-~0 162 // elements. We have to be a bit careful here, as the type of the constant 163 // may not be the same as the type of the vector elements due to type 164 // legalization (the elements are promoted to a legal type for the target and 165 // a vector of a type may be legal when the base element type is not). 166 // We only want to check enough bits to cover the vector elements, because 167 // we care if the resultant vector is all ones, not whether the individual 168 // constants are. 
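  // For example, after promotion the operands of a v8i8 build_vector may be
  // wider than i8; an operand whose low 8 bits are all set still qualifies,
  // because only the low EltSize bits are inspected below.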
169 SDValue NotZero = N->getOperand(i); 170 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); 171 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) { 172 if (CN->getAPIntValue().countTrailingOnes() < EltSize) 173 return false; 174 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) { 175 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize) 176 return false; 177 } else 178 return false; 179 180 // Okay, we have at least one ~0 value, check to see if the rest match or are 181 // undefs. Even with the above element type twiddling, this should be OK, as 182 // the same type legalization should have applied to all the elements. 183 for (++i; i != e; ++i) 184 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef()) 185 return false; 186 return true; 187 } 188 189 bool ISD::isBuildVectorAllZeros(const SDNode *N) { 190 // Look through a bit convert. 191 while (N->getOpcode() == ISD::BITCAST) 192 N = N->getOperand(0).getNode(); 193 194 if (N->getOpcode() != ISD::BUILD_VECTOR) return false; 195 196 bool IsAllUndef = true; 197 for (const SDValue &Op : N->op_values()) { 198 if (Op.isUndef()) 199 continue; 200 IsAllUndef = false; 201 // Do not accept build_vectors that aren't all constants or which have non-0 202 // elements. We have to be a bit careful here, as the type of the constant 203 // may not be the same as the type of the vector elements due to type 204 // legalization (the elements are promoted to a legal type for the target 205 // and a vector of a type may be legal when the base element type is not). 206 // We only want to check enough bits to cover the vector elements, because 207 // we care if the resultant vector is all zeros, not whether the individual 208 // constants are. 209 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); 210 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) { 211 if (CN->getAPIntValue().countTrailingZeros() < EltSize) 212 return false; 213 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) { 214 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize) 215 return false; 216 } else 217 return false; 218 } 219 220 // Do not accept an all-undef vector. 221 if (IsAllUndef) 222 return false; 223 return true; 224 } 225 226 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) { 227 if (N->getOpcode() != ISD::BUILD_VECTOR) 228 return false; 229 230 for (const SDValue &Op : N->op_values()) { 231 if (Op.isUndef()) 232 continue; 233 if (!isa<ConstantSDNode>(Op)) 234 return false; 235 } 236 return true; 237 } 238 239 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) { 240 if (N->getOpcode() != ISD::BUILD_VECTOR) 241 return false; 242 243 for (const SDValue &Op : N->op_values()) { 244 if (Op.isUndef()) 245 continue; 246 if (!isa<ConstantFPSDNode>(Op)) 247 return false; 248 } 249 return true; 250 } 251 252 bool ISD::allOperandsUndef(const SDNode *N) { 253 // Return false if the node has no operands. 254 // This is "logically inconsistent" with the definition of "all" but 255 // is probably the desired behavior. 256 if (N->getNumOperands() == 0) 257 return false; 258 259 for (const SDValue &Op : N->op_values()) 260 if (!Op.isUndef()) 261 return false; 262 263 return true; 264 } 265 266 ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) { 267 switch (ExtType) { 268 case ISD::EXTLOAD: 269 return IsFP ? 
ISD::FP_EXTEND : ISD::ANY_EXTEND; 270 case ISD::SEXTLOAD: 271 return ISD::SIGN_EXTEND; 272 case ISD::ZEXTLOAD: 273 return ISD::ZERO_EXTEND; 274 default: 275 break; 276 } 277 278 llvm_unreachable("Invalid LoadExtType"); 279 } 280 281 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) { 282 // To perform this operation, we just need to swap the L and G bits of the 283 // operation. 284 unsigned OldL = (Operation >> 2) & 1; 285 unsigned OldG = (Operation >> 1) & 1; 286 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits 287 (OldL << 1) | // New G bit 288 (OldG << 2)); // New L bit. 289 } 290 291 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) { 292 unsigned Operation = Op; 293 if (isInteger) 294 Operation ^= 7; // Flip L, G, E bits, but not U. 295 else 296 Operation ^= 15; // Flip all of the condition bits. 297 298 if (Operation > ISD::SETTRUE2) 299 Operation &= ~8; // Don't let N and U bits get set. 300 301 return ISD::CondCode(Operation); 302 } 303 304 /// For an integer comparison, return 1 if the comparison is a signed operation 305 /// and 2 if the result is an unsigned comparison. Return zero if the operation 306 /// does not depend on the sign of the input (setne and seteq). 307 static int isSignedOp(ISD::CondCode Opcode) { 308 switch (Opcode) { 309 default: llvm_unreachable("Illegal integer setcc operation!"); 310 case ISD::SETEQ: 311 case ISD::SETNE: return 0; 312 case ISD::SETLT: 313 case ISD::SETLE: 314 case ISD::SETGT: 315 case ISD::SETGE: return 1; 316 case ISD::SETULT: 317 case ISD::SETULE: 318 case ISD::SETUGT: 319 case ISD::SETUGE: return 2; 320 } 321 } 322 323 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2, 324 bool IsInteger) { 325 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3) 326 // Cannot fold a signed integer setcc with an unsigned integer setcc. 327 return ISD::SETCC_INVALID; 328 329 unsigned Op = Op1 | Op2; // Combine all of the condition bits. 330 331 // If the N and U bits get set, then the resultant comparison DOES suddenly 332 // care about orderedness, and it is true when ordered. 333 if (Op > ISD::SETTRUE2) 334 Op &= ~16; // Clear the U bit if the N bit is set. 335 336 // Canonicalize illegal integer setcc's. 337 if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT 338 Op = ISD::SETNE; 339 340 return ISD::CondCode(Op); 341 } 342 343 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2, 344 bool IsInteger) { 345 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3) 346 // Cannot fold a signed setcc with an unsigned setcc. 347 return ISD::SETCC_INVALID; 348 349 // Combine all of the condition bits. 350 ISD::CondCode Result = ISD::CondCode(Op1 & Op2); 351 352 // Canonicalize illegal integer setcc's. 353 if (IsInteger) { 354 switch (Result) { 355 default: break; 356 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT 357 case ISD::SETOEQ: // SETEQ & SETU[LG]E 358 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE 359 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE 360 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE 361 } 362 } 363 364 return Result; 365 } 366 367 //===----------------------------------------------------------------------===// 368 // SDNode Profile Support 369 //===----------------------------------------------------------------------===// 370 371 /// AddNodeIDOpcode - Add the node opcode to the NodeID data. 
372 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) { 373 ID.AddInteger(OpC); 374 } 375 376 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them 377 /// solely with their pointer. 378 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) { 379 ID.AddPointer(VTList.VTs); 380 } 381 382 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 383 static void AddNodeIDOperands(FoldingSetNodeID &ID, 384 ArrayRef<SDValue> Ops) { 385 for (auto& Op : Ops) { 386 ID.AddPointer(Op.getNode()); 387 ID.AddInteger(Op.getResNo()); 388 } 389 } 390 391 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 392 static void AddNodeIDOperands(FoldingSetNodeID &ID, 393 ArrayRef<SDUse> Ops) { 394 for (auto& Op : Ops) { 395 ID.AddPointer(Op.getNode()); 396 ID.AddInteger(Op.getResNo()); 397 } 398 } 399 400 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC, 401 SDVTList VTList, ArrayRef<SDValue> OpList) { 402 AddNodeIDOpcode(ID, OpC); 403 AddNodeIDValueTypes(ID, VTList); 404 AddNodeIDOperands(ID, OpList); 405 } 406 407 /// If this is an SDNode with special info, add this info to the NodeID data. 408 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) { 409 switch (N->getOpcode()) { 410 case ISD::TargetExternalSymbol: 411 case ISD::ExternalSymbol: 412 case ISD::MCSymbol: 413 llvm_unreachable("Should only be used on nodes with operands"); 414 default: break; // Normal nodes don't need extra info. 415 case ISD::TargetConstant: 416 case ISD::Constant: { 417 const ConstantSDNode *C = cast<ConstantSDNode>(N); 418 ID.AddPointer(C->getConstantIntValue()); 419 ID.AddBoolean(C->isOpaque()); 420 break; 421 } 422 case ISD::TargetConstantFP: 423 case ISD::ConstantFP: 424 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue()); 425 break; 426 case ISD::TargetGlobalAddress: 427 case ISD::GlobalAddress: 428 case ISD::TargetGlobalTLSAddress: 429 case ISD::GlobalTLSAddress: { 430 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N); 431 ID.AddPointer(GA->getGlobal()); 432 ID.AddInteger(GA->getOffset()); 433 ID.AddInteger(GA->getTargetFlags()); 434 break; 435 } 436 case ISD::BasicBlock: 437 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock()); 438 break; 439 case ISD::Register: 440 ID.AddInteger(cast<RegisterSDNode>(N)->getReg()); 441 break; 442 case ISD::RegisterMask: 443 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask()); 444 break; 445 case ISD::SRCVALUE: 446 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue()); 447 break; 448 case ISD::FrameIndex: 449 case ISD::TargetFrameIndex: 450 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex()); 451 break; 452 case ISD::JumpTable: 453 case ISD::TargetJumpTable: 454 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex()); 455 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags()); 456 break; 457 case ISD::ConstantPool: 458 case ISD::TargetConstantPool: { 459 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N); 460 ID.AddInteger(CP->getAlignment()); 461 ID.AddInteger(CP->getOffset()); 462 if (CP->isMachineConstantPoolEntry()) 463 CP->getMachineCPVal()->addSelectionDAGCSEId(ID); 464 else 465 ID.AddPointer(CP->getConstVal()); 466 ID.AddInteger(CP->getTargetFlags()); 467 break; 468 } 469 case ISD::TargetIndex: { 470 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N); 471 ID.AddInteger(TI->getIndex()); 472 ID.AddInteger(TI->getOffset()); 473 ID.AddInteger(TI->getTargetFlags()); 474 break; 475 } 476 case ISD::LOAD: { 477 
const LoadSDNode *LD = cast<LoadSDNode>(N); 478 ID.AddInteger(LD->getMemoryVT().getRawBits()); 479 ID.AddInteger(LD->getRawSubclassData()); 480 ID.AddInteger(LD->getPointerInfo().getAddrSpace()); 481 break; 482 } 483 case ISD::STORE: { 484 const StoreSDNode *ST = cast<StoreSDNode>(N); 485 ID.AddInteger(ST->getMemoryVT().getRawBits()); 486 ID.AddInteger(ST->getRawSubclassData()); 487 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 488 break; 489 } 490 case ISD::ATOMIC_CMP_SWAP: 491 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 492 case ISD::ATOMIC_SWAP: 493 case ISD::ATOMIC_LOAD_ADD: 494 case ISD::ATOMIC_LOAD_SUB: 495 case ISD::ATOMIC_LOAD_AND: 496 case ISD::ATOMIC_LOAD_OR: 497 case ISD::ATOMIC_LOAD_XOR: 498 case ISD::ATOMIC_LOAD_NAND: 499 case ISD::ATOMIC_LOAD_MIN: 500 case ISD::ATOMIC_LOAD_MAX: 501 case ISD::ATOMIC_LOAD_UMIN: 502 case ISD::ATOMIC_LOAD_UMAX: 503 case ISD::ATOMIC_LOAD: 504 case ISD::ATOMIC_STORE: { 505 const AtomicSDNode *AT = cast<AtomicSDNode>(N); 506 ID.AddInteger(AT->getMemoryVT().getRawBits()); 507 ID.AddInteger(AT->getRawSubclassData()); 508 ID.AddInteger(AT->getPointerInfo().getAddrSpace()); 509 break; 510 } 511 case ISD::PREFETCH: { 512 const MemSDNode *PF = cast<MemSDNode>(N); 513 ID.AddInteger(PF->getPointerInfo().getAddrSpace()); 514 break; 515 } 516 case ISD::VECTOR_SHUFFLE: { 517 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 518 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements(); 519 i != e; ++i) 520 ID.AddInteger(SVN->getMaskElt(i)); 521 break; 522 } 523 case ISD::TargetBlockAddress: 524 case ISD::BlockAddress: { 525 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N); 526 ID.AddPointer(BA->getBlockAddress()); 527 ID.AddInteger(BA->getOffset()); 528 ID.AddInteger(BA->getTargetFlags()); 529 break; 530 } 531 } // end switch (N->getOpcode()) 532 533 // Target specific memory nodes could also have address spaces to check. 534 if (N->isTargetMemoryOpcode()) 535 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace()); 536 } 537 538 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID 539 /// data. 540 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) { 541 AddNodeIDOpcode(ID, N->getOpcode()); 542 // Add the return value info. 543 AddNodeIDValueTypes(ID, N->getVTList()); 544 // Add the operand info. 545 AddNodeIDOperands(ID, N->ops()); 546 547 // Handle SDNode leafs with special info. 548 AddNodeIDCustom(ID, N); 549 } 550 551 //===----------------------------------------------------------------------===// 552 // SelectionDAG Class 553 //===----------------------------------------------------------------------===// 554 555 /// doNotCSE - Return true if CSE should not be performed for this node. 556 static bool doNotCSE(SDNode *N) { 557 if (N->getValueType(0) == MVT::Glue) 558 return true; // Never CSE anything that produces a flag. 559 560 switch (N->getOpcode()) { 561 default: break; 562 case ISD::HANDLENODE: 563 case ISD::EH_LABEL: 564 return true; // Never CSE these nodes. 565 } 566 567 // Check that remaining values produced are not flags. 568 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i) 569 if (N->getValueType(i) == MVT::Glue) 570 return true; // Never CSE anything that produces a flag. 571 572 return false; 573 } 574 575 /// RemoveDeadNodes - This method deletes all unreachable nodes in the 576 /// SelectionDAG. 
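/// A node is unreachable when its use count drops to zero; the root is kept
/// alive through a temporary HandleSDNode so it is never reclaimed while the
/// worklist is processed.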
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used nodes' use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
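  // The node's storage has just been returned to the recycling NodeAllocator,
  // which may poison it under AddressSanitizer; unpoison only the opcode field
  // so the DELETED_NODE sentinel below can still be written.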
681 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType)); 682 N->NodeType = ISD::DELETED_NODE; 683 684 // If any of the SDDbgValue nodes refer to this SDNode, invalidate 685 // them and forget about that node. 686 DbgInfo->erase(N); 687 } 688 689 #ifndef NDEBUG 690 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid. 691 static void VerifySDNode(SDNode *N) { 692 switch (N->getOpcode()) { 693 default: 694 break; 695 case ISD::BUILD_PAIR: { 696 EVT VT = N->getValueType(0); 697 assert(N->getNumValues() == 1 && "Too many results!"); 698 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) && 699 "Wrong return type!"); 700 assert(N->getNumOperands() == 2 && "Wrong number of operands!"); 701 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() && 702 "Mismatched operand types!"); 703 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() && 704 "Wrong operand type!"); 705 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() && 706 "Wrong return type size"); 707 break; 708 } 709 case ISD::BUILD_VECTOR: { 710 assert(N->getNumValues() == 1 && "Too many results!"); 711 assert(N->getValueType(0).isVector() && "Wrong return type!"); 712 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() && 713 "Wrong number of operands!"); 714 EVT EltVT = N->getValueType(0).getVectorElementType(); 715 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) { 716 assert((I->getValueType() == EltVT || 717 (EltVT.isInteger() && I->getValueType().isInteger() && 718 EltVT.bitsLE(I->getValueType()))) && 719 "Wrong operand type!"); 720 assert(I->getValueType() == N->getOperand(0).getValueType() && 721 "Operands must all have the same type"); 722 } 723 break; 724 } 725 } 726 } 727 #endif // NDEBUG 728 729 /// \brief Insert a newly allocated node into the DAG. 730 /// 731 /// Handles insertion into the all nodes list and CSE map, as well as 732 /// verification and other common operations when a new node is allocated. 733 void SelectionDAG::InsertNode(SDNode *N) { 734 AllNodes.push_back(N); 735 #ifndef NDEBUG 736 N->PersistentId = NextPersistentId++; 737 VerifySDNode(N); 738 #endif 739 } 740 741 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that 742 /// correspond to it. This is useful when we're about to delete or repurpose 743 /// the node. We don't want future request for structurally identical nodes 744 /// to return N anymore. 745 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) { 746 bool Erased = false; 747 switch (N->getOpcode()) { 748 case ISD::HANDLENODE: return false; // noop. 
749 case ISD::CONDCODE: 750 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] && 751 "Cond code doesn't exist!"); 752 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr; 753 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr; 754 break; 755 case ISD::ExternalSymbol: 756 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol()); 757 break; 758 case ISD::TargetExternalSymbol: { 759 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N); 760 Erased = TargetExternalSymbols.erase( 761 std::pair<std::string,unsigned char>(ESN->getSymbol(), 762 ESN->getTargetFlags())); 763 break; 764 } 765 case ISD::MCSymbol: { 766 auto *MCSN = cast<MCSymbolSDNode>(N); 767 Erased = MCSymbols.erase(MCSN->getMCSymbol()); 768 break; 769 } 770 case ISD::VALUETYPE: { 771 EVT VT = cast<VTSDNode>(N)->getVT(); 772 if (VT.isExtended()) { 773 Erased = ExtendedValueTypeNodes.erase(VT); 774 } else { 775 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr; 776 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr; 777 } 778 break; 779 } 780 default: 781 // Remove it from the CSE Map. 782 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!"); 783 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!"); 784 Erased = CSEMap.RemoveNode(N); 785 break; 786 } 787 #ifndef NDEBUG 788 // Verify that the node was actually in one of the CSE maps, unless it has a 789 // flag result (which cannot be CSE'd) or is one of the special cases that are 790 // not subject to CSE. 791 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue && 792 !N->isMachineOpcode() && !doNotCSE(N)) { 793 N->dump(this); 794 dbgs() << "\n"; 795 llvm_unreachable("Node is not in map!"); 796 } 797 #endif 798 return Erased; 799 } 800 801 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE 802 /// maps and modified in place. Add it back to the CSE maps, unless an identical 803 /// node already exists, in which case transfer all its users to the existing 804 /// node. This transfer can potentially trigger recursive merging. 805 void 806 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) { 807 // For node types that aren't CSE'd, just act as if no identical node 808 // already exists. 809 if (!doNotCSE(N)) { 810 SDNode *Existing = CSEMap.GetOrInsertNode(N); 811 if (Existing != N) { 812 // If there was already an existing matching node, use ReplaceAllUsesWith 813 // to replace the dead one with the existing one. This can cause 814 // recursive merging of other unrelated nodes down the line. 815 ReplaceAllUsesWith(N, Existing); 816 817 // N is now dead. Inform the listeners and delete it. 818 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 819 DUL->NodeDeleted(N, Existing); 820 DeleteNodeNotInCSEMaps(N); 821 return; 822 } 823 } 824 825 // If the node doesn't already exist, we updated it. Inform listeners. 826 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 827 DUL->NodeUpdated(N); 828 } 829 830 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 831 /// were replaced with those specified. If this node is never memoized, 832 /// return null, otherwise return a pointer to the slot it would take. If a 833 /// node already exists with these operands, the slot will be non-null. 
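/// When an existing node is found, its flags are intersected with N's flags
/// before it is returned.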
834 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op, 835 void *&InsertPos) { 836 if (doNotCSE(N)) 837 return nullptr; 838 839 SDValue Ops[] = { Op }; 840 FoldingSetNodeID ID; 841 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 842 AddNodeIDCustom(ID, N); 843 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 844 if (Node) 845 Node->intersectFlagsWith(N->getFlags()); 846 return Node; 847 } 848 849 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 850 /// were replaced with those specified. If this node is never memoized, 851 /// return null, otherwise return a pointer to the slot it would take. If a 852 /// node already exists with these operands, the slot will be non-null. 853 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, 854 SDValue Op1, SDValue Op2, 855 void *&InsertPos) { 856 if (doNotCSE(N)) 857 return nullptr; 858 859 SDValue Ops[] = { Op1, Op2 }; 860 FoldingSetNodeID ID; 861 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 862 AddNodeIDCustom(ID, N); 863 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 864 if (Node) 865 Node->intersectFlagsWith(N->getFlags()); 866 return Node; 867 } 868 869 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 870 /// were replaced with those specified. If this node is never memoized, 871 /// return null, otherwise return a pointer to the slot it would take. If a 872 /// node already exists with these operands, the slot will be non-null. 873 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops, 874 void *&InsertPos) { 875 if (doNotCSE(N)) 876 return nullptr; 877 878 FoldingSetNodeID ID; 879 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 880 AddNodeIDCustom(ID, N); 881 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 882 if (Node) 883 Node->intersectFlagsWith(N->getFlags()); 884 return Node; 885 } 886 887 unsigned SelectionDAG::getEVTAlignment(EVT VT) const { 888 Type *Ty = VT == MVT::iPTR ? 889 PointerType::get(Type::getInt8Ty(*getContext()), 0) : 890 VT.getTypeForEVT(*getContext()); 891 892 return getDataLayout().getABITypeAlignment(Ty); 893 } 894 895 // EntryNode could meaningfully have debug info if we can find it... 
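// The EntryToken node is created eagerly and inserted first, so AllNodes is
// never empty; allnodes_clear() below relies on the entry node being the first
// element of the list.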
896 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL) 897 : TM(tm), OptLevel(OL), 898 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)), 899 Root(getEntryNode()) { 900 InsertNode(&EntryNode); 901 DbgInfo = new SDDbgInfo(); 902 } 903 904 void SelectionDAG::init(MachineFunction &NewMF, 905 OptimizationRemarkEmitter &NewORE, 906 Pass *PassPtr) { 907 MF = &NewMF; 908 SDAGISelPass = PassPtr; 909 ORE = &NewORE; 910 TLI = getSubtarget().getTargetLowering(); 911 TSI = getSubtarget().getSelectionDAGInfo(); 912 Context = &MF->getFunction()->getContext(); 913 } 914 915 SelectionDAG::~SelectionDAG() { 916 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners"); 917 allnodes_clear(); 918 OperandRecycler.clear(OperandAllocator); 919 delete DbgInfo; 920 } 921 922 void SelectionDAG::allnodes_clear() { 923 assert(&*AllNodes.begin() == &EntryNode); 924 AllNodes.remove(AllNodes.begin()); 925 while (!AllNodes.empty()) 926 DeallocateNode(&AllNodes.front()); 927 #ifndef NDEBUG 928 NextPersistentId = 0; 929 #endif 930 } 931 932 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID, 933 void *&InsertPos) { 934 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos); 935 if (N) { 936 switch (N->getOpcode()) { 937 default: break; 938 case ISD::Constant: 939 case ISD::ConstantFP: 940 llvm_unreachable("Querying for Constant and ConstantFP nodes requires " 941 "debug location. Use another overload."); 942 } 943 } 944 return N; 945 } 946 947 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID, 948 const SDLoc &DL, void *&InsertPos) { 949 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos); 950 if (N) { 951 switch (N->getOpcode()) { 952 case ISD::Constant: 953 case ISD::ConstantFP: 954 // Erase debug location from the node if the node is used at several 955 // different places. Do not propagate one location to all uses as it 956 // will cause a worse single stepping debugging experience. 957 if (N->getDebugLoc() != DL.getDebugLoc()) 958 N->setDebugLoc(DebugLoc()); 959 break; 960 default: 961 // When the node's point of use is located earlier in the instruction 962 // sequence than its prior point of use, update its debug info to the 963 // earlier location. 964 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder()) 965 N->setDebugLoc(DL.getDebugLoc()); 966 break; 967 } 968 } 969 return N; 970 } 971 972 void SelectionDAG::clear() { 973 allnodes_clear(); 974 OperandRecycler.clear(OperandAllocator); 975 OperandAllocator.Reset(); 976 CSEMap.clear(); 977 978 ExtendedValueTypeNodes.clear(); 979 ExternalSymbols.clear(); 980 TargetExternalSymbols.clear(); 981 MCSymbols.clear(); 982 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(), 983 static_cast<CondCodeSDNode*>(nullptr)); 984 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(), 985 static_cast<SDNode*>(nullptr)); 986 987 EntryNode.UseList = nullptr; 988 InsertNode(&EntryNode); 989 Root = getEntryNode(); 990 DbgInfo->clear(); 991 } 992 993 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) { 994 return VT.bitsGT(Op.getValueType()) 995 ? getNode(ISD::FP_EXTEND, DL, VT, Op) 996 : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL)); 997 } 998 999 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1000 return VT.bitsGT(Op.getValueType()) ? 
1001 getNode(ISD::ANY_EXTEND, DL, VT, Op) : 1002 getNode(ISD::TRUNCATE, DL, VT, Op); 1003 } 1004 1005 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1006 return VT.bitsGT(Op.getValueType()) ? 1007 getNode(ISD::SIGN_EXTEND, DL, VT, Op) : 1008 getNode(ISD::TRUNCATE, DL, VT, Op); 1009 } 1010 1011 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1012 return VT.bitsGT(Op.getValueType()) ? 1013 getNode(ISD::ZERO_EXTEND, DL, VT, Op) : 1014 getNode(ISD::TRUNCATE, DL, VT, Op); 1015 } 1016 1017 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, 1018 EVT OpVT) { 1019 if (VT.bitsLE(Op.getValueType())) 1020 return getNode(ISD::TRUNCATE, SL, VT, Op); 1021 1022 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT); 1023 return getNode(TLI->getExtendForContent(BType), SL, VT, Op); 1024 } 1025 1026 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) { 1027 assert(!VT.isVector() && 1028 "getZeroExtendInReg should use the vector element type instead of " 1029 "the vector type!"); 1030 if (Op.getValueType() == VT) return Op; 1031 unsigned BitWidth = Op.getScalarValueSizeInBits(); 1032 APInt Imm = APInt::getLowBitsSet(BitWidth, 1033 VT.getSizeInBits()); 1034 return getNode(ISD::AND, DL, Op.getValueType(), Op, 1035 getConstant(Imm, DL, Op.getValueType())); 1036 } 1037 1038 SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL, 1039 EVT VT) { 1040 assert(VT.isVector() && "This DAG node is restricted to vector types."); 1041 assert(VT.getSizeInBits() == Op.getValueSizeInBits() && 1042 "The sizes of the input and result must match in order to perform the " 1043 "extend in-register."); 1044 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() && 1045 "The destination vector type must have fewer lanes than the input."); 1046 return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op); 1047 } 1048 1049 SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL, 1050 EVT VT) { 1051 assert(VT.isVector() && "This DAG node is restricted to vector types."); 1052 assert(VT.getSizeInBits() == Op.getValueSizeInBits() && 1053 "The sizes of the input and result must match in order to perform the " 1054 "extend in-register."); 1055 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() && 1056 "The destination vector type must have fewer lanes than the input."); 1057 return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op); 1058 } 1059 1060 SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL, 1061 EVT VT) { 1062 assert(VT.isVector() && "This DAG node is restricted to vector types."); 1063 assert(VT.getSizeInBits() == Op.getValueSizeInBits() && 1064 "The sizes of the input and result must match in order to perform the " 1065 "extend in-register."); 1066 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() && 1067 "The destination vector type must have fewer lanes than the input."); 1068 return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op); 1069 } 1070 1071 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1). 
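/// For vector types the all-ones constant is splatted across every lane, e.g.
/// getNOT(DL, V, MVT::v4i32) yields (xor V, <-1,-1,-1,-1>).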
1072 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) { 1073 EVT EltVT = VT.getScalarType(); 1074 SDValue NegOne = 1075 getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT); 1076 return getNode(ISD::XOR, DL, VT, Val, NegOne); 1077 } 1078 1079 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) { 1080 EVT EltVT = VT.getScalarType(); 1081 SDValue TrueValue; 1082 switch (TLI->getBooleanContents(VT)) { 1083 case TargetLowering::ZeroOrOneBooleanContent: 1084 case TargetLowering::UndefinedBooleanContent: 1085 TrueValue = getConstant(1, DL, VT); 1086 break; 1087 case TargetLowering::ZeroOrNegativeOneBooleanContent: 1088 TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, 1089 VT); 1090 break; 1091 } 1092 return getNode(ISD::XOR, DL, VT, Val, TrueValue); 1093 } 1094 1095 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT, 1096 bool isT, bool isO) { 1097 EVT EltVT = VT.getScalarType(); 1098 assert((EltVT.getSizeInBits() >= 64 || 1099 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) && 1100 "getConstant with a uint64_t value that doesn't fit in the type!"); 1101 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO); 1102 } 1103 1104 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT, 1105 bool isT, bool isO) { 1106 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO); 1107 } 1108 1109 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL, 1110 EVT VT, bool isT, bool isO) { 1111 assert(VT.isInteger() && "Cannot create FP integer constant!"); 1112 1113 EVT EltVT = VT.getScalarType(); 1114 const ConstantInt *Elt = &Val; 1115 1116 // In some cases the vector type is legal but the element type is illegal and 1117 // needs to be promoted, for example v8i8 on ARM. In this case, promote the 1118 // inserted value (the type does not need to match the vector element type). 1119 // Any extra bits introduced will be truncated away. 1120 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) == 1121 TargetLowering::TypePromoteInteger) { 1122 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); 1123 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits()); 1124 Elt = ConstantInt::get(*getContext(), NewVal); 1125 } 1126 // In other cases the element type is illegal and needs to be expanded, for 1127 // example v2i64 on MIPS32. In this case, find the nearest legal type, split 1128 // the value into n parts and use a vector type with n-times the elements. 1129 // Then bitcast to the type requested. 1130 // Legalizing constants too early makes the DAGCombiner's job harder so we 1131 // only legalize if the DAG tells us we must produce legal types. 1132 else if (NewNodesMustHaveLegalTypes && VT.isVector() && 1133 TLI->getTypeAction(*getContext(), EltVT) == 1134 TargetLowering::TypeExpandInteger) { 1135 const APInt &NewVal = Elt->getValue(); 1136 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); 1137 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits(); 1138 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits; 1139 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts); 1140 1141 // Check the temporary vector is the correct size. If this fails then 1142 // getTypeToTransformTo() probably returned a type whose size (in bits) 1143 // isn't a power-of-2 factor of the requested type size. 
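    // For example, a v2i64 splat constant on MIPS32 is built here as a v4i32
    // BUILD_VECTOR of the two 32-bit halves and then bitcast back to v2i64.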
1144 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits()); 1145 1146 SmallVector<SDValue, 2> EltParts; 1147 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) { 1148 EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits) 1149 .zextOrTrunc(ViaEltSizeInBits), DL, 1150 ViaEltVT, isT, isO)); 1151 } 1152 1153 // EltParts is currently in little endian order. If we actually want 1154 // big-endian order then reverse it now. 1155 if (getDataLayout().isBigEndian()) 1156 std::reverse(EltParts.begin(), EltParts.end()); 1157 1158 // The elements must be reversed when the element order is different 1159 // to the endianness of the elements (because the BITCAST is itself a 1160 // vector shuffle in this situation). However, we do not need any code to 1161 // perform this reversal because getConstant() is producing a vector 1162 // splat. 1163 // This situation occurs in MIPS MSA. 1164 1165 SmallVector<SDValue, 8> Ops; 1166 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 1167 Ops.insert(Ops.end(), EltParts.begin(), EltParts.end()); 1168 1169 SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops)); 1170 NewSDValueDbgMsg(V, "Creating constant: "); 1171 return V; 1172 } 1173 1174 assert(Elt->getBitWidth() == EltVT.getSizeInBits() && 1175 "APInt size does not match type size!"); 1176 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant; 1177 FoldingSetNodeID ID; 1178 AddNodeIDNode(ID, Opc, getVTList(EltVT), None); 1179 ID.AddPointer(Elt); 1180 ID.AddBoolean(isO); 1181 void *IP = nullptr; 1182 SDNode *N = nullptr; 1183 if ((N = FindNodeOrInsertPos(ID, DL, IP))) 1184 if (!VT.isVector()) 1185 return SDValue(N, 0); 1186 1187 if (!N) { 1188 N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT); 1189 CSEMap.InsertNode(N, IP); 1190 InsertNode(N); 1191 } 1192 1193 SDValue Result(N, 0); 1194 if (VT.isVector()) 1195 Result = getSplatBuildVector(VT, DL, Result); 1196 1197 NewSDValueDbgMsg(Result, "Creating constant: "); 1198 return Result; 1199 } 1200 1201 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL, 1202 bool isTarget) { 1203 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget); 1204 } 1205 1206 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT, 1207 bool isTarget) { 1208 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget); 1209 } 1210 1211 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL, 1212 EVT VT, bool isTarget) { 1213 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!"); 1214 1215 EVT EltVT = VT.getScalarType(); 1216 1217 // Do the map lookup using the actual bit pattern for the floating point 1218 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and 1219 // we don't have issues with SNANs. 1220 unsigned Opc = isTarget ? 
ISD::TargetConstantFP : ISD::ConstantFP; 1221 FoldingSetNodeID ID; 1222 AddNodeIDNode(ID, Opc, getVTList(EltVT), None); 1223 ID.AddPointer(&V); 1224 void *IP = nullptr; 1225 SDNode *N = nullptr; 1226 if ((N = FindNodeOrInsertPos(ID, DL, IP))) 1227 if (!VT.isVector()) 1228 return SDValue(N, 0); 1229 1230 if (!N) { 1231 N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT); 1232 CSEMap.InsertNode(N, IP); 1233 InsertNode(N); 1234 } 1235 1236 SDValue Result(N, 0); 1237 if (VT.isVector()) 1238 Result = getSplatBuildVector(VT, DL, Result); 1239 NewSDValueDbgMsg(Result, "Creating fp constant: "); 1240 return Result; 1241 } 1242 1243 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT, 1244 bool isTarget) { 1245 EVT EltVT = VT.getScalarType(); 1246 if (EltVT == MVT::f32) 1247 return getConstantFP(APFloat((float)Val), DL, VT, isTarget); 1248 else if (EltVT == MVT::f64) 1249 return getConstantFP(APFloat(Val), DL, VT, isTarget); 1250 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 || 1251 EltVT == MVT::f16) { 1252 bool Ignored; 1253 APFloat APF = APFloat(Val); 1254 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven, 1255 &Ignored); 1256 return getConstantFP(APF, DL, VT, isTarget); 1257 } else 1258 llvm_unreachable("Unsupported type in getConstantFP"); 1259 } 1260 1261 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, 1262 EVT VT, int64_t Offset, bool isTargetGA, 1263 unsigned char TargetFlags) { 1264 assert((TargetFlags == 0 || isTargetGA) && 1265 "Cannot set target flags on target-independent globals"); 1266 1267 // Truncate (with sign-extension) the offset value to the pointer size. 1268 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); 1269 if (BitWidth < 64) 1270 Offset = SignExtend64(Offset, BitWidth); 1271 1272 unsigned Opc; 1273 if (GV->isThreadLocal()) 1274 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress; 1275 else 1276 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress; 1277 1278 FoldingSetNodeID ID; 1279 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1280 ID.AddPointer(GV); 1281 ID.AddInteger(Offset); 1282 ID.AddInteger(TargetFlags); 1283 void *IP = nullptr; 1284 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 1285 return SDValue(E, 0); 1286 1287 auto *N = newSDNode<GlobalAddressSDNode>( 1288 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags); 1289 CSEMap.InsertNode(N, IP); 1290 InsertNode(N); 1291 return SDValue(N, 0); 1292 } 1293 1294 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) { 1295 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex; 1296 FoldingSetNodeID ID; 1297 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1298 ID.AddInteger(FI); 1299 void *IP = nullptr; 1300 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1301 return SDValue(E, 0); 1302 1303 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget); 1304 CSEMap.InsertNode(N, IP); 1305 InsertNode(N); 1306 return SDValue(N, 0); 1307 } 1308 1309 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget, 1310 unsigned char TargetFlags) { 1311 assert((TargetFlags == 0 || isTarget) && 1312 "Cannot set target flags on target-independent jump tables"); 1313 unsigned Opc = isTarget ? 
ISD::TargetJumpTable : ISD::JumpTable; 1314 FoldingSetNodeID ID; 1315 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1316 ID.AddInteger(JTI); 1317 ID.AddInteger(TargetFlags); 1318 void *IP = nullptr; 1319 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1320 return SDValue(E, 0); 1321 1322 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags); 1323 CSEMap.InsertNode(N, IP); 1324 InsertNode(N); 1325 return SDValue(N, 0); 1326 } 1327 1328 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT, 1329 unsigned Alignment, int Offset, 1330 bool isTarget, 1331 unsigned char TargetFlags) { 1332 assert((TargetFlags == 0 || isTarget) && 1333 "Cannot set target flags on target-independent globals"); 1334 if (Alignment == 0) 1335 Alignment = MF->getFunction()->optForSize() 1336 ? getDataLayout().getABITypeAlignment(C->getType()) 1337 : getDataLayout().getPrefTypeAlignment(C->getType()); 1338 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; 1339 FoldingSetNodeID ID; 1340 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1341 ID.AddInteger(Alignment); 1342 ID.AddInteger(Offset); 1343 ID.AddPointer(C); 1344 ID.AddInteger(TargetFlags); 1345 void *IP = nullptr; 1346 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1347 return SDValue(E, 0); 1348 1349 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment, 1350 TargetFlags); 1351 CSEMap.InsertNode(N, IP); 1352 InsertNode(N); 1353 return SDValue(N, 0); 1354 } 1355 1356 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT, 1357 unsigned Alignment, int Offset, 1358 bool isTarget, 1359 unsigned char TargetFlags) { 1360 assert((TargetFlags == 0 || isTarget) && 1361 "Cannot set target flags on target-independent globals"); 1362 if (Alignment == 0) 1363 Alignment = getDataLayout().getPrefTypeAlignment(C->getType()); 1364 unsigned Opc = isTarget ? 
ISD::TargetConstantPool : ISD::ConstantPool; 1365 FoldingSetNodeID ID; 1366 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1367 ID.AddInteger(Alignment); 1368 ID.AddInteger(Offset); 1369 C->addSelectionDAGCSEId(ID); 1370 ID.AddInteger(TargetFlags); 1371 void *IP = nullptr; 1372 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1373 return SDValue(E, 0); 1374 1375 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment, 1376 TargetFlags); 1377 CSEMap.InsertNode(N, IP); 1378 InsertNode(N); 1379 return SDValue(N, 0); 1380 } 1381 1382 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset, 1383 unsigned char TargetFlags) { 1384 FoldingSetNodeID ID; 1385 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None); 1386 ID.AddInteger(Index); 1387 ID.AddInteger(Offset); 1388 ID.AddInteger(TargetFlags); 1389 void *IP = nullptr; 1390 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1391 return SDValue(E, 0); 1392 1393 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags); 1394 CSEMap.InsertNode(N, IP); 1395 InsertNode(N); 1396 return SDValue(N, 0); 1397 } 1398 1399 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { 1400 FoldingSetNodeID ID; 1401 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None); 1402 ID.AddPointer(MBB); 1403 void *IP = nullptr; 1404 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1405 return SDValue(E, 0); 1406 1407 auto *N = newSDNode<BasicBlockSDNode>(MBB); 1408 CSEMap.InsertNode(N, IP); 1409 InsertNode(N); 1410 return SDValue(N, 0); 1411 } 1412 1413 SDValue SelectionDAG::getValueType(EVT VT) { 1414 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >= 1415 ValueTypeNodes.size()) 1416 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1); 1417 1418 SDNode *&N = VT.isExtended() ? 1419 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy]; 1420 1421 if (N) return SDValue(N, 0); 1422 N = newSDNode<VTSDNode>(VT); 1423 InsertNode(N); 1424 return SDValue(N, 0); 1425 } 1426 1427 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) { 1428 SDNode *&N = ExternalSymbols[Sym]; 1429 if (N) return SDValue(N, 0); 1430 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT); 1431 InsertNode(N); 1432 return SDValue(N, 0); 1433 } 1434 1435 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) { 1436 SDNode *&N = MCSymbols[Sym]; 1437 if (N) 1438 return SDValue(N, 0); 1439 N = newSDNode<MCSymbolSDNode>(Sym, VT); 1440 InsertNode(N); 1441 return SDValue(N, 0); 1442 } 1443 1444 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT, 1445 unsigned char TargetFlags) { 1446 SDNode *&N = 1447 TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym, 1448 TargetFlags)]; 1449 if (N) return SDValue(N, 0); 1450 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT); 1451 InsertNode(N); 1452 return SDValue(N, 0); 1453 } 1454 1455 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) { 1456 if ((unsigned)Cond >= CondCodeNodes.size()) 1457 CondCodeNodes.resize(Cond+1); 1458 1459 if (!CondCodeNodes[Cond]) { 1460 auto *N = newSDNode<CondCodeSDNode>(Cond); 1461 CondCodeNodes[Cond] = N; 1462 InsertNode(N); 1463 } 1464 1465 return SDValue(CondCodeNodes[Cond], 0); 1466 } 1467 1468 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that 1469 /// point at N1 to point at N2 and indices that point at N2 to point at N1. 
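/// For example, with 4-element operands the mask <0,5,2,7> becomes <4,1,6,3>
/// once N1 and N2 have been exchanged.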
1470 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) { 1471 std::swap(N1, N2); 1472 ShuffleVectorSDNode::commuteMask(M); 1473 } 1474 1475 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, 1476 SDValue N2, ArrayRef<int> Mask) { 1477 assert(VT.getVectorNumElements() == Mask.size() && 1478 "Must have the same number of vector elements as mask elements!"); 1479 assert(VT == N1.getValueType() && VT == N2.getValueType() && 1480 "Invalid VECTOR_SHUFFLE"); 1481 1482 // Canonicalize shuffle undef, undef -> undef 1483 if (N1.isUndef() && N2.isUndef()) 1484 return getUNDEF(VT); 1485 1486 // Validate that all indices in Mask are within the range of the elements 1487 // input to the shuffle. 1488 int NElts = Mask.size(); 1489 assert(llvm::all_of(Mask, [&](int M) { return M < (NElts * 2); }) && 1490 "Index out of range"); 1491 1492 // Copy the mask so we can do any needed cleanup. 1493 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end()); 1494 1495 // Canonicalize shuffle v, v -> v, undef 1496 if (N1 == N2) { 1497 N2 = getUNDEF(VT); 1498 for (int i = 0; i != NElts; ++i) 1499 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts; 1500 } 1501 1502 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. 1503 if (N1.isUndef()) 1504 commuteShuffle(N1, N2, MaskVec); 1505 1506 // If shuffling a splat, try to blend the splat instead. We do this here so 1507 // that even when this arises during lowering we don't have to re-handle it. 1508 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) { 1509 BitVector UndefElements; 1510 SDValue Splat = BV->getSplatValue(&UndefElements); 1511 if (!Splat) 1512 return; 1513 1514 for (int i = 0; i < NElts; ++i) { 1515 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts)) 1516 continue; 1517 1518 // If this input comes from undef, mark it as such. 1519 if (UndefElements[MaskVec[i] - Offset]) { 1520 MaskVec[i] = -1; 1521 continue; 1522 } 1523 1524 // If we can blend a non-undef lane, use that instead. 1525 if (!UndefElements[i]) 1526 MaskVec[i] = i + Offset; 1527 } 1528 }; 1529 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1)) 1530 BlendSplat(N1BV, 0); 1531 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2)) 1532 BlendSplat(N2BV, NElts); 1533 1534 // Canonicalize all index into lhs, -> shuffle lhs, undef 1535 // Canonicalize all index into rhs, -> shuffle rhs, undef 1536 bool AllLHS = true, AllRHS = true; 1537 bool N2Undef = N2.isUndef(); 1538 for (int i = 0; i != NElts; ++i) { 1539 if (MaskVec[i] >= NElts) { 1540 if (N2Undef) 1541 MaskVec[i] = -1; 1542 else 1543 AllLHS = false; 1544 } else if (MaskVec[i] >= 0) { 1545 AllRHS = false; 1546 } 1547 } 1548 if (AllLHS && AllRHS) 1549 return getUNDEF(VT); 1550 if (AllLHS && !N2Undef) 1551 N2 = getUNDEF(VT); 1552 if (AllRHS) { 1553 N1 = getUNDEF(VT); 1554 commuteShuffle(N1, N2, MaskVec); 1555 } 1556 // Reset our undef status after accounting for the mask. 1557 N2Undef = N2.isUndef(); 1558 // Re-check whether both sides ended up undef. 1559 if (N1.isUndef() && N2Undef) 1560 return getUNDEF(VT); 1561 1562 // If Identity shuffle return that node. 1563 bool Identity = true, AllSame = true; 1564 for (int i = 0; i != NElts; ++i) { 1565 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false; 1566 if (MaskVec[i] != MaskVec[0]) AllSame = false; 1567 } 1568 if (Identity && NElts) 1569 return N1; 1570 1571 // Shuffling a constant splat doesn't change the result. 1572 if (N2Undef) { 1573 SDValue V = N1; 1574 1575 // Look through any bitcasts. 
We check that these don't change the number 1576 // (and size) of elements and just changes their types. 1577 while (V.getOpcode() == ISD::BITCAST) 1578 V = V->getOperand(0); 1579 1580 // A splat should always show up as a build vector node. 1581 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 1582 BitVector UndefElements; 1583 SDValue Splat = BV->getSplatValue(&UndefElements); 1584 // If this is a splat of an undef, shuffling it is also undef. 1585 if (Splat && Splat.isUndef()) 1586 return getUNDEF(VT); 1587 1588 bool SameNumElts = 1589 V.getValueType().getVectorNumElements() == VT.getVectorNumElements(); 1590 1591 // We only have a splat which can skip shuffles if there is a splatted 1592 // value and no undef lanes rearranged by the shuffle. 1593 if (Splat && UndefElements.none()) { 1594 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the 1595 // number of elements match or the value splatted is a zero constant. 1596 if (SameNumElts) 1597 return N1; 1598 if (auto *C = dyn_cast<ConstantSDNode>(Splat)) 1599 if (C->isNullValue()) 1600 return N1; 1601 } 1602 1603 // If the shuffle itself creates a splat, build the vector directly. 1604 if (AllSame && SameNumElts) { 1605 EVT BuildVT = BV->getValueType(0); 1606 const SDValue &Splatted = BV->getOperand(MaskVec[0]); 1607 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted); 1608 1609 // We may have jumped through bitcasts, so the type of the 1610 // BUILD_VECTOR may not match the type of the shuffle. 1611 if (BuildVT != VT) 1612 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV); 1613 return NewBV; 1614 } 1615 } 1616 } 1617 1618 FoldingSetNodeID ID; 1619 SDValue Ops[2] = { N1, N2 }; 1620 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops); 1621 for (int i = 0; i != NElts; ++i) 1622 ID.AddInteger(MaskVec[i]); 1623 1624 void* IP = nullptr; 1625 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1626 return SDValue(E, 0); 1627 1628 // Allocate the mask array for the node out of the BumpPtrAllocator, since 1629 // SDNode doesn't have access to it. This memory will be "leaked" when 1630 // the node is deallocated, but recovered when the NodeAllocator is released. 
1631 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1632 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc); 1633 1634 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1635 dl.getDebugLoc(), MaskAlloc); 1636 createOperands(N, Ops); 1637 1638 CSEMap.InsertNode(N, IP); 1639 InsertNode(N); 1640 return SDValue(N, 0); 1641 } 1642 1643 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1644 MVT VT = SV.getSimpleValueType(0); 1645 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1646 ShuffleVectorSDNode::commuteMask(MaskVec); 1647 1648 SDValue Op0 = SV.getOperand(0); 1649 SDValue Op1 = SV.getOperand(1); 1650 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1651 } 1652 1653 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1654 FoldingSetNodeID ID; 1655 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1656 ID.AddInteger(RegNo); 1657 void *IP = nullptr; 1658 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1659 return SDValue(E, 0); 1660 1661 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1662 CSEMap.InsertNode(N, IP); 1663 InsertNode(N); 1664 return SDValue(N, 0); 1665 } 1666 1667 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1668 FoldingSetNodeID ID; 1669 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1670 ID.AddPointer(RegMask); 1671 void *IP = nullptr; 1672 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1673 return SDValue(E, 0); 1674 1675 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1676 CSEMap.InsertNode(N, IP); 1677 InsertNode(N); 1678 return SDValue(N, 0); 1679 } 1680 1681 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1682 MCSymbol *Label) { 1683 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1684 } 1685 1686 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1687 SDValue Root, MCSymbol *Label) { 1688 FoldingSetNodeID ID; 1689 SDValue Ops[] = { Root }; 1690 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1691 ID.AddPointer(Label); 1692 void *IP = nullptr; 1693 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1694 return SDValue(E, 0); 1695 1696 auto *N = newSDNode<LabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label); 1697 createOperands(N, Ops); 1698 1699 CSEMap.InsertNode(N, IP); 1700 InsertNode(N); 1701 return SDValue(N, 0); 1702 } 1703 1704 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1705 int64_t Offset, 1706 bool isTarget, 1707 unsigned char TargetFlags) { 1708 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1709 1710 FoldingSetNodeID ID; 1711 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1712 ID.AddPointer(BA); 1713 ID.AddInteger(Offset); 1714 ID.AddInteger(TargetFlags); 1715 void *IP = nullptr; 1716 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1717 return SDValue(E, 0); 1718 1719 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1720 CSEMap.InsertNode(N, IP); 1721 InsertNode(N); 1722 return SDValue(N, 0); 1723 } 1724 1725 SDValue SelectionDAG::getSrcValue(const Value *V) { 1726 assert((!V || V->getType()->isPointerTy()) && 1727 "SrcValue is not a pointer?"); 1728 1729 FoldingSetNodeID ID; 1730 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1731 ID.AddPointer(V); 1732 1733 void *IP = nullptr; 1734 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1735 return SDValue(E, 0); 1736 1737 auto *N = newSDNode<SrcValueSDNode>(V); 1738 CSEMap.InsertNode(N, IP); 1739 InsertNode(N); 1740 return SDValue(N, 0); 1741 } 1742 1743 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1744 FoldingSetNodeID ID; 1745 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1746 ID.AddPointer(MD); 1747 1748 void *IP = nullptr; 1749 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1750 return SDValue(E, 0); 1751 1752 auto *N = newSDNode<MDNodeSDNode>(MD); 1753 CSEMap.InsertNode(N, IP); 1754 InsertNode(N); 1755 return SDValue(N, 0); 1756 } 1757 1758 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1759 if (VT == V.getValueType()) 1760 return V; 1761 1762 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1763 } 1764 1765 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1766 unsigned SrcAS, unsigned DestAS) { 1767 SDValue Ops[] = {Ptr}; 1768 FoldingSetNodeID ID; 1769 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1770 ID.AddInteger(SrcAS); 1771 ID.AddInteger(DestAS); 1772 1773 void *IP = nullptr; 1774 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1775 return SDValue(E, 0); 1776 1777 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1778 VT, SrcAS, DestAS); 1779 createOperands(N, Ops); 1780 1781 CSEMap.InsertNode(N, IP); 1782 InsertNode(N); 1783 return SDValue(N, 0); 1784 } 1785 1786 /// getShiftAmountOperand - Return the specified value casted to 1787 /// the target's desired shift amount type. 
1788 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1789 EVT OpTy = Op.getValueType(); 1790 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1791 if (OpTy == ShTy || OpTy.isVector()) return Op; 1792 1793 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1794 } 1795 1796 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1797 SDLoc dl(Node); 1798 const TargetLowering &TLI = getTargetLoweringInfo(); 1799 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1800 EVT VT = Node->getValueType(0); 1801 SDValue Tmp1 = Node->getOperand(0); 1802 SDValue Tmp2 = Node->getOperand(1); 1803 unsigned Align = Node->getConstantOperandVal(3); 1804 1805 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1806 Tmp2, MachinePointerInfo(V)); 1807 SDValue VAList = VAListLoad; 1808 1809 if (Align > TLI.getMinStackArgumentAlignment()) { 1810 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 1811 1812 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1813 getConstant(Align - 1, dl, VAList.getValueType())); 1814 1815 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1816 getConstant(-(int64_t)Align, dl, VAList.getValueType())); 1817 } 1818 1819 // Increment the pointer, VAList, to the next vaarg 1820 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1821 getConstant(getDataLayout().getTypeAllocSize( 1822 VT.getTypeForEVT(*getContext())), 1823 dl, VAList.getValueType())); 1824 // Store the incremented VAList to the legalized pointer 1825 Tmp1 = 1826 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1827 // Load the actual argument out of the pointer VAList 1828 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1829 } 1830 1831 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1832 SDLoc dl(Node); 1833 const TargetLowering &TLI = getTargetLoweringInfo(); 1834 // This defaults to loading a pointer from the input and storing it to the 1835 // output, returning the chain. 
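  // Operands of the VACOPY node are (chain, dest pointer, src pointer,
  // dest srcvalue, src srcvalue), matching the load and store built below.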
  const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
  const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
  SDValue Tmp1 =
      getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
              Node->getOperand(2), MachinePointerInfo(VS));
  return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
                  MachinePointerInfo(VD));
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  unsigned StackAlign =
      std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const DataLayout &DL = getDataLayout();
  unsigned Align =
      std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));

  MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
                                ISD::CondCode Cond, const SDLoc &dl) {
  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, dl, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: {
    TargetLowering::BooleanContent Cnt =
        TLI->getBooleanContents(N1->getValueType(0));
    return getConstant(
        Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ?
-1ULL : 1, dl, 1882 VT); 1883 } 1884 1885 case ISD::SETOEQ: 1886 case ISD::SETOGT: 1887 case ISD::SETOGE: 1888 case ISD::SETOLT: 1889 case ISD::SETOLE: 1890 case ISD::SETONE: 1891 case ISD::SETO: 1892 case ISD::SETUO: 1893 case ISD::SETUEQ: 1894 case ISD::SETUNE: 1895 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!"); 1896 break; 1897 } 1898 1899 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 1900 const APInt &C2 = N2C->getAPIntValue(); 1901 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 1902 const APInt &C1 = N1C->getAPIntValue(); 1903 1904 switch (Cond) { 1905 default: llvm_unreachable("Unknown integer setcc!"); 1906 case ISD::SETEQ: return getConstant(C1 == C2, dl, VT); 1907 case ISD::SETNE: return getConstant(C1 != C2, dl, VT); 1908 case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT); 1909 case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT); 1910 case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT); 1911 case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT); 1912 case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT); 1913 case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT); 1914 case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT); 1915 case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT); 1916 } 1917 } 1918 } 1919 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) { 1920 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) { 1921 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); 1922 switch (Cond) { 1923 default: break; 1924 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 1925 return getUNDEF(VT); 1926 LLVM_FALLTHROUGH; 1927 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT); 1928 case ISD::SETNE: if (R==APFloat::cmpUnordered) 1929 return getUNDEF(VT); 1930 LLVM_FALLTHROUGH; 1931 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan || 1932 R==APFloat::cmpLessThan, dl, VT); 1933 case ISD::SETLT: if (R==APFloat::cmpUnordered) 1934 return getUNDEF(VT); 1935 LLVM_FALLTHROUGH; 1936 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT); 1937 case ISD::SETGT: if (R==APFloat::cmpUnordered) 1938 return getUNDEF(VT); 1939 LLVM_FALLTHROUGH; 1940 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT); 1941 case ISD::SETLE: if (R==APFloat::cmpUnordered) 1942 return getUNDEF(VT); 1943 LLVM_FALLTHROUGH; 1944 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan || 1945 R==APFloat::cmpEqual, dl, VT); 1946 case ISD::SETGE: if (R==APFloat::cmpUnordered) 1947 return getUNDEF(VT); 1948 LLVM_FALLTHROUGH; 1949 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan || 1950 R==APFloat::cmpEqual, dl, VT); 1951 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT); 1952 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT); 1953 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered || 1954 R==APFloat::cmpEqual, dl, VT); 1955 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT); 1956 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered || 1957 R==APFloat::cmpLessThan, dl, VT); 1958 case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan || 1959 R==APFloat::cmpUnordered, dl, VT); 1960 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT); 1961 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT); 1962 } 1963 } else { 1964 // Ensure that the constant occurs on the RHS. 
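      // For example, (setcc 4, X, setlt) can become (setcc X, 4, setgt),
      // provided the swapped condition code is legal for the comparison type.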
1965 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 1966 MVT CompVT = N1.getValueType().getSimpleVT(); 1967 if (!TLI->isCondCodeLegal(SwappedCond, CompVT)) 1968 return SDValue(); 1969 1970 return getSetCC(dl, VT, N2, N1, SwappedCond); 1971 } 1972 } 1973 1974 // Could not fold it. 1975 return SDValue(); 1976 } 1977 1978 /// See if the specified operand can be simplified with the knowledge that only 1979 /// the bits specified by Mask are used. 1980 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &Mask) { 1981 switch (V.getOpcode()) { 1982 default: 1983 break; 1984 case ISD::Constant: { 1985 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode()); 1986 assert(CV && "Const value should be ConstSDNode."); 1987 const APInt &CVal = CV->getAPIntValue(); 1988 APInt NewVal = CVal & Mask; 1989 if (NewVal != CVal) 1990 return getConstant(NewVal, SDLoc(V), V.getValueType()); 1991 break; 1992 } 1993 case ISD::OR: 1994 case ISD::XOR: 1995 // If the LHS or RHS don't contribute bits to the or, drop them. 1996 if (MaskedValueIsZero(V.getOperand(0), Mask)) 1997 return V.getOperand(1); 1998 if (MaskedValueIsZero(V.getOperand(1), Mask)) 1999 return V.getOperand(0); 2000 break; 2001 case ISD::SRL: 2002 // Only look at single-use SRLs. 2003 if (!V.getNode()->hasOneUse()) 2004 break; 2005 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 2006 // See if we can recursively simplify the LHS. 2007 unsigned Amt = RHSC->getZExtValue(); 2008 2009 // Watch out for shift count overflow though. 2010 if (Amt >= Mask.getBitWidth()) 2011 break; 2012 APInt NewMask = Mask << Amt; 2013 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask)) 2014 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS, 2015 V.getOperand(1)); 2016 } 2017 break; 2018 case ISD::AND: { 2019 // X & -1 -> X (ignoring bits which aren't demanded). 2020 ConstantSDNode *AndVal = isConstOrConstSplat(V.getOperand(1)); 2021 if (AndVal && Mask.isSubsetOf(AndVal->getAPIntValue())) 2022 return V.getOperand(0); 2023 break; 2024 } 2025 case ISD::ANY_EXTEND: { 2026 SDValue Src = V.getOperand(0); 2027 unsigned SrcBitWidth = Src.getScalarValueSizeInBits(); 2028 // Being conservative here - only peek through if we only demand bits in the 2029 // non-extended source (even though the extended bits are technically undef). 2030 if (Mask.getActiveBits() > SrcBitWidth) 2031 break; 2032 APInt SrcMask = Mask.trunc(SrcBitWidth); 2033 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcMask)) 2034 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc); 2035 break; 2036 } 2037 } 2038 return SDValue(); 2039 } 2040 2041 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2042 /// use this predicate to simplify operations downstream. 2043 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2044 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2045 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2046 } 2047 2048 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2049 /// this predicate to simplify operations downstream. Mask is known to be zero 2050 /// for bits that V cannot have. 
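/// For example, if the top 16 bits of a 32-bit V are known to be zero, then
/// MaskedValueIsZero(V, 0xFFFF0000) returns true.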
2051 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, 2052 unsigned Depth) const { 2053 KnownBits Known; 2054 computeKnownBits(Op, Known, Depth); 2055 return Mask.isSubsetOf(Known.Zero); 2056 } 2057 2058 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 2059 /// is less than the element bit-width of the shift node, return it. 2060 static const APInt *getValidShiftAmountConstant(SDValue V) { 2061 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 2062 // Shifting more than the bitwidth is not valid. 2063 const APInt &ShAmt = SA->getAPIntValue(); 2064 if (ShAmt.ult(V.getScalarValueSizeInBits())) 2065 return &ShAmt; 2066 } 2067 return nullptr; 2068 } 2069 2070 /// Determine which bits of Op are known to be either zero or one and return 2071 /// them in Known. For vectors, the known bits are those that are shared by 2072 /// every vector element. 2073 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 2074 unsigned Depth) const { 2075 EVT VT = Op.getValueType(); 2076 APInt DemandedElts = VT.isVector() 2077 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2078 : APInt(1, 1); 2079 computeKnownBits(Op, Known, DemandedElts, Depth); 2080 } 2081 2082 /// Determine which bits of Op are known to be either zero or one and return 2083 /// them in Known. The DemandedElts argument allows us to only collect the known 2084 /// bits that are shared by the requested vector elements. 2085 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known, 2086 const APInt &DemandedElts, 2087 unsigned Depth) const { 2088 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2089 2090 Known = KnownBits(BitWidth); // Don't know anything. 2091 if (Depth == 6) 2092 return; // Limit search depth. 2093 2094 KnownBits Known2; 2095 unsigned NumElts = DemandedElts.getBitWidth(); 2096 2097 if (!DemandedElts) 2098 return; // No demanded elts, better to assume we don't know anything. 2099 2100 unsigned Opcode = Op.getOpcode(); 2101 switch (Opcode) { 2102 case ISD::Constant: 2103 // We know all of the bits for a constant! 2104 Known.One = cast<ConstantSDNode>(Op)->getAPIntValue(); 2105 Known.Zero = ~Known.One; 2106 break; 2107 case ISD::BUILD_VECTOR: 2108 // Collect the known bits that are shared by every demanded vector element. 2109 assert(NumElts == Op.getValueType().getVectorNumElements() && 2110 "Unexpected vector size"); 2111 Known.Zero.setAllBits(); Known.One.setAllBits(); 2112 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2113 if (!DemandedElts[i]) 2114 continue; 2115 2116 SDValue SrcOp = Op.getOperand(i); 2117 computeKnownBits(SrcOp, Known2, Depth + 1); 2118 2119 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2120 if (SrcOp.getValueSizeInBits() != BitWidth) { 2121 assert(SrcOp.getValueSizeInBits() > BitWidth && 2122 "Expected BUILD_VECTOR implicit truncation"); 2123 Known2 = Known2.trunc(BitWidth); 2124 } 2125 2126 // Known bits are the values that are shared by every demanded element. 2127 Known.One &= Known2.One; 2128 Known.Zero &= Known2.Zero; 2129 2130 // If we don't know any bits, early out. 2131 if (!Known.One && !Known.Zero) 2132 break; 2133 } 2134 break; 2135 case ISD::VECTOR_SHUFFLE: { 2136 // Collect the known bits that are shared by every vector element referenced 2137 // by the shuffle. 
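    // For example, a <4 x i32> shuffle with mask <0, 5, 2, 7> makes output
    // lanes 0 and 2 read LHS lanes 0 and 2, and lanes 1 and 3 read RHS lanes
    // 1 and 3; any undef mask element forces us to give up on the result.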
2138 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2139 Known.Zero.setAllBits(); Known.One.setAllBits(); 2140 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2141 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2142 for (unsigned i = 0; i != NumElts; ++i) { 2143 if (!DemandedElts[i]) 2144 continue; 2145 2146 int M = SVN->getMaskElt(i); 2147 if (M < 0) { 2148 // For UNDEF elements, we don't know anything about the common state of 2149 // the shuffle result. 2150 Known.resetAll(); 2151 DemandedLHS.clearAllBits(); 2152 DemandedRHS.clearAllBits(); 2153 break; 2154 } 2155 2156 if ((unsigned)M < NumElts) 2157 DemandedLHS.setBit((unsigned)M % NumElts); 2158 else 2159 DemandedRHS.setBit((unsigned)M % NumElts); 2160 } 2161 // Known bits are the values that are shared by every demanded element. 2162 if (!!DemandedLHS) { 2163 SDValue LHS = Op.getOperand(0); 2164 computeKnownBits(LHS, Known2, DemandedLHS, Depth + 1); 2165 Known.One &= Known2.One; 2166 Known.Zero &= Known2.Zero; 2167 } 2168 // If we don't know any bits, early out. 2169 if (!Known.One && !Known.Zero) 2170 break; 2171 if (!!DemandedRHS) { 2172 SDValue RHS = Op.getOperand(1); 2173 computeKnownBits(RHS, Known2, DemandedRHS, Depth + 1); 2174 Known.One &= Known2.One; 2175 Known.Zero &= Known2.Zero; 2176 } 2177 break; 2178 } 2179 case ISD::CONCAT_VECTORS: { 2180 // Split DemandedElts and test each of the demanded subvectors. 2181 Known.Zero.setAllBits(); Known.One.setAllBits(); 2182 EVT SubVectorVT = Op.getOperand(0).getValueType(); 2183 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 2184 unsigned NumSubVectors = Op.getNumOperands(); 2185 for (unsigned i = 0; i != NumSubVectors; ++i) { 2186 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 2187 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 2188 if (!!DemandedSub) { 2189 SDValue Sub = Op.getOperand(i); 2190 computeKnownBits(Sub, Known2, DemandedSub, Depth + 1); 2191 Known.One &= Known2.One; 2192 Known.Zero &= Known2.Zero; 2193 } 2194 // If we don't know any bits, early out. 2195 if (!Known.One && !Known.Zero) 2196 break; 2197 } 2198 break; 2199 } 2200 case ISD::EXTRACT_SUBVECTOR: { 2201 // If we know the element index, just demand that subvector elements, 2202 // otherwise demand them all. 2203 SDValue Src = Op.getOperand(0); 2204 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2205 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2206 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2207 // Offset the demanded elts by the subvector index. 2208 uint64_t Idx = SubIdx->getZExtValue(); 2209 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx); 2210 computeKnownBits(Src, Known, DemandedSrc, Depth + 1); 2211 } else { 2212 computeKnownBits(Src, Known, Depth + 1); 2213 } 2214 break; 2215 } 2216 case ISD::BITCAST: { 2217 SDValue N0 = Op.getOperand(0); 2218 unsigned SubBitWidth = N0.getScalarValueSizeInBits(); 2219 2220 // Ignore bitcasts from floating point. 2221 if (!N0.getValueType().isInteger()) 2222 break; 2223 2224 // Fast handling of 'identity' bitcasts. 2225 if (BitWidth == SubBitWidth) { 2226 computeKnownBits(N0, Known, DemandedElts, Depth + 1); 2227 break; 2228 } 2229 2230 // Support big-endian targets when it becomes useful. 2231 bool IsLE = getDataLayout().isLittleEndian(); 2232 if (!IsLE) 2233 break; 2234 2235 // Bitcast 'small element' vector to 'large element' scalar/vector. 
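    // For example, a little-endian bitcast of v8i8 to v2i32 builds bits
    // [8*j, 8*j+7] of output element i from input sub-element 4*i+j, so each
    // interleaved set of sub-elements is queried separately below and the
    // results are shifted and OR'd into place.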
2236 if ((BitWidth % SubBitWidth) == 0) { 2237 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2238 2239 // Collect known bits for the (larger) output by collecting the known 2240 // bits from each set of sub elements and shift these into place. 2241 // We need to separately call computeKnownBits for each set of 2242 // sub elements as the knownbits for each is likely to be different. 2243 unsigned SubScale = BitWidth / SubBitWidth; 2244 APInt SubDemandedElts(NumElts * SubScale, 0); 2245 for (unsigned i = 0; i != NumElts; ++i) 2246 if (DemandedElts[i]) 2247 SubDemandedElts.setBit(i * SubScale); 2248 2249 for (unsigned i = 0; i != SubScale; ++i) { 2250 computeKnownBits(N0, Known2, SubDemandedElts.shl(i), 2251 Depth + 1); 2252 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * i); 2253 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * i); 2254 } 2255 } 2256 2257 // Bitcast 'large element' scalar/vector to 'small element' vector. 2258 if ((SubBitWidth % BitWidth) == 0) { 2259 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 2260 2261 // Collect known bits for the (smaller) output by collecting the known 2262 // bits from the overlapping larger input elements and extracting the 2263 // sub sections we actually care about. 2264 unsigned SubScale = SubBitWidth / BitWidth; 2265 APInt SubDemandedElts(NumElts / SubScale, 0); 2266 for (unsigned i = 0; i != NumElts; ++i) 2267 if (DemandedElts[i]) 2268 SubDemandedElts.setBit(i / SubScale); 2269 2270 computeKnownBits(N0, Known2, SubDemandedElts, Depth + 1); 2271 2272 Known.Zero.setAllBits(); Known.One.setAllBits(); 2273 for (unsigned i = 0; i != NumElts; ++i) 2274 if (DemandedElts[i]) { 2275 unsigned Offset = (i % SubScale) * BitWidth; 2276 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth); 2277 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth); 2278 // If we don't know any bits, early out. 2279 if (!Known.One && !Known.Zero) 2280 break; 2281 } 2282 } 2283 break; 2284 } 2285 case ISD::AND: 2286 // If either the LHS or the RHS are Zero, the result is zero. 2287 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2288 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2289 2290 // Output known-1 bits are only known if set in both the LHS & RHS. 2291 Known.One &= Known2.One; 2292 // Output known-0 are known to be clear if zero in either the LHS | RHS. 2293 Known.Zero |= Known2.Zero; 2294 break; 2295 case ISD::OR: 2296 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2297 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2298 2299 // Output known-0 bits are only known if clear in both the LHS & RHS. 2300 Known.Zero &= Known2.Zero; 2301 // Output known-1 are known to be set if set in either the LHS | RHS. 2302 Known.One |= Known2.One; 2303 break; 2304 case ISD::XOR: { 2305 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2306 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2307 2308 // Output known-0 bits are known if clear or set in both the LHS & RHS. 2309 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One); 2310 // Output known-1 are known to be set if set in only one of the LHS, RHS. 
2311 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero); 2312 Known.Zero = KnownZeroOut; 2313 break; 2314 } 2315 case ISD::MUL: { 2316 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1); 2317 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2318 2319 // If low bits are zero in either operand, output low known-0 bits. 2320 // Also compute a conservative estimate for high known-0 bits. 2321 // More trickiness is possible, but this is sufficient for the 2322 // interesting case of alignment computation. 2323 unsigned TrailZ = Known.countMinTrailingZeros() + 2324 Known2.countMinTrailingZeros(); 2325 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 2326 Known2.countMinLeadingZeros(), 2327 BitWidth) - BitWidth; 2328 2329 Known.resetAll(); 2330 Known.Zero.setLowBits(std::min(TrailZ, BitWidth)); 2331 Known.Zero.setHighBits(std::min(LeadZ, BitWidth)); 2332 break; 2333 } 2334 case ISD::UDIV: { 2335 // For the purposes of computing leading zeros we can conservatively 2336 // treat a udiv as a logical right shift by the power of 2 known to 2337 // be less than the denominator. 2338 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2339 unsigned LeadZ = Known2.countMinLeadingZeros(); 2340 2341 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2342 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2343 if (RHSMaxLeadingZeros != BitWidth) 2344 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2345 2346 Known.Zero.setHighBits(LeadZ); 2347 break; 2348 } 2349 case ISD::SELECT: 2350 computeKnownBits(Op.getOperand(2), Known, Depth+1); 2351 // If we don't know any bits, early out. 2352 if (!Known.One && !Known.Zero) 2353 break; 2354 computeKnownBits(Op.getOperand(1), Known2, Depth+1); 2355 2356 // Only known if known in both the LHS and RHS. 2357 Known.One &= Known2.One; 2358 Known.Zero &= Known2.Zero; 2359 break; 2360 case ISD::SELECT_CC: 2361 computeKnownBits(Op.getOperand(3), Known, Depth+1); 2362 // If we don't know any bits, early out. 2363 if (!Known.One && !Known.Zero) 2364 break; 2365 computeKnownBits(Op.getOperand(2), Known2, Depth+1); 2366 2367 // Only known if known in both the LHS and RHS. 2368 Known.One &= Known2.One; 2369 Known.Zero &= Known2.Zero; 2370 break; 2371 case ISD::SMULO: 2372 case ISD::UMULO: 2373 if (Op.getResNo() != 1) 2374 break; 2375 // The boolean result conforms to getBooleanContents. 2376 // If we know the result of a setcc has the top bits zero, use this info. 2377 // We know that we have an integer-based boolean since these operations 2378 // are only available for integer. 2379 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2380 TargetLowering::ZeroOrOneBooleanContent && 2381 BitWidth > 1) 2382 Known.Zero.setBitsFrom(1); 2383 break; 2384 case ISD::SETCC: 2385 // If we know the result of a setcc has the top bits zero, use this info. 2386 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2387 TargetLowering::ZeroOrOneBooleanContent && 2388 BitWidth > 1) 2389 Known.Zero.setBitsFrom(1); 2390 break; 2391 case ISD::SHL: 2392 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2393 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2394 Known.Zero <<= *ShAmt; 2395 Known.One <<= *ShAmt; 2396 // Low bits are known zero. 
2397 Known.Zero.setLowBits(ShAmt->getZExtValue()); 2398 } 2399 break; 2400 case ISD::SRL: 2401 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2402 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2403 Known.Zero.lshrInPlace(*ShAmt); 2404 Known.One.lshrInPlace(*ShAmt); 2405 // High bits are known zero. 2406 Known.Zero.setHighBits(ShAmt->getZExtValue()); 2407 } 2408 break; 2409 case ISD::SRA: 2410 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2411 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2412 Known.Zero.lshrInPlace(*ShAmt); 2413 Known.One.lshrInPlace(*ShAmt); 2414 // If we know the value of the sign bit, then we know it is copied across 2415 // the high bits by the shift amount. 2416 APInt SignMask = APInt::getSignMask(BitWidth); 2417 SignMask.lshrInPlace(*ShAmt); // Adjust to where it is now in the mask. 2418 if (Known.Zero.intersects(SignMask)) { 2419 Known.Zero.setHighBits(ShAmt->getZExtValue());// New bits are known zero. 2420 } else if (Known.One.intersects(SignMask)) { 2421 Known.One.setHighBits(ShAmt->getZExtValue()); // New bits are known one. 2422 } 2423 } 2424 break; 2425 case ISD::SIGN_EXTEND_INREG: { 2426 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2427 unsigned EBits = EVT.getScalarSizeInBits(); 2428 2429 // Sign extension. Compute the demanded bits in the result that are not 2430 // present in the input. 2431 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2432 2433 APInt InSignMask = APInt::getSignMask(EBits); 2434 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2435 2436 // If the sign extended bits are demanded, we know that the sign 2437 // bit is demanded. 2438 InSignMask = InSignMask.zext(BitWidth); 2439 if (NewBits.getBoolValue()) 2440 InputDemandedBits |= InSignMask; 2441 2442 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2443 Known.One &= InputDemandedBits; 2444 Known.Zero &= InputDemandedBits; 2445 2446 // If the sign bit of the input is known set or clear, then we know the 2447 // top bits of the result. 2448 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear 2449 Known.Zero |= NewBits; 2450 Known.One &= ~NewBits; 2451 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set 2452 Known.One |= NewBits; 2453 Known.Zero &= ~NewBits; 2454 } else { // Input sign bit unknown 2455 Known.Zero &= ~NewBits; 2456 Known.One &= ~NewBits; 2457 } 2458 break; 2459 } 2460 case ISD::CTTZ: 2461 case ISD::CTTZ_ZERO_UNDEF: { 2462 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2463 // If we have a known 1, its position is our upper bound. 2464 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 2465 unsigned LowBits = Log2_32(PossibleTZ) + 1; 2466 Known.Zero.setBitsFrom(LowBits); 2467 break; 2468 } 2469 case ISD::CTLZ: 2470 case ISD::CTLZ_ZERO_UNDEF: { 2471 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2472 // If we have a known 1, its position is our upper bound. 2473 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 2474 unsigned LowBits = Log2_32(PossibleLZ) + 1; 2475 Known.Zero.setBitsFrom(LowBits); 2476 break; 2477 } 2478 case ISD::CTPOP: { 2479 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2480 // If we know some of the bits are zero, they can't be one. 
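    // For example, if at most 5 bits of the operand can be one, the population
    // count is at most 5 and fits in 3 bits, so bits [3, BitWidth) of the
    // result are known zero.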
2481 unsigned PossibleOnes = Known2.countMaxPopulation(); 2482 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 2483 break; 2484 } 2485 case ISD::LOAD: { 2486 LoadSDNode *LD = cast<LoadSDNode>(Op); 2487 // If this is a ZEXTLoad and we are looking at the loaded value. 2488 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 2489 EVT VT = LD->getMemoryVT(); 2490 unsigned MemBits = VT.getScalarSizeInBits(); 2491 Known.Zero.setBitsFrom(MemBits); 2492 } else if (const MDNode *Ranges = LD->getRanges()) { 2493 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 2494 computeKnownBitsFromRangeMetadata(*Ranges, Known); 2495 } 2496 break; 2497 } 2498 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2499 EVT InVT = Op.getOperand(0).getValueType(); 2500 unsigned InBits = InVT.getScalarSizeInBits(); 2501 Known = Known.trunc(InBits); 2502 computeKnownBits(Op.getOperand(0), Known, 2503 DemandedElts.zext(InVT.getVectorNumElements()), 2504 Depth + 1); 2505 Known = Known.zext(BitWidth); 2506 Known.Zero.setBitsFrom(InBits); 2507 break; 2508 } 2509 case ISD::ZERO_EXTEND: { 2510 EVT InVT = Op.getOperand(0).getValueType(); 2511 unsigned InBits = InVT.getScalarSizeInBits(); 2512 Known = Known.trunc(InBits); 2513 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2514 Known = Known.zext(BitWidth); 2515 Known.Zero.setBitsFrom(InBits); 2516 break; 2517 } 2518 // TODO ISD::SIGN_EXTEND_VECTOR_INREG 2519 case ISD::SIGN_EXTEND: { 2520 EVT InVT = Op.getOperand(0).getValueType(); 2521 unsigned InBits = InVT.getScalarSizeInBits(); 2522 2523 Known = Known.trunc(InBits); 2524 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2525 2526 // If the sign bit is known to be zero or one, then sext will extend 2527 // it to the top bits, else it will just zext. 2528 Known = Known.sext(BitWidth); 2529 break; 2530 } 2531 case ISD::ANY_EXTEND: { 2532 EVT InVT = Op.getOperand(0).getValueType(); 2533 unsigned InBits = InVT.getScalarSizeInBits(); 2534 Known = Known.trunc(InBits); 2535 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2536 Known = Known.zext(BitWidth); 2537 break; 2538 } 2539 case ISD::TRUNCATE: { 2540 EVT InVT = Op.getOperand(0).getValueType(); 2541 unsigned InBits = InVT.getScalarSizeInBits(); 2542 Known = Known.zext(InBits); 2543 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2544 Known = Known.trunc(BitWidth); 2545 break; 2546 } 2547 case ISD::AssertZext: { 2548 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2549 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 2550 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2551 Known.Zero |= (~InMask); 2552 Known.One &= (~Known.Zero); 2553 break; 2554 } 2555 case ISD::FGETSIGN: 2556 // All bits are zero except the low bit. 2557 Known.Zero.setBitsFrom(1); 2558 break; 2559 case ISD::USUBO: 2560 case ISD::SSUBO: 2561 if (Op.getResNo() == 1) { 2562 // If we know the result of a setcc has the top bits zero, use this info. 2563 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2564 TargetLowering::ZeroOrOneBooleanContent && 2565 BitWidth > 1) 2566 Known.Zero.setBitsFrom(1); 2567 break; 2568 } 2569 LLVM_FALLTHROUGH; 2570 case ISD::SUB: 2571 case ISD::SUBC: { 2572 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) { 2573 // We know that the top bits of C-X are clear if X contains less bits 2574 // than C (i.e. no wrap-around can happen). For example, 20-X is 2575 // positive if we can prove that X is >= 0 and < 16. 
2576 if (CLHS->getAPIntValue().isNonNegative()) { 2577 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros(); 2578 // NLZ can't be BitWidth with no sign bit 2579 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1); 2580 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, 2581 Depth + 1); 2582 2583 // If all of the MaskV bits are known to be zero, then we know the 2584 // output top bits are zero, because we now know that the output is 2585 // from [0-C]. 2586 if ((Known2.Zero & MaskV) == MaskV) { 2587 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros(); 2588 // Top bits known zero. 2589 Known.Zero.setHighBits(NLZ2); 2590 } 2591 } 2592 } 2593 2594 // If low bits are know to be zero in both operands, then we know they are 2595 // going to be 0 in the result. Both addition and complement operations 2596 // preserve the low zero bits. 2597 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2598 unsigned KnownZeroLow = Known2.countMinTrailingZeros(); 2599 if (KnownZeroLow == 0) 2600 break; 2601 2602 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2603 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros()); 2604 Known.Zero.setLowBits(KnownZeroLow); 2605 break; 2606 } 2607 case ISD::UADDO: 2608 case ISD::SADDO: 2609 case ISD::ADDCARRY: 2610 if (Op.getResNo() == 1) { 2611 // If we know the result of a setcc has the top bits zero, use this info. 2612 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2613 TargetLowering::ZeroOrOneBooleanContent && 2614 BitWidth > 1) 2615 Known.Zero.setBitsFrom(1); 2616 break; 2617 } 2618 LLVM_FALLTHROUGH; 2619 case ISD::ADD: 2620 case ISD::ADDC: 2621 case ISD::ADDE: { 2622 // Output known-0 bits are known if clear or set in both the low clear bits 2623 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the 2624 // low 3 bits clear. 2625 // Output known-0 bits are also known if the top bits of each input are 2626 // known to be clear. For example, if one input has the top 10 bits clear 2627 // and the other has the top 8 bits clear, we know the top 7 bits of the 2628 // output must be clear. 2629 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2630 unsigned KnownZeroHigh = Known2.countMinLeadingZeros(); 2631 unsigned KnownZeroLow = Known2.countMinTrailingZeros(); 2632 2633 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, 2634 Depth + 1); 2635 KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros()); 2636 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros()); 2637 2638 if (Opcode == ISD::ADDE || Opcode == ISD::ADDCARRY) { 2639 // With ADDE and ADDCARRY, a carry bit may be added in, so we can only 2640 // use this information if we know (at least) that the low two bits are 2641 // clear. We then return to the caller that the low bit is unknown but 2642 // that other bits are known zero. 2643 if (KnownZeroLow >= 2) 2644 Known.Zero.setBits(1, KnownZeroLow); 2645 break; 2646 } 2647 2648 Known.Zero.setLowBits(KnownZeroLow); 2649 if (KnownZeroHigh > 1) 2650 Known.Zero.setHighBits(KnownZeroHigh - 1); 2651 break; 2652 } 2653 case ISD::SREM: 2654 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 2655 const APInt &RA = Rem->getAPIntValue().abs(); 2656 if (RA.isPowerOf2()) { 2657 APInt LowBits = RA - 1; 2658 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2659 2660 // The low bits of the first operand are unchanged by the srem. 
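        // For example, for (srem X, 8) the low 3 bits of the result equal the
        // low 3 bits of X; the bits above them are all zero when X is known
        // non-negative (or those low bits are all known zero), and all one
        // when X is known negative and one of those low bits is known one.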
2661 Known.Zero = Known2.Zero & LowBits; 2662 Known.One = Known2.One & LowBits; 2663 2664 // If the first operand is non-negative or has all low bits zero, then 2665 // the upper bits are all zero. 2666 if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits)) 2667 Known.Zero |= ~LowBits; 2668 2669 // If the first operand is negative and not all low bits are zero, then 2670 // the upper bits are all one. 2671 if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0)) 2672 Known.One |= ~LowBits; 2673 assert((Known.Zero & Known.One) == 0&&"Bits known to be one AND zero?"); 2674 } 2675 } 2676 break; 2677 case ISD::UREM: { 2678 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 2679 const APInt &RA = Rem->getAPIntValue(); 2680 if (RA.isPowerOf2()) { 2681 APInt LowBits = (RA - 1); 2682 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2683 2684 // The upper bits are all zero, the lower ones are unchanged. 2685 Known.Zero = Known2.Zero | ~LowBits; 2686 Known.One = Known2.One & LowBits; 2687 break; 2688 } 2689 } 2690 2691 // Since the result is less than or equal to either operand, any leading 2692 // zero bits in either operand must also exist in the result. 2693 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2694 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2695 2696 uint32_t Leaders = 2697 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 2698 Known.resetAll(); 2699 Known.Zero.setHighBits(Leaders); 2700 break; 2701 } 2702 case ISD::EXTRACT_ELEMENT: { 2703 computeKnownBits(Op.getOperand(0), Known, Depth+1); 2704 const unsigned Index = Op.getConstantOperandVal(1); 2705 const unsigned BitWidth = Op.getValueSizeInBits(); 2706 2707 // Remove low part of known bits mask 2708 Known.Zero = Known.Zero.getHiBits(Known.Zero.getBitWidth() - Index * BitWidth); 2709 Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth); 2710 2711 // Remove high part of known bit mask 2712 Known = Known.trunc(BitWidth); 2713 break; 2714 } 2715 case ISD::EXTRACT_VECTOR_ELT: { 2716 SDValue InVec = Op.getOperand(0); 2717 SDValue EltNo = Op.getOperand(1); 2718 EVT VecVT = InVec.getValueType(); 2719 const unsigned BitWidth = Op.getValueSizeInBits(); 2720 const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); 2721 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 2722 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 2723 // anything about the extended bits. 2724 if (BitWidth > EltBitWidth) 2725 Known = Known.trunc(EltBitWidth); 2726 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 2727 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) { 2728 // If we know the element index, just demand that vector element. 2729 unsigned Idx = ConstEltNo->getZExtValue(); 2730 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); 2731 computeKnownBits(InVec, Known, DemandedElt, Depth + 1); 2732 } else { 2733 // Unknown element index, so ignore DemandedElts and demand them all. 
2734 computeKnownBits(InVec, Known, Depth + 1); 2735 } 2736 if (BitWidth > EltBitWidth) 2737 Known = Known.zext(BitWidth); 2738 break; 2739 } 2740 case ISD::INSERT_VECTOR_ELT: { 2741 SDValue InVec = Op.getOperand(0); 2742 SDValue InVal = Op.getOperand(1); 2743 SDValue EltNo = Op.getOperand(2); 2744 2745 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 2746 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 2747 // If we know the element index, split the demand between the 2748 // source vector and the inserted element. 2749 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth); 2750 unsigned EltIdx = CEltNo->getZExtValue(); 2751 2752 // If we demand the inserted element then add its common known bits. 2753 if (DemandedElts[EltIdx]) { 2754 computeKnownBits(InVal, Known2, Depth + 1); 2755 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 2756 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth()); 2757 } 2758 2759 // If we demand the source vector then add its common known bits, ensuring 2760 // that we don't demand the inserted element. 2761 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx)); 2762 if (!!VectorElts) { 2763 computeKnownBits(InVec, Known2, VectorElts, Depth + 1); 2764 Known.One &= Known2.One; 2765 Known.Zero &= Known2.Zero; 2766 } 2767 } else { 2768 // Unknown element index, so ignore DemandedElts and demand them all. 2769 computeKnownBits(InVec, Known, Depth + 1); 2770 computeKnownBits(InVal, Known2, Depth + 1); 2771 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 2772 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth()); 2773 } 2774 break; 2775 } 2776 case ISD::BITREVERSE: { 2777 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2778 Known.Zero = Known2.Zero.reverseBits(); 2779 Known.One = Known2.One.reverseBits(); 2780 break; 2781 } 2782 case ISD::BSWAP: { 2783 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2784 Known.Zero = Known2.Zero.byteSwap(); 2785 Known.One = Known2.One.byteSwap(); 2786 break; 2787 } 2788 case ISD::ABS: { 2789 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1); 2790 2791 // If the source's MSB is zero then we know the rest of the bits already. 2792 if (Known2.isNonNegative()) { 2793 Known.Zero = Known2.Zero; 2794 Known.One = Known2.One; 2795 break; 2796 } 2797 2798 // We only know that the absolute values's MSB will be zero iff there is 2799 // a set bit that isn't the sign bit (otherwise it could be INT_MIN). 2800 Known2.One.clearSignBit(); 2801 if (Known2.One.getBoolValue()) { 2802 Known.Zero = APInt::getSignMask(BitWidth); 2803 break; 2804 } 2805 break; 2806 } 2807 case ISD::UMIN: { 2808 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1); 2809 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2810 2811 // UMIN - we know that the result will have the maximum of the 2812 // known zero leading bits of the inputs. 2813 unsigned LeadZero = Known.countMinLeadingZeros(); 2814 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros()); 2815 2816 Known.Zero &= Known2.Zero; 2817 Known.One &= Known2.One; 2818 Known.Zero.setHighBits(LeadZero); 2819 break; 2820 } 2821 case ISD::UMAX: { 2822 computeKnownBits(Op.getOperand(0), Known, DemandedElts, 2823 Depth + 1); 2824 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2825 2826 // UMAX - we know that the result will have the maximum of the 2827 // known one leading bits of the inputs. 
2828 unsigned LeadOne = Known.countMinLeadingOnes(); 2829 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes()); 2830 2831 Known.Zero &= Known2.Zero; 2832 Known.One &= Known2.One; 2833 Known.One.setHighBits(LeadOne); 2834 break; 2835 } 2836 case ISD::SMIN: 2837 case ISD::SMAX: { 2838 computeKnownBits(Op.getOperand(0), Known, DemandedElts, 2839 Depth + 1); 2840 // If we don't know any bits, early out. 2841 if (!Known.One && !Known.Zero) 2842 break; 2843 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1); 2844 Known.Zero &= Known2.Zero; 2845 Known.One &= Known2.One; 2846 break; 2847 } 2848 case ISD::FrameIndex: 2849 case ISD::TargetFrameIndex: 2850 if (unsigned Align = InferPtrAlignment(Op)) { 2851 // The low bits are known zero if the pointer is aligned. 2852 Known.Zero.setLowBits(Log2_32(Align)); 2853 break; 2854 } 2855 break; 2856 2857 default: 2858 if (Opcode < ISD::BUILTIN_OP_END) 2859 break; 2860 LLVM_FALLTHROUGH; 2861 case ISD::INTRINSIC_WO_CHAIN: 2862 case ISD::INTRINSIC_W_CHAIN: 2863 case ISD::INTRINSIC_VOID: 2864 // Allow the target to implement this method for its nodes. 2865 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); 2866 break; 2867 } 2868 2869 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); 2870 } 2871 2872 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0, 2873 SDValue N1) const { 2874 // X + 0 never overflow 2875 if (isNullConstant(N1)) 2876 return OFK_Never; 2877 2878 KnownBits N1Known; 2879 computeKnownBits(N1, N1Known); 2880 if (N1Known.Zero.getBoolValue()) { 2881 KnownBits N0Known; 2882 computeKnownBits(N0, N0Known); 2883 2884 bool overflow; 2885 (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow); 2886 if (!overflow) 2887 return OFK_Never; 2888 } 2889 2890 // mulhi + 1 never overflow 2891 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 && 2892 (~N1Known.Zero & 0x01) == ~N1Known.Zero) 2893 return OFK_Never; 2894 2895 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) { 2896 KnownBits N0Known; 2897 computeKnownBits(N0, N0Known); 2898 2899 if ((~N0Known.Zero & 0x01) == ~N0Known.Zero) 2900 return OFK_Never; 2901 } 2902 2903 return OFK_Sometime; 2904 } 2905 2906 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const { 2907 EVT OpVT = Val.getValueType(); 2908 unsigned BitWidth = OpVT.getScalarSizeInBits(); 2909 2910 // Is the constant a known power of 2? 2911 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val)) 2912 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 2913 2914 // A left-shift of a constant one will have exactly one bit set because 2915 // shifting the bit off the end is undefined. 2916 if (Val.getOpcode() == ISD::SHL) { 2917 auto *C = isConstOrConstSplat(Val.getOperand(0)); 2918 if (C && C->getAPIntValue() == 1) 2919 return true; 2920 } 2921 2922 // Similarly, a logical right-shift of a constant sign-bit will have exactly 2923 // one bit set. 2924 if (Val.getOpcode() == ISD::SRL) { 2925 auto *C = isConstOrConstSplat(Val.getOperand(0)); 2926 if (C && C->getAPIntValue().isSignMask()) 2927 return true; 2928 } 2929 2930 // Are all operands of a build vector constant powers of two? 
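  // For example, <2, 4, 8, 16> qualifies; any operand that is not a constant
  // (including undef) or is not a power of two makes us fall back to the
  // generic known-bits check below.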
2931 if (Val.getOpcode() == ISD::BUILD_VECTOR) 2932 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 2933 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 2934 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 2935 return false; 2936 })) 2937 return true; 2938 2939 // More could be done here, though the above checks are enough 2940 // to handle some common cases. 2941 2942 // Fall back to computeKnownBits to catch other known cases. 2943 KnownBits Known; 2944 computeKnownBits(Val, Known); 2945 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 2946 } 2947 2948 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 2949 EVT VT = Op.getValueType(); 2950 APInt DemandedElts = VT.isVector() 2951 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2952 : APInt(1, 1); 2953 return ComputeNumSignBits(Op, DemandedElts, Depth); 2954 } 2955 2956 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 2957 unsigned Depth) const { 2958 EVT VT = Op.getValueType(); 2959 assert(VT.isInteger() && "Invalid VT!"); 2960 unsigned VTBits = VT.getScalarSizeInBits(); 2961 unsigned NumElts = DemandedElts.getBitWidth(); 2962 unsigned Tmp, Tmp2; 2963 unsigned FirstAnswer = 1; 2964 2965 if (Depth == 6) 2966 return 1; // Limit search depth. 2967 2968 if (!DemandedElts) 2969 return 1; // No demanded elts, better to assume we don't know anything. 2970 2971 switch (Op.getOpcode()) { 2972 default: break; 2973 case ISD::AssertSext: 2974 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2975 return VTBits-Tmp+1; 2976 case ISD::AssertZext: 2977 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 2978 return VTBits-Tmp; 2979 2980 case ISD::Constant: { 2981 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue(); 2982 return Val.getNumSignBits(); 2983 } 2984 2985 case ISD::BUILD_VECTOR: 2986 Tmp = VTBits; 2987 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 2988 if (!DemandedElts[i]) 2989 continue; 2990 2991 SDValue SrcOp = Op.getOperand(i); 2992 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 2993 2994 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2995 if (SrcOp.getValueSizeInBits() != VTBits) { 2996 assert(SrcOp.getValueSizeInBits() > VTBits && 2997 "Expected BUILD_VECTOR implicit truncation"); 2998 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 2999 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3000 } 3001 Tmp = std::min(Tmp, Tmp2); 3002 } 3003 return Tmp; 3004 3005 case ISD::VECTOR_SHUFFLE: { 3006 // Collect the minimum number of sign bits that are shared by every vector 3007 // element referenced by the shuffle. 3008 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3009 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3010 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3011 for (unsigned i = 0; i != NumElts; ++i) { 3012 int M = SVN->getMaskElt(i); 3013 if (!DemandedElts[i]) 3014 continue; 3015 // For UNDEF elements, we don't know anything about the common state of 3016 // the shuffle result. 
3017 if (M < 0) 3018 return 1; 3019 if ((unsigned)M < NumElts) 3020 DemandedLHS.setBit((unsigned)M % NumElts); 3021 else 3022 DemandedRHS.setBit((unsigned)M % NumElts); 3023 } 3024 Tmp = std::numeric_limits<unsigned>::max(); 3025 if (!!DemandedLHS) 3026 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3027 if (!!DemandedRHS) { 3028 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3029 Tmp = std::min(Tmp, Tmp2); 3030 } 3031 // If we don't know anything, early out and try computeKnownBits fall-back. 3032 if (Tmp == 1) 3033 break; 3034 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3035 return Tmp; 3036 } 3037 3038 case ISD::BITCAST: { 3039 SDValue N0 = Op.getOperand(0); 3040 unsigned SrcBits = N0.getScalarValueSizeInBits(); 3041 3042 // Ignore bitcasts from floating point. 3043 if (!N0.getValueType().isInteger()) 3044 break; 3045 3046 // Fast handling of 'identity' bitcasts. 3047 if (VTBits == SrcBits) 3048 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3049 3050 // Bitcast 'large element' scalar/vector to 'small element' vector. 3051 // TODO: Handle cases other than 'sign splat' when we have a use case. 3052 // Requires handling of DemandedElts and Endianness. 3053 if ((SrcBits % VTBits) == 0) { 3054 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 3055 Tmp = ComputeNumSignBits(N0, Depth + 1); 3056 if (Tmp == SrcBits) 3057 return VTBits; 3058 } 3059 break; 3060 } 3061 3062 case ISD::SIGN_EXTEND: 3063 case ISD::SIGN_EXTEND_VECTOR_INREG: 3064 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3065 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp; 3066 3067 case ISD::SIGN_EXTEND_INREG: 3068 // Max of the input and what this extends. 3069 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3070 Tmp = VTBits-Tmp+1; 3071 3072 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3073 return std::max(Tmp, Tmp2); 3074 3075 case ISD::SRA: 3076 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3077 // SRA X, C -> adds C sign bits. 3078 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) { 3079 APInt ShiftVal = C->getAPIntValue(); 3080 ShiftVal += Tmp; 3081 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); 3082 } 3083 return Tmp; 3084 case ISD::SHL: 3085 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) { 3086 // shl destroys sign bits. 3087 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3088 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 3089 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 3090 return Tmp - C->getZExtValue(); 3091 } 3092 break; 3093 case ISD::AND: 3094 case ISD::OR: 3095 case ISD::XOR: // NOT is handled here. 3096 // Logical binary ops preserve the number of sign bits at the worst. 3097 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3098 if (Tmp != 1) { 3099 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3100 FirstAnswer = std::min(Tmp, Tmp2); 3101 // We computed what we know about the sign bits as our first 3102 // answer. Now proceed to the generic code that uses 3103 // computeKnownBits, and pick whichever answer is better. 3104 } 3105 break; 3106 3107 case ISD::SELECT: 3108 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3109 if (Tmp == 1) return 1; // Early out. 
3110 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1); 3111 return std::min(Tmp, Tmp2); 3112 case ISD::SELECT_CC: 3113 Tmp = ComputeNumSignBits(Op.getOperand(2), Depth+1); 3114 if (Tmp == 1) return 1; // Early out. 3115 Tmp2 = ComputeNumSignBits(Op.getOperand(3), Depth+1); 3116 return std::min(Tmp, Tmp2); 3117 case ISD::SMIN: 3118 case ISD::SMAX: 3119 case ISD::UMIN: 3120 case ISD::UMAX: 3121 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3122 if (Tmp == 1) 3123 return 1; // Early out. 3124 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3125 return std::min(Tmp, Tmp2); 3126 case ISD::SADDO: 3127 case ISD::UADDO: 3128 case ISD::SSUBO: 3129 case ISD::USUBO: 3130 case ISD::SMULO: 3131 case ISD::UMULO: 3132 if (Op.getResNo() != 1) 3133 break; 3134 // The boolean result conforms to getBooleanContents. Fall through. 3135 // If setcc returns 0/-1, all bits are sign bits. 3136 // We know that we have an integer-based boolean since these operations 3137 // are only available for integer. 3138 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 3139 TargetLowering::ZeroOrNegativeOneBooleanContent) 3140 return VTBits; 3141 break; 3142 case ISD::SETCC: 3143 // If setcc returns 0/-1, all bits are sign bits. 3144 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3145 TargetLowering::ZeroOrNegativeOneBooleanContent) 3146 return VTBits; 3147 break; 3148 case ISD::ROTL: 3149 case ISD::ROTR: 3150 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 3151 unsigned RotAmt = C->getAPIntValue().urem(VTBits); 3152 3153 // Handle rotate right by N like a rotate left by 32-N. 3154 if (Op.getOpcode() == ISD::ROTR) 3155 RotAmt = (VTBits - RotAmt) % VTBits; 3156 3157 // If we aren't rotating out all of the known-in sign bits, return the 3158 // number that are left. This handles rotl(sext(x), 1) for example. 3159 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3160 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt); 3161 } 3162 break; 3163 case ISD::ADD: 3164 case ISD::ADDC: 3165 // Add can have at most one carry bit. Thus we know that the output 3166 // is, at worst, one more bit than the inputs. 3167 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3168 if (Tmp == 1) return 1; // Early out. 3169 3170 // Special case decrementing a value (ADD X, -1): 3171 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 3172 if (CRHS->isAllOnesValue()) { 3173 KnownBits Known; 3174 computeKnownBits(Op.getOperand(0), Known, Depth+1); 3175 3176 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3177 // sign bits set. 3178 if ((Known.Zero | 1).isAllOnesValue()) 3179 return VTBits; 3180 3181 // If we are subtracting one from a positive number, there is no carry 3182 // out of the result. 3183 if (Known.isNonNegative()) 3184 return Tmp; 3185 } 3186 3187 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3188 if (Tmp2 == 1) return 1; 3189 return std::min(Tmp, Tmp2)-1; 3190 3191 case ISD::SUB: 3192 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3193 if (Tmp2 == 1) return 1; 3194 3195 // Handle NEG. 3196 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) 3197 if (CLHS->isNullValue()) { 3198 KnownBits Known; 3199 computeKnownBits(Op.getOperand(1), Known, Depth+1); 3200 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3201 // sign bits set. 
3202 if ((Known.Zero | 1).isAllOnesValue()) 3203 return VTBits; 3204 3205 // If the input is known to be positive (the sign bit is known clear), 3206 // the output of the NEG has the same number of sign bits as the input. 3207 if (Known.isNonNegative()) 3208 return Tmp2; 3209 3210 // Otherwise, we treat this like a SUB. 3211 } 3212 3213 // Sub can have at most one carry bit. Thus we know that the output 3214 // is, at worst, one more bit than the inputs. 3215 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3216 if (Tmp == 1) return 1; // Early out. 3217 return std::min(Tmp, Tmp2)-1; 3218 case ISD::TRUNCATE: { 3219 // Check if the sign bits of source go down as far as the truncated value. 3220 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); 3221 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3222 if (NumSrcSignBits > (NumSrcBits - VTBits)) 3223 return NumSrcSignBits - (NumSrcBits - VTBits); 3224 break; 3225 } 3226 case ISD::EXTRACT_ELEMENT: { 3227 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3228 const int BitWidth = Op.getValueSizeInBits(); 3229 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; 3230 3231 // Get reverse index (starting from 1), Op1 value indexes elements from 3232 // little end. Sign starts at big end. 3233 const int rIndex = Items - 1 - Op.getConstantOperandVal(1); 3234 3235 // If the sign portion ends in our element the subtraction gives correct 3236 // result. Otherwise it gives either negative or > bitwidth result 3237 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); 3238 } 3239 case ISD::INSERT_VECTOR_ELT: { 3240 SDValue InVec = Op.getOperand(0); 3241 SDValue InVal = Op.getOperand(1); 3242 SDValue EltNo = Op.getOperand(2); 3243 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 3244 3245 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3246 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3247 // If we know the element index, split the demand between the 3248 // source vector and the inserted element. 3249 unsigned EltIdx = CEltNo->getZExtValue(); 3250 3251 // If we demand the inserted element then get its sign bits. 3252 Tmp = std::numeric_limits<unsigned>::max(); 3253 if (DemandedElts[EltIdx]) { 3254 // TODO - handle implicit truncation of inserted elements. 3255 if (InVal.getScalarValueSizeInBits() != VTBits) 3256 break; 3257 Tmp = ComputeNumSignBits(InVal, Depth + 1); 3258 } 3259 3260 // If we demand the source vector then get its sign bits, and determine 3261 // the minimum. 3262 APInt VectorElts = DemandedElts; 3263 VectorElts.clearBit(EltIdx); 3264 if (!!VectorElts) { 3265 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1); 3266 Tmp = std::min(Tmp, Tmp2); 3267 } 3268 } else { 3269 // Unknown element index, so ignore DemandedElts and demand them all. 
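// Any lane could be overwritten by the inserted scalar, so the safe answer is
// the minimum over the whole source vector and the inserted value.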
3270 Tmp = ComputeNumSignBits(InVec, Depth + 1); 3271 Tmp2 = ComputeNumSignBits(InVal, Depth + 1); 3272 Tmp = std::min(Tmp, Tmp2); 3273 } 3274 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3275 return Tmp; 3276 } 3277 case ISD::EXTRACT_VECTOR_ELT: { 3278 SDValue InVec = Op.getOperand(0); 3279 SDValue EltNo = Op.getOperand(1); 3280 EVT VecVT = InVec.getValueType(); 3281 const unsigned BitWidth = Op.getValueSizeInBits(); 3282 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); 3283 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3284 3285 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know 3286 // anything about sign bits. But if the sizes match we can derive knowledge 3287 // about sign bits from the vector operand. 3288 if (BitWidth != EltBitWidth) 3289 break; 3290 3291 // If we know the element index, just demand that vector element, else for 3292 // an unknown element index, ignore DemandedElts and demand them all. 3293 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 3294 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3295 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) 3296 DemandedSrcElts = 3297 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); 3298 3299 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1); 3300 } 3301 case ISD::EXTRACT_SUBVECTOR: { 3302 // If we know the element index, just demand that subvector elements, 3303 // otherwise demand them all. 3304 SDValue Src = Op.getOperand(0); 3305 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 3306 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3307 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 3308 // Offset the demanded elts by the subvector index. 3309 uint64_t Idx = SubIdx->getZExtValue(); 3310 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx); 3311 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1); 3312 } 3313 return ComputeNumSignBits(Src, Depth + 1); 3314 } 3315 case ISD::CONCAT_VECTORS: 3316 // Determine the minimum number of sign bits across all demanded 3317 // elts of the input vectors. Early out if the result is already 1. 3318 Tmp = std::numeric_limits<unsigned>::max(); 3319 EVT SubVectorVT = Op.getOperand(0).getValueType(); 3320 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 3321 unsigned NumSubVectors = Op.getNumOperands(); 3322 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) { 3323 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 3324 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 3325 if (!DemandedSub) 3326 continue; 3327 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1); 3328 Tmp = std::min(Tmp, Tmp2); 3329 } 3330 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3331 return Tmp; 3332 } 3333 3334 // If we are looking at the loaded value of the SDNode. 3335 if (Op.getResNo() == 0) { 3336 // Handle LOADX separately here. EXTLOAD case will fallthrough. 3337 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 3338 unsigned ExtType = LD->getExtensionType(); 3339 switch (ExtType) { 3340 default: break; 3341 case ISD::SEXTLOAD: // '17' bits known 3342 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3343 return VTBits-Tmp+1; 3344 case ISD::ZEXTLOAD: // '16' bits known 3345 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3346 return VTBits-Tmp; 3347 } 3348 } 3349 } 3350 3351 // Allow the target to implement this method for its nodes. 
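// Target-specific opcodes and intrinsics are opaque to the generic cases
// above, so query TargetLowering for a bound before falling back to the
// computeKnownBits-based estimate below.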
3352 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3353 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3354 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3355 Op.getOpcode() == ISD::INTRINSIC_VOID) {
3356 unsigned NumBits =
3357 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
3358 if (NumBits > 1)
3359 FirstAnswer = std::max(FirstAnswer, NumBits);
3360 }
3361
3362 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3363 // use this information.
3364 KnownBits Known;
3365 computeKnownBits(Op, Known, DemandedElts, Depth);
3366
3367 APInt Mask;
3368 if (Known.isNonNegative()) { // sign bit is 0
3369 Mask = Known.Zero;
3370 } else if (Known.isNegative()) { // sign bit is 1
3371 Mask = Known.One;
3372 } else {
3373 // Nothing known.
3374 return FirstAnswer;
3375 }
3376
3377 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3378 // the number of identical bits in the top of the input value.
3379 Mask = ~Mask;
3380 Mask <<= Mask.getBitWidth()-VTBits;
3381 // Return # leading zeros. We use 'min' here in case Val was zero before
3382 // shifting. We don't want to return '64' as for an i32 "0".
3383 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3384 }
3385
3386 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3387 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3388 !isa<ConstantSDNode>(Op.getOperand(1)))
3389 return false;
3390
3391 if (Op.getOpcode() == ISD::OR &&
3392 !MaskedValueIsZero(Op.getOperand(0),
3393 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3394 return false;
3395
3396 return true;
3397 }
3398
3399 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
3400 // If we're told that NaNs won't happen, assume they won't.
3401 if (getTarget().Options.NoNaNsFPMath)
3402 return true;
3403
3404 if (Op->getFlags().hasNoNaNs())
3405 return true;
3406
3407 // If the value is a constant, we can obviously see if it is a NaN or not.
3408 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3409 return !C->getValueAPF().isNaN();
3410
3411 // TODO: Recognize more cases here.
3412
3413 return false;
3414 }
3415
3416 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3417 // If the value is a constant, we can obviously see if it is a zero or not.
3418 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3419 return !C->isZero();
3420
3421 // TODO: Recognize more cases here.
3422 switch (Op.getOpcode()) {
3423 default: break;
3424 case ISD::OR:
3425 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3426 return !C->isNullValue();
3427 break;
3428 }
3429
3430 return false;
3431 }
3432
3433 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3434 // Check the obvious case.
3435 if (A == B) return true;
3436
3437 // For negative and positive zero.
3438 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3439 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3440 if (CA->isZero() && CB->isZero()) return true;
3441
3442 // Otherwise they may not be equal.
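// Returning false only means "not provably equal"; callers must not treat it
// as a proof of inequality.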
3443 return false; 3444 } 3445 3446 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const { 3447 assert(A.getValueType() == B.getValueType() && 3448 "Values must have the same type"); 3449 KnownBits AKnown, BKnown; 3450 computeKnownBits(A, AKnown); 3451 computeKnownBits(B, BKnown); 3452 return (AKnown.Zero | BKnown.Zero).isAllOnesValue(); 3453 } 3454 3455 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT, 3456 ArrayRef<SDValue> Ops, 3457 SelectionDAG &DAG) { 3458 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!"); 3459 assert(llvm::all_of(Ops, 3460 [Ops](SDValue Op) { 3461 return Ops[0].getValueType() == Op.getValueType(); 3462 }) && 3463 "Concatenation of vectors with inconsistent value types!"); 3464 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) == 3465 VT.getVectorNumElements() && 3466 "Incorrect element count in vector concatenation!"); 3467 3468 if (Ops.size() == 1) 3469 return Ops[0]; 3470 3471 // Concat of UNDEFs is UNDEF. 3472 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 3473 return DAG.getUNDEF(VT); 3474 3475 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be 3476 // simplified to one big BUILD_VECTOR. 3477 // FIXME: Add support for SCALAR_TO_VECTOR as well. 3478 EVT SVT = VT.getScalarType(); 3479 SmallVector<SDValue, 16> Elts; 3480 for (SDValue Op : Ops) { 3481 EVT OpVT = Op.getValueType(); 3482 if (Op.isUndef()) 3483 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); 3484 else if (Op.getOpcode() == ISD::BUILD_VECTOR) 3485 Elts.append(Op->op_begin(), Op->op_end()); 3486 else 3487 return SDValue(); 3488 } 3489 3490 // BUILD_VECTOR requires all inputs to be of the same type, find the 3491 // maximum type and extend them all. 3492 for (SDValue Op : Elts) 3493 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 3494 3495 if (SVT.bitsGT(VT.getScalarType())) 3496 for (SDValue &Op : Elts) 3497 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) 3498 ? DAG.getZExtOrTrunc(Op, DL, SVT) 3499 : DAG.getSExtOrTrunc(Op, DL, SVT); 3500 3501 SDValue V = DAG.getBuildVector(VT, DL, Elts); 3502 NewSDValueDbgMsg(V, "New node fold concat vectors: "); 3503 return V; 3504 } 3505 3506 /// Gets or creates the specified node. 3507 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { 3508 FoldingSetNodeID ID; 3509 AddNodeIDNode(ID, Opcode, getVTList(VT), None); 3510 void *IP = nullptr; 3511 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 3512 return SDValue(E, 0); 3513 3514 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), 3515 getVTList(VT)); 3516 CSEMap.InsertNode(N, IP); 3517 3518 InsertNode(N); 3519 SDValue V = SDValue(N, 0); 3520 NewSDValueDbgMsg(V, "Creating new node: "); 3521 return V; 3522 } 3523 3524 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 3525 SDValue Operand, const SDNodeFlags Flags) { 3526 // Constant fold unary operations with an integer constant operand. Even 3527 // opaque constant will be folded, because the folding of unary operations 3528 // doesn't create new constants with different values. Nevertheless, the 3529 // opaque flag is preserved during folding to prevent future folding with 3530 // other constants. 
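// For example, sign-extending an opaque i8 constant still folds to the
// extended constant, but the result keeps the opaque flag so later combines
// will not merge it with ordinary literals.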
3531 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 3532 const APInt &Val = C->getAPIntValue(); 3533 switch (Opcode) { 3534 default: break; 3535 case ISD::SIGN_EXTEND: 3536 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 3537 C->isTargetOpcode(), C->isOpaque()); 3538 case ISD::ANY_EXTEND: 3539 case ISD::ZERO_EXTEND: 3540 case ISD::TRUNCATE: 3541 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 3542 C->isTargetOpcode(), C->isOpaque()); 3543 case ISD::UINT_TO_FP: 3544 case ISD::SINT_TO_FP: { 3545 APFloat apf(EVTToAPFloatSemantics(VT), 3546 APInt::getNullValue(VT.getSizeInBits())); 3547 (void)apf.convertFromAPInt(Val, 3548 Opcode==ISD::SINT_TO_FP, 3549 APFloat::rmNearestTiesToEven); 3550 return getConstantFP(apf, DL, VT); 3551 } 3552 case ISD::BITCAST: 3553 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 3554 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 3555 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 3556 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 3557 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 3558 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 3559 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 3560 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 3561 break; 3562 case ISD::ABS: 3563 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 3564 C->isOpaque()); 3565 case ISD::BITREVERSE: 3566 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 3567 C->isOpaque()); 3568 case ISD::BSWAP: 3569 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 3570 C->isOpaque()); 3571 case ISD::CTPOP: 3572 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 3573 C->isOpaque()); 3574 case ISD::CTLZ: 3575 case ISD::CTLZ_ZERO_UNDEF: 3576 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 3577 C->isOpaque()); 3578 case ISD::CTTZ: 3579 case ISD::CTTZ_ZERO_UNDEF: 3580 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 3581 C->isOpaque()); 3582 case ISD::FP16_TO_FP: { 3583 bool Ignored; 3584 APFloat FPV(APFloat::IEEEhalf(), 3585 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 3586 3587 // This can return overflow, underflow, or inexact; we don't care. 3588 // FIXME need to be more flexible about rounding mode. 3589 (void)FPV.convert(EVTToAPFloatSemantics(VT), 3590 APFloat::rmNearestTiesToEven, &Ignored); 3591 return getConstantFP(FPV, DL, VT); 3592 } 3593 } 3594 } 3595 3596 // Constant fold unary operations with a floating point constant operand. 
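// For example, FNEG of a ConstantFP 1.5 folds directly to the constant -1.5;
// the rounding-based folds below only fire when APFloat reports an acceptable
// status (opOK or opInexact).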
3597 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 3598 APFloat V = C->getValueAPF(); // make copy 3599 switch (Opcode) { 3600 case ISD::FNEG: 3601 V.changeSign(); 3602 return getConstantFP(V, DL, VT); 3603 case ISD::FABS: 3604 V.clearSign(); 3605 return getConstantFP(V, DL, VT); 3606 case ISD::FCEIL: { 3607 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 3608 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3609 return getConstantFP(V, DL, VT); 3610 break; 3611 } 3612 case ISD::FTRUNC: { 3613 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 3614 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3615 return getConstantFP(V, DL, VT); 3616 break; 3617 } 3618 case ISD::FFLOOR: { 3619 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 3620 if (fs == APFloat::opOK || fs == APFloat::opInexact) 3621 return getConstantFP(V, DL, VT); 3622 break; 3623 } 3624 case ISD::FP_EXTEND: { 3625 bool ignored; 3626 // This can return overflow, underflow, or inexact; we don't care. 3627 // FIXME need to be more flexible about rounding mode. 3628 (void)V.convert(EVTToAPFloatSemantics(VT), 3629 APFloat::rmNearestTiesToEven, &ignored); 3630 return getConstantFP(V, DL, VT); 3631 } 3632 case ISD::FP_TO_SINT: 3633 case ISD::FP_TO_UINT: { 3634 bool ignored; 3635 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 3636 // FIXME need to be more flexible about rounding mode. 3637 APFloat::opStatus s = 3638 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 3639 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 3640 break; 3641 return getConstant(IntVal, DL, VT); 3642 } 3643 case ISD::BITCAST: 3644 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 3645 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3646 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 3647 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 3648 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 3649 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 3650 break; 3651 case ISD::FP_TO_FP16: { 3652 bool Ignored; 3653 // This can return overflow, underflow, or inexact; we don't care. 3654 // FIXME need to be more flexible about rounding mode. 3655 (void)V.convert(APFloat::IEEEhalf(), 3656 APFloat::rmNearestTiesToEven, &Ignored); 3657 return getConstant(V.bitcastToAPInt(), DL, VT); 3658 } 3659 } 3660 } 3661 3662 // Constant fold unary operations with a vector integer or float operand. 3663 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 3664 if (BV->isConstant()) { 3665 switch (Opcode) { 3666 default: 3667 // FIXME: Entirely reasonable to perform folding of other unary 3668 // operations here as the need arises. 
3669 break; 3670 case ISD::FNEG: 3671 case ISD::FABS: 3672 case ISD::FCEIL: 3673 case ISD::FTRUNC: 3674 case ISD::FFLOOR: 3675 case ISD::FP_EXTEND: 3676 case ISD::FP_TO_SINT: 3677 case ISD::FP_TO_UINT: 3678 case ISD::TRUNCATE: 3679 case ISD::UINT_TO_FP: 3680 case ISD::SINT_TO_FP: 3681 case ISD::ABS: 3682 case ISD::BITREVERSE: 3683 case ISD::BSWAP: 3684 case ISD::CTLZ: 3685 case ISD::CTLZ_ZERO_UNDEF: 3686 case ISD::CTTZ: 3687 case ISD::CTTZ_ZERO_UNDEF: 3688 case ISD::CTPOP: { 3689 SDValue Ops = { Operand }; 3690 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 3691 return Fold; 3692 } 3693 } 3694 } 3695 } 3696 3697 unsigned OpOpcode = Operand.getNode()->getOpcode(); 3698 switch (Opcode) { 3699 case ISD::TokenFactor: 3700 case ISD::MERGE_VALUES: 3701 case ISD::CONCAT_VECTORS: 3702 return Operand; // Factor, merge or concat of one node? No need. 3703 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 3704 case ISD::FP_EXTEND: 3705 assert(VT.isFloatingPoint() && 3706 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 3707 if (Operand.getValueType() == VT) return Operand; // noop conversion. 3708 assert((!VT.isVector() || 3709 VT.getVectorNumElements() == 3710 Operand.getValueType().getVectorNumElements()) && 3711 "Vector element count mismatch!"); 3712 assert(Operand.getValueType().bitsLT(VT) && 3713 "Invalid fpext node, dst < src!"); 3714 if (Operand.isUndef()) 3715 return getUNDEF(VT); 3716 break; 3717 case ISD::SIGN_EXTEND: 3718 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3719 "Invalid SIGN_EXTEND!"); 3720 if (Operand.getValueType() == VT) return Operand; // noop extension 3721 assert((!VT.isVector() || 3722 VT.getVectorNumElements() == 3723 Operand.getValueType().getVectorNumElements()) && 3724 "Vector element count mismatch!"); 3725 assert(Operand.getValueType().bitsLT(VT) && 3726 "Invalid sext node, dst < src!"); 3727 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 3728 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 3729 else if (OpOpcode == ISD::UNDEF) 3730 // sext(undef) = 0, because the top bits will all be the same. 3731 return getConstant(0, DL, VT); 3732 break; 3733 case ISD::ZERO_EXTEND: 3734 assert(VT.isInteger() && Operand.getValueType().isInteger() && 3735 "Invalid ZERO_EXTEND!"); 3736 if (Operand.getValueType() == VT) return Operand; // noop extension 3737 assert((!VT.isVector() || 3738 VT.getVectorNumElements() == 3739 Operand.getValueType().getVectorNumElements()) && 3740 "Vector element count mismatch!"); 3741 assert(Operand.getValueType().bitsLT(VT) && 3742 "Invalid zext node, dst < src!"); 3743 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 3744 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0)); 3745 else if (OpOpcode == ISD::UNDEF) 3746 // zext(undef) = 0, because the top bits will be zero. 
3747 return getConstant(0, DL, VT);
3748 break;
3749 case ISD::ANY_EXTEND:
3750 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3751 "Invalid ANY_EXTEND!");
3752 if (Operand.getValueType() == VT) return Operand; // noop extension
3753 assert((!VT.isVector() ||
3754 VT.getVectorNumElements() ==
3755 Operand.getValueType().getVectorNumElements()) &&
3756 "Vector element count mismatch!");
3757 assert(Operand.getValueType().bitsLT(VT) &&
3758 "Invalid anyext node, dst < src!");
3759
3760 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3761 OpOpcode == ISD::ANY_EXTEND)
3762 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
3763 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3764 else if (OpOpcode == ISD::UNDEF)
3765 return getUNDEF(VT);
3766
3767 // (ext (trunc x)) -> x
3768 if (OpOpcode == ISD::TRUNCATE) {
3769 SDValue OpOp = Operand.getOperand(0);
3770 if (OpOp.getValueType() == VT)
3771 return OpOp;
3772 }
3773 break;
3774 case ISD::TRUNCATE:
3775 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3776 "Invalid TRUNCATE!");
3777 if (Operand.getValueType() == VT) return Operand; // noop truncate
3778 assert((!VT.isVector() ||
3779 VT.getVectorNumElements() ==
3780 Operand.getValueType().getVectorNumElements()) &&
3781 "Vector element count mismatch!");
3782 assert(Operand.getValueType().bitsGT(VT) &&
3783 "Invalid truncate node, src < dst!");
3784 if (OpOpcode == ISD::TRUNCATE)
3785 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3786 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3787 OpOpcode == ISD::ANY_EXTEND) {
3788 // If the source is smaller than the dest, we still need an extend.
3789 if (Operand.getOperand(0).getValueType().getScalarType()
3790 .bitsLT(VT.getScalarType()))
3791 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3792 if (Operand.getOperand(0).getValueType().bitsGT(VT))
3793 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3794 return Operand.getOperand(0);
3795 }
3796 if (OpOpcode == ISD::UNDEF)
3797 return getUNDEF(VT);
3798 break;
3799 case ISD::ABS:
3800 assert(VT.isInteger() && VT == Operand.getValueType() &&
3801 "Invalid ABS!");
3802 if (OpOpcode == ISD::UNDEF)
3803 return getUNDEF(VT);
3804 break;
3805 case ISD::BSWAP:
3806 assert(VT.isInteger() && VT == Operand.getValueType() &&
3807 "Invalid BSWAP!");
3808 assert((VT.getScalarSizeInBits() % 16 == 0) &&
3809 "BSWAP types must be a multiple of 16 bits!");
3810 if (OpOpcode == ISD::UNDEF)
3811 return getUNDEF(VT);
3812 break;
3813 case ISD::BITREVERSE:
3814 assert(VT.isInteger() && VT == Operand.getValueType() &&
3815 "Invalid BITREVERSE!");
3816 if (OpOpcode == ISD::UNDEF)
3817 return getUNDEF(VT);
3818 break;
3819 case ISD::BITCAST:
3820 // Basic sanity checking.
3821 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
3822 "Cannot BITCAST between types of different sizes!");
3823 if (VT == Operand.getValueType()) return Operand; // noop conversion.
3824 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 3825 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 3826 if (OpOpcode == ISD::UNDEF) 3827 return getUNDEF(VT); 3828 break; 3829 case ISD::SCALAR_TO_VECTOR: 3830 assert(VT.isVector() && !Operand.getValueType().isVector() && 3831 (VT.getVectorElementType() == Operand.getValueType() || 3832 (VT.getVectorElementType().isInteger() && 3833 Operand.getValueType().isInteger() && 3834 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 3835 "Illegal SCALAR_TO_VECTOR node!"); 3836 if (OpOpcode == ISD::UNDEF) 3837 return getUNDEF(VT); 3838 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 3839 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 3840 isa<ConstantSDNode>(Operand.getOperand(1)) && 3841 Operand.getConstantOperandVal(1) == 0 && 3842 Operand.getOperand(0).getValueType() == VT) 3843 return Operand.getOperand(0); 3844 break; 3845 case ISD::FNEG: 3846 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 3847 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB) 3848 // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags? 3849 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1), 3850 Operand.getOperand(0), Operand.getNode()->getFlags()); 3851 if (OpOpcode == ISD::FNEG) // --X -> X 3852 return Operand.getOperand(0); 3853 break; 3854 case ISD::FABS: 3855 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 3856 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 3857 break; 3858 } 3859 3860 SDNode *N; 3861 SDVTList VTs = getVTList(VT); 3862 SDValue Ops[] = {Operand}; 3863 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 3864 FoldingSetNodeID ID; 3865 AddNodeIDNode(ID, Opcode, VTs, Ops); 3866 void *IP = nullptr; 3867 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 3868 E->intersectFlagsWith(Flags); 3869 return SDValue(E, 0); 3870 } 3871 3872 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3873 N->setFlags(Flags); 3874 createOperands(N, Ops); 3875 CSEMap.InsertNode(N, IP); 3876 } else { 3877 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 3878 createOperands(N, Ops); 3879 } 3880 3881 InsertNode(N); 3882 SDValue V = SDValue(N, 0); 3883 NewSDValueDbgMsg(V, "Creating new node: "); 3884 return V; 3885 } 3886 3887 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 3888 const APInt &C2) { 3889 switch (Opcode) { 3890 case ISD::ADD: return std::make_pair(C1 + C2, true); 3891 case ISD::SUB: return std::make_pair(C1 - C2, true); 3892 case ISD::MUL: return std::make_pair(C1 * C2, true); 3893 case ISD::AND: return std::make_pair(C1 & C2, true); 3894 case ISD::OR: return std::make_pair(C1 | C2, true); 3895 case ISD::XOR: return std::make_pair(C1 ^ C2, true); 3896 case ISD::SHL: return std::make_pair(C1 << C2, true); 3897 case ISD::SRL: return std::make_pair(C1.lshr(C2), true); 3898 case ISD::SRA: return std::make_pair(C1.ashr(C2), true); 3899 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true); 3900 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true); 3901 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true); 3902 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true); 3903 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true); 3904 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? 
C1 : C2, true); 3905 case ISD::UDIV: 3906 if (!C2.getBoolValue()) 3907 break; 3908 return std::make_pair(C1.udiv(C2), true); 3909 case ISD::UREM: 3910 if (!C2.getBoolValue()) 3911 break; 3912 return std::make_pair(C1.urem(C2), true); 3913 case ISD::SDIV: 3914 if (!C2.getBoolValue()) 3915 break; 3916 return std::make_pair(C1.sdiv(C2), true); 3917 case ISD::SREM: 3918 if (!C2.getBoolValue()) 3919 break; 3920 return std::make_pair(C1.srem(C2), true); 3921 } 3922 return std::make_pair(APInt(1, 0), false); 3923 } 3924 3925 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 3926 EVT VT, const ConstantSDNode *Cst1, 3927 const ConstantSDNode *Cst2) { 3928 if (Cst1->isOpaque() || Cst2->isOpaque()) 3929 return SDValue(); 3930 3931 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(), 3932 Cst2->getAPIntValue()); 3933 if (!Folded.second) 3934 return SDValue(); 3935 return getConstant(Folded.first, DL, VT); 3936 } 3937 3938 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 3939 const GlobalAddressSDNode *GA, 3940 const SDNode *N2) { 3941 if (GA->getOpcode() != ISD::GlobalAddress) 3942 return SDValue(); 3943 if (!TLI->isOffsetFoldingLegal(GA)) 3944 return SDValue(); 3945 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2); 3946 if (!Cst2) 3947 return SDValue(); 3948 int64_t Offset = Cst2->getSExtValue(); 3949 switch (Opcode) { 3950 case ISD::ADD: break; 3951 case ISD::SUB: Offset = -uint64_t(Offset); break; 3952 default: return SDValue(); 3953 } 3954 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT, 3955 GA->getOffset() + uint64_t(Offset)); 3956 } 3957 3958 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 3959 switch (Opcode) { 3960 case ISD::SDIV: 3961 case ISD::UDIV: 3962 case ISD::SREM: 3963 case ISD::UREM: { 3964 // If a divisor is zero/undef or any element of a divisor vector is 3965 // zero/undef, the whole op is undef. 3966 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 3967 SDValue Divisor = Ops[1]; 3968 if (Divisor.isUndef() || isNullConstant(Divisor)) 3969 return true; 3970 3971 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 3972 llvm::any_of(Divisor->op_values(), 3973 [](SDValue V) { return V.isUndef() || 3974 isNullConstant(V); }); 3975 // TODO: Handle signed overflow. 3976 } 3977 // TODO: Handle oversized shifts. 3978 default: 3979 return false; 3980 } 3981 } 3982 3983 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 3984 EVT VT, SDNode *Cst1, 3985 SDNode *Cst2) { 3986 // If the opcode is a target-specific ISD node, there's nothing we can 3987 // do here and the operand rules may not line up with the below, so 3988 // bail early. 3989 if (Opcode >= ISD::BUILTIN_OP_END) 3990 return SDValue(); 3991 3992 if (isUndef(Opcode, {SDValue(Cst1, 0), SDValue(Cst2, 0)})) 3993 return getUNDEF(VT); 3994 3995 // Handle the case of two scalars. 
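// For example, (add i32 7, 5) with two ConstantSDNode operands folds straight
// to the constant 12 via the scalar FoldValue path above.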
3996 if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) { 3997 if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) { 3998 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2); 3999 assert((!Folded || !VT.isVector()) && 4000 "Can't fold vectors ops with scalar operands"); 4001 return Folded; 4002 } 4003 } 4004 4005 // fold (add Sym, c) -> Sym+c 4006 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1)) 4007 return FoldSymbolOffset(Opcode, VT, GA, Cst2); 4008 if (TLI->isCommutativeBinOp(Opcode)) 4009 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2)) 4010 return FoldSymbolOffset(Opcode, VT, GA, Cst1); 4011 4012 // For vectors extract each constant element into Inputs so we can constant 4013 // fold them individually. 4014 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1); 4015 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2); 4016 if (!BV1 || !BV2) 4017 return SDValue(); 4018 4019 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!"); 4020 4021 EVT SVT = VT.getScalarType(); 4022 EVT LegalSVT = SVT; 4023 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4024 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4025 if (LegalSVT.bitsLT(SVT)) 4026 return SDValue(); 4027 } 4028 SmallVector<SDValue, 4> Outputs; 4029 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) { 4030 SDValue V1 = BV1->getOperand(I); 4031 SDValue V2 = BV2->getOperand(I); 4032 4033 if (SVT.isInteger()) { 4034 if (V1->getValueType(0).bitsGT(SVT)) 4035 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1); 4036 if (V2->getValueType(0).bitsGT(SVT)) 4037 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2); 4038 } 4039 4040 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT) 4041 return SDValue(); 4042 4043 // Fold one vector element. 4044 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2); 4045 if (LegalSVT != SVT) 4046 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4047 4048 // Scalar folding only succeeded if the result is a constant or UNDEF. 4049 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4050 ScalarResult.getOpcode() != ISD::ConstantFP) 4051 return SDValue(); 4052 Outputs.push_back(ScalarResult); 4053 } 4054 4055 assert(VT.getVectorNumElements() == Outputs.size() && 4056 "Vector size mismatch!"); 4057 4058 // We may have a vector type but a scalar result. Create a splat. 4059 Outputs.resize(VT.getVectorNumElements(), Outputs.back()); 4060 4061 // Build a big vector out of the scalar elements we generated. 4062 return getBuildVector(VT, SDLoc(), Outputs); 4063 } 4064 4065 // TODO: Merge with FoldConstantArithmetic 4066 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, 4067 const SDLoc &DL, EVT VT, 4068 ArrayRef<SDValue> Ops, 4069 const SDNodeFlags Flags) { 4070 // If the opcode is a target-specific ISD node, there's nothing we can 4071 // do here and the operand rules may not line up with the below, so 4072 // bail early. 4073 if (Opcode >= ISD::BUILTIN_OP_END) 4074 return SDValue(); 4075 4076 if (isUndef(Opcode, Ops)) 4077 return getUNDEF(VT); 4078 4079 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday? 
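// Scalar requests simply bail out here; FoldConstantArithmetic above already
// handles the scalar-only case.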
4080 if (!VT.isVector()) 4081 return SDValue(); 4082 4083 unsigned NumElts = VT.getVectorNumElements(); 4084 4085 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) { 4086 return !Op.getValueType().isVector() || 4087 Op.getValueType().getVectorNumElements() == NumElts; 4088 }; 4089 4090 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { 4091 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); 4092 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || 4093 (BV && BV->isConstant()); 4094 }; 4095 4096 // All operands must be vector types with the same number of elements as 4097 // the result type and must be either UNDEF or a build vector of constant 4098 // or UNDEF scalars. 4099 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) || 4100 !llvm::all_of(Ops, IsScalarOrSameVectorSize)) 4101 return SDValue(); 4102 4103 // If we are comparing vectors, then the result needs to be a i1 boolean 4104 // that is then sign-extended back to the legal result type. 4105 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); 4106 4107 // Find legal integer scalar type for constant promotion and 4108 // ensure that its scalar size is at least as large as source. 4109 EVT LegalSVT = VT.getScalarType(); 4110 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4111 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4112 if (LegalSVT.bitsLT(VT.getScalarType())) 4113 return SDValue(); 4114 } 4115 4116 // Constant fold each scalar lane separately. 4117 SmallVector<SDValue, 4> ScalarResults; 4118 for (unsigned i = 0; i != NumElts; i++) { 4119 SmallVector<SDValue, 4> ScalarOps; 4120 for (SDValue Op : Ops) { 4121 EVT InSVT = Op.getValueType().getScalarType(); 4122 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op); 4123 if (!InBV) { 4124 // We've checked that this is UNDEF or a constant of some kind. 4125 if (Op.isUndef()) 4126 ScalarOps.push_back(getUNDEF(InSVT)); 4127 else 4128 ScalarOps.push_back(Op); 4129 continue; 4130 } 4131 4132 SDValue ScalarOp = InBV->getOperand(i); 4133 EVT ScalarVT = ScalarOp.getValueType(); 4134 4135 // Build vector (integer) scalar operands may need implicit 4136 // truncation - do this before constant folding. 4137 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 4138 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 4139 4140 ScalarOps.push_back(ScalarOp); 4141 } 4142 4143 // Constant fold the scalar operands. 4144 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 4145 4146 // Legalize the (integer) scalar constant if necessary. 4147 if (LegalSVT != SVT) 4148 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4149 4150 // Scalar folding only succeeded if the result is a constant or UNDEF. 4151 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4152 ScalarResult.getOpcode() != ISD::ConstantFP) 4153 return SDValue(); 4154 ScalarResults.push_back(ScalarResult); 4155 } 4156 4157 SDValue V = getBuildVector(VT, DL, ScalarResults); 4158 NewSDValueDbgMsg(V, "New node fold constant vector: "); 4159 return V; 4160 } 4161 4162 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4163 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 4164 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 4165 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 4166 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 4167 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 4168 4169 // Canonicalize constant to RHS if commutative. 
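// For example, (add 4, x) is rebuilt as (add x, 4) so the constant-on-RHS
// folds below only need to inspect N2C / N2CFP.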
4170 if (TLI->isCommutativeBinOp(Opcode)) { 4171 if (N1C && !N2C) { 4172 std::swap(N1C, N2C); 4173 std::swap(N1, N2); 4174 } else if (N1CFP && !N2CFP) { 4175 std::swap(N1CFP, N2CFP); 4176 std::swap(N1, N2); 4177 } 4178 } 4179 4180 switch (Opcode) { 4181 default: break; 4182 case ISD::TokenFactor: 4183 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 4184 N2.getValueType() == MVT::Other && "Invalid token factor!"); 4185 // Fold trivial token factors. 4186 if (N1.getOpcode() == ISD::EntryToken) return N2; 4187 if (N2.getOpcode() == ISD::EntryToken) return N1; 4188 if (N1 == N2) return N1; 4189 break; 4190 case ISD::CONCAT_VECTORS: { 4191 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 4192 SDValue Ops[] = {N1, N2}; 4193 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 4194 return V; 4195 break; 4196 } 4197 case ISD::AND: 4198 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4199 assert(N1.getValueType() == N2.getValueType() && 4200 N1.getValueType() == VT && "Binary operator types must match!"); 4201 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 4202 // worth handling here. 4203 if (N2C && N2C->isNullValue()) 4204 return N2; 4205 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 4206 return N1; 4207 break; 4208 case ISD::OR: 4209 case ISD::XOR: 4210 case ISD::ADD: 4211 case ISD::SUB: 4212 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4213 assert(N1.getValueType() == N2.getValueType() && 4214 N1.getValueType() == VT && "Binary operator types must match!"); 4215 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 4216 // it's worth handling here. 4217 if (N2C && N2C->isNullValue()) 4218 return N1; 4219 break; 4220 case ISD::UDIV: 4221 case ISD::UREM: 4222 case ISD::MULHU: 4223 case ISD::MULHS: 4224 case ISD::MUL: 4225 case ISD::SDIV: 4226 case ISD::SREM: 4227 case ISD::SMIN: 4228 case ISD::SMAX: 4229 case ISD::UMIN: 4230 case ISD::UMAX: 4231 assert(VT.isInteger() && "This operator does not apply to FP types!"); 4232 assert(N1.getValueType() == N2.getValueType() && 4233 N1.getValueType() == VT && "Binary operator types must match!"); 4234 break; 4235 case ISD::FADD: 4236 case ISD::FSUB: 4237 case ISD::FMUL: 4238 case ISD::FDIV: 4239 case ISD::FREM: 4240 if (getTarget().Options.UnsafeFPMath) { 4241 if (Opcode == ISD::FADD) { 4242 // x+0 --> x 4243 if (N2CFP && N2CFP->getValueAPF().isZero()) 4244 return N1; 4245 } else if (Opcode == ISD::FSUB) { 4246 // x-0 --> x 4247 if (N2CFP && N2CFP->getValueAPF().isZero()) 4248 return N1; 4249 } else if (Opcode == ISD::FMUL) { 4250 // x*0 --> 0 4251 if (N2CFP && N2CFP->isZero()) 4252 return N2; 4253 // x*1 --> x 4254 if (N2CFP && N2CFP->isExactlyValue(1.0)) 4255 return N1; 4256 } 4257 } 4258 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 4259 assert(N1.getValueType() == N2.getValueType() && 4260 N1.getValueType() == VT && "Binary operator types must match!"); 4261 break; 4262 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 
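// e.g. FCOPYSIGN f32 %x, f64 %y is accepted: the magnitude operand fixes the
// result type, while the sign operand may have a different FP type.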
4263 assert(N1.getValueType() == VT &&
4264 N1.getValueType().isFloatingPoint() &&
4265 N2.getValueType().isFloatingPoint() &&
4266 "Invalid FCOPYSIGN!");
4267 break;
4268 case ISD::SHL:
4269 case ISD::SRA:
4270 case ISD::SRL:
4271 case ISD::ROTL:
4272 case ISD::ROTR:
4273 assert(VT == N1.getValueType() &&
4274 "Shift operators' return type must be the same as their first arg");
4275 assert(VT.isInteger() && N2.getValueType().isInteger() &&
4276 "Shifts only work on integers");
4277 assert((!VT.isVector() || VT == N2.getValueType()) &&
4278 "Vector shift amounts must have the same type as their first arg");
4279 // Verify that the shift amount VT is big enough to hold valid shift
4280 // amounts. This catches things like trying to shift an i1024 value by an
4281 // i8, which is easy to fall into in generic code that uses
4282 // TLI.getShiftAmount().
4283 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
4284 "Invalid use of small shift amount with oversized value!");
4285
4286 // Always fold shifts of i1 values so the code generator doesn't need to
4287 // handle them. Since we know the size of the shift has to be less than the
4288 // size of the value, the shift/rotate count is guaranteed to be zero.
4289 if (VT == MVT::i1)
4290 return N1;
4291 if (N2C && N2C->isNullValue())
4292 return N1;
4293 break;
4294 case ISD::FP_ROUND_INREG: {
4295 EVT EVT = cast<VTSDNode>(N2)->getVT();
4296 assert(VT == N1.getValueType() && "Not an inreg round!");
4297 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
4298 "Cannot FP_ROUND_INREG integer types");
4299 assert(EVT.isVector() == VT.isVector() &&
4300 "FP_ROUND_INREG type should be vector iff the operand "
4301 "type is vector!");
4302 assert((!EVT.isVector() ||
4303 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4304 "Vector element counts must match in FP_ROUND_INREG");
4305 assert(EVT.bitsLE(VT) && "Not rounding down!");
4306 (void)EVT;
4307 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
4308 break;
4309 }
4310 case ISD::FP_ROUND:
4311 assert(VT.isFloatingPoint() &&
4312 N1.getValueType().isFloatingPoint() &&
4313 VT.bitsLE(N1.getValueType()) &&
4314 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
4315 "Invalid FP_ROUND!");
4316 if (N1.getValueType() == VT) return N1; // noop conversion.
4317 break;
4318 case ISD::AssertSext:
4319 case ISD::AssertZext: {
4320 EVT EVT = cast<VTSDNode>(N2)->getVT();
4321 assert(VT == N1.getValueType() && "Not an inreg extend!");
4322 assert(VT.isInteger() && EVT.isInteger() &&
4323 "Cannot *_EXTEND_INREG FP types");
4324 assert(!EVT.isVector() &&
4325 "AssertSExt/AssertZExt type should be the vector element type "
4326 "rather than the vector type!");
4327 assert(EVT.bitsLE(VT) && "Not extending!");
4328 if (VT == EVT) return N1; // noop assertion.
4329 break; 4330 } 4331 case ISD::SIGN_EXTEND_INREG: { 4332 EVT EVT = cast<VTSDNode>(N2)->getVT(); 4333 assert(VT == N1.getValueType() && "Not an inreg extend!"); 4334 assert(VT.isInteger() && EVT.isInteger() && 4335 "Cannot *_EXTEND_INREG FP types"); 4336 assert(EVT.isVector() == VT.isVector() && 4337 "SIGN_EXTEND_INREG type should be vector iff the operand " 4338 "type is vector!"); 4339 assert((!EVT.isVector() || 4340 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 4341 "Vector element counts must match in SIGN_EXTEND_INREG"); 4342 assert(EVT.bitsLE(VT) && "Not extending!"); 4343 if (EVT == VT) return N1; // Not actually extending 4344 4345 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 4346 unsigned FromBits = EVT.getScalarSizeInBits(); 4347 Val <<= Val.getBitWidth() - FromBits; 4348 Val.ashrInPlace(Val.getBitWidth() - FromBits); 4349 return getConstant(Val, DL, ConstantVT); 4350 }; 4351 4352 if (N1C) { 4353 const APInt &Val = N1C->getAPIntValue(); 4354 return SignExtendInReg(Val, VT); 4355 } 4356 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 4357 SmallVector<SDValue, 8> Ops; 4358 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 4359 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 4360 SDValue Op = N1.getOperand(i); 4361 if (Op.isUndef()) { 4362 Ops.push_back(getUNDEF(OpVT)); 4363 continue; 4364 } 4365 ConstantSDNode *C = cast<ConstantSDNode>(Op); 4366 APInt Val = C->getAPIntValue(); 4367 Ops.push_back(SignExtendInReg(Val, OpVT)); 4368 } 4369 return getBuildVector(VT, DL, Ops); 4370 } 4371 break; 4372 } 4373 case ISD::EXTRACT_VECTOR_ELT: 4374 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 4375 if (N1.isUndef()) 4376 return getUNDEF(VT); 4377 4378 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 4379 if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 4380 return getUNDEF(VT); 4381 4382 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 4383 // expanding copies of large vectors from registers. 4384 if (N2C && 4385 N1.getOpcode() == ISD::CONCAT_VECTORS && 4386 N1.getNumOperands() > 0) { 4387 unsigned Factor = 4388 N1.getOperand(0).getValueType().getVectorNumElements(); 4389 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 4390 N1.getOperand(N2C->getZExtValue() / Factor), 4391 getConstant(N2C->getZExtValue() % Factor, DL, 4392 N2.getValueType())); 4393 } 4394 4395 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 4396 // expanding large vector constants. 4397 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { 4398 SDValue Elt = N1.getOperand(N2C->getZExtValue()); 4399 4400 if (VT != Elt.getValueType()) 4401 // If the vector element type is not legal, the BUILD_VECTOR operands 4402 // are promoted and implicitly truncated, and the result implicitly 4403 // extended. Make that explicit here. 4404 Elt = getAnyExtOrTrunc(Elt, DL, VT); 4405 4406 return Elt; 4407 } 4408 4409 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 4410 // operations are lowered to scalars. 4411 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 4412 // If the indices are the same, return the inserted element else 4413 // if the indices are known different, extract the element from 4414 // the original vector. 
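// e.g. (extract_vector_elt (insert_vector_elt V, X, 2), 2) --> X, while
// (extract_vector_elt (insert_vector_elt V, X, 2), 0) --> (extract_vector_elt V, 0).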
4415 SDValue N1Op2 = N1.getOperand(2); 4416 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); 4417 4418 if (N1Op2C && N2C) { 4419 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 4420 if (VT == N1.getOperand(1).getValueType()) 4421 return N1.getOperand(1); 4422 else 4423 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 4424 } 4425 4426 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 4427 } 4428 } 4429 4430 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed 4431 // when vector types are scalarized and v1iX is legal. 4432 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx) 4433 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && 4434 N1.getValueType().getVectorNumElements() == 1) { 4435 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), 4436 N1.getOperand(1)); 4437 } 4438 break; 4439 case ISD::EXTRACT_ELEMENT: 4440 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 4441 assert(!N1.getValueType().isVector() && !VT.isVector() && 4442 (N1.getValueType().isInteger() == VT.isInteger()) && 4443 N1.getValueType() != VT && 4444 "Wrong types for EXTRACT_ELEMENT!"); 4445 4446 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 4447 // 64-bit integers into 32-bit parts. Instead of building the extract of 4448 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 4449 if (N1.getOpcode() == ISD::BUILD_PAIR) 4450 return N1.getOperand(N2C->getZExtValue()); 4451 4452 // EXTRACT_ELEMENT of a constant int is also very common. 4453 if (N1C) { 4454 unsigned ElementSize = VT.getSizeInBits(); 4455 unsigned Shift = ElementSize * N2C->getZExtValue(); 4456 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift); 4457 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT); 4458 } 4459 break; 4460 case ISD::EXTRACT_SUBVECTOR: 4461 if (VT.isSimple() && N1.getValueType().isSimple()) { 4462 assert(VT.isVector() && N1.getValueType().isVector() && 4463 "Extract subvector VTs must be a vectors!"); 4464 assert(VT.getVectorElementType() == 4465 N1.getValueType().getVectorElementType() && 4466 "Extract subvector VTs must have the same element type!"); 4467 assert(VT.getSimpleVT() <= N1.getSimpleValueType() && 4468 "Extract subvector must be from larger vector to smaller vector!"); 4469 4470 if (N2C) { 4471 assert((VT.getVectorNumElements() + N2C->getZExtValue() 4472 <= N1.getValueType().getVectorNumElements()) 4473 && "Extract subvector overflow!"); 4474 } 4475 4476 // Trivial extraction. 4477 if (VT.getSimpleVT() == N1.getSimpleValueType()) 4478 return N1; 4479 4480 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. 4481 if (N1.isUndef()) 4482 return getUNDEF(VT); 4483 4484 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of 4485 // the concat have the same type as the extract. 4486 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && 4487 N1.getNumOperands() > 0 && 4488 VT == N1.getOperand(0).getValueType()) { 4489 unsigned Factor = VT.getVectorNumElements(); 4490 return N1.getOperand(N2C->getZExtValue() / Factor); 4491 } 4492 4493 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created 4494 // during shuffle legalization. 4495 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 4496 VT == N1.getOperand(1).getValueType()) 4497 return N1.getOperand(1); 4498 } 4499 break; 4500 } 4501 4502 // Perform trivial constant folding. 
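// e.g. (mul i32 6, 7) never materializes a MUL node; it is replaced by the
// constant 42 here.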
4503 if (SDValue SV = 4504 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode())) 4505 return SV; 4506 4507 // Constant fold FP operations. 4508 bool HasFPExceptions = TLI->hasFloatingPointExceptions(); 4509 if (N1CFP) { 4510 if (N2CFP) { 4511 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF(); 4512 APFloat::opStatus s; 4513 switch (Opcode) { 4514 case ISD::FADD: 4515 s = V1.add(V2, APFloat::rmNearestTiesToEven); 4516 if (!HasFPExceptions || s != APFloat::opInvalidOp) 4517 return getConstantFP(V1, DL, VT); 4518 break; 4519 case ISD::FSUB: 4520 s = V1.subtract(V2, APFloat::rmNearestTiesToEven); 4521 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 4522 return getConstantFP(V1, DL, VT); 4523 break; 4524 case ISD::FMUL: 4525 s = V1.multiply(V2, APFloat::rmNearestTiesToEven); 4526 if (!HasFPExceptions || s!=APFloat::opInvalidOp) 4527 return getConstantFP(V1, DL, VT); 4528 break; 4529 case ISD::FDIV: 4530 s = V1.divide(V2, APFloat::rmNearestTiesToEven); 4531 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 4532 s!=APFloat::opDivByZero)) { 4533 return getConstantFP(V1, DL, VT); 4534 } 4535 break; 4536 case ISD::FREM : 4537 s = V1.mod(V2); 4538 if (!HasFPExceptions || (s!=APFloat::opInvalidOp && 4539 s!=APFloat::opDivByZero)) { 4540 return getConstantFP(V1, DL, VT); 4541 } 4542 break; 4543 case ISD::FCOPYSIGN: 4544 V1.copySign(V2); 4545 return getConstantFP(V1, DL, VT); 4546 default: break; 4547 } 4548 } 4549 4550 if (Opcode == ISD::FP_ROUND) { 4551 APFloat V = N1CFP->getValueAPF(); // make copy 4552 bool ignored; 4553 // This can return overflow, underflow, or inexact; we don't care. 4554 // FIXME need to be more flexible about rounding mode. 4555 (void)V.convert(EVTToAPFloatSemantics(VT), 4556 APFloat::rmNearestTiesToEven, &ignored); 4557 return getConstantFP(V, DL, VT); 4558 } 4559 } 4560 4561 // Canonicalize an UNDEF to the RHS, even over a constant. 4562 if (N1.isUndef()) { 4563 if (TLI->isCommutativeBinOp(Opcode)) { 4564 std::swap(N1, N2); 4565 } else { 4566 switch (Opcode) { 4567 case ISD::FP_ROUND_INREG: 4568 case ISD::SIGN_EXTEND_INREG: 4569 case ISD::SUB: 4570 case ISD::FSUB: 4571 case ISD::FDIV: 4572 case ISD::FREM: 4573 case ISD::SRA: 4574 return N1; // fold op(undef, arg2) -> undef 4575 case ISD::UDIV: 4576 case ISD::SDIV: 4577 case ISD::UREM: 4578 case ISD::SREM: 4579 case ISD::SRL: 4580 case ISD::SHL: 4581 if (!VT.isVector()) 4582 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 4583 // For vectors, we can't easily build an all zero vector, just return 4584 // the LHS. 4585 return N2; 4586 } 4587 } 4588 } 4589 4590 // Fold a bunch of operators when the RHS is undef. 4591 if (N2.isUndef()) { 4592 switch (Opcode) { 4593 case ISD::XOR: 4594 if (N1.isUndef()) 4595 // Handle undef ^ undef -> 0 special case. This is a common 4596 // idiom (misuse). 4597 return getConstant(0, DL, VT); 4598 LLVM_FALLTHROUGH; 4599 case ISD::ADD: 4600 case ISD::ADDC: 4601 case ISD::ADDE: 4602 case ISD::SUB: 4603 case ISD::UDIV: 4604 case ISD::SDIV: 4605 case ISD::UREM: 4606 case ISD::SREM: 4607 return N2; // fold op(arg1, undef) -> undef 4608 case ISD::FADD: 4609 case ISD::FSUB: 4610 case ISD::FMUL: 4611 case ISD::FDIV: 4612 case ISD::FREM: 4613 if (getTarget().Options.UnsafeFPMath) 4614 return N2; 4615 break; 4616 case ISD::MUL: 4617 case ISD::AND: 4618 case ISD::SRL: 4619 case ISD::SHL: 4620 if (!VT.isVector()) 4621 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 4622 // For vectors, we can't easily build an all zero vector, just return 4623 // the LHS. 
4624 return N1; 4625 case ISD::OR: 4626 if (!VT.isVector()) 4627 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT); 4628 // For vectors, we can't easily build an all one vector, just return 4629 // the LHS. 4630 return N1; 4631 case ISD::SRA: 4632 return N1; 4633 } 4634 } 4635 4636 // Memoize this node if possible. 4637 SDNode *N; 4638 SDVTList VTs = getVTList(VT); 4639 SDValue Ops[] = {N1, N2}; 4640 if (VT != MVT::Glue) { 4641 FoldingSetNodeID ID; 4642 AddNodeIDNode(ID, Opcode, VTs, Ops); 4643 void *IP = nullptr; 4644 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4645 E->intersectFlagsWith(Flags); 4646 return SDValue(E, 0); 4647 } 4648 4649 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4650 N->setFlags(Flags); 4651 createOperands(N, Ops); 4652 CSEMap.InsertNode(N, IP); 4653 } else { 4654 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4655 createOperands(N, Ops); 4656 } 4657 4658 InsertNode(N); 4659 SDValue V = SDValue(N, 0); 4660 NewSDValueDbgMsg(V, "Creating new node: "); 4661 return V; 4662 } 4663 4664 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4665 SDValue N1, SDValue N2, SDValue N3) { 4666 // Perform various simplifications. 4667 switch (Opcode) { 4668 case ISD::FMA: { 4669 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 4670 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 4671 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 4672 if (N1CFP && N2CFP && N3CFP) { 4673 APFloat V1 = N1CFP->getValueAPF(); 4674 const APFloat &V2 = N2CFP->getValueAPF(); 4675 const APFloat &V3 = N3CFP->getValueAPF(); 4676 APFloat::opStatus s = 4677 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 4678 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp) 4679 return getConstantFP(V1, DL, VT); 4680 } 4681 break; 4682 } 4683 case ISD::CONCAT_VECTORS: { 4684 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 4685 SDValue Ops[] = {N1, N2, N3}; 4686 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 4687 return V; 4688 break; 4689 } 4690 case ISD::SETCC: { 4691 // Use FoldSetCC to simplify SETCC's. 4692 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 4693 return V; 4694 // Vector constant folding. 
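// A SETCC of two constant build_vectors folds lane-by-lane to i1 results that
// are then sign-extended to the legal result element type.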
4695 SDValue Ops[] = {N1, N2, N3}; 4696 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 4697 NewSDValueDbgMsg(V, "New node vector constant folding: "); 4698 return V; 4699 } 4700 break; 4701 } 4702 case ISD::SELECT: 4703 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 4704 if (N1C->getZExtValue()) 4705 return N2; // select true, X, Y -> X 4706 return N3; // select false, X, Y -> Y 4707 } 4708 4709 if (N2 == N3) return N2; // select C, X, X -> X 4710 break; 4711 case ISD::VECTOR_SHUFFLE: 4712 llvm_unreachable("should use getVectorShuffle constructor!"); 4713 case ISD::INSERT_VECTOR_ELT: { 4714 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 4715 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF 4716 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 4717 return getUNDEF(VT); 4718 break; 4719 } 4720 case ISD::INSERT_SUBVECTOR: { 4721 SDValue Index = N3; 4722 if (VT.isSimple() && N1.getValueType().isSimple() 4723 && N2.getValueType().isSimple()) { 4724 assert(VT.isVector() && N1.getValueType().isVector() && 4725 N2.getValueType().isVector() && 4726 "Insert subvector VTs must be a vectors"); 4727 assert(VT == N1.getValueType() && 4728 "Dest and insert subvector source types must match!"); 4729 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() && 4730 "Insert subvector must be from smaller vector to larger vector!"); 4731 if (isa<ConstantSDNode>(Index)) { 4732 assert((N2.getValueType().getVectorNumElements() + 4733 cast<ConstantSDNode>(Index)->getZExtValue() 4734 <= VT.getVectorNumElements()) 4735 && "Insert subvector overflow!"); 4736 } 4737 4738 // Trivial insertion. 4739 if (VT.getSimpleVT() == N2.getSimpleValueType()) 4740 return N2; 4741 } 4742 break; 4743 } 4744 case ISD::BITCAST: 4745 // Fold bit_convert nodes from a type to themselves. 4746 if (N1.getValueType() == VT) 4747 return N1; 4748 break; 4749 } 4750 4751 // Memoize node if it doesn't produce a flag. 4752 SDNode *N; 4753 SDVTList VTs = getVTList(VT); 4754 SDValue Ops[] = {N1, N2, N3}; 4755 if (VT != MVT::Glue) { 4756 FoldingSetNodeID ID; 4757 AddNodeIDNode(ID, Opcode, VTs, Ops); 4758 void *IP = nullptr; 4759 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4760 return SDValue(E, 0); 4761 4762 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4763 createOperands(N, Ops); 4764 CSEMap.InsertNode(N, IP); 4765 } else { 4766 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4767 createOperands(N, Ops); 4768 } 4769 4770 InsertNode(N); 4771 SDValue V = SDValue(N, 0); 4772 NewSDValueDbgMsg(V, "Creating new node: "); 4773 return V; 4774 } 4775 4776 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4777 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 4778 SDValue Ops[] = { N1, N2, N3, N4 }; 4779 return getNode(Opcode, DL, VT, Ops); 4780 } 4781 4782 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4783 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 4784 SDValue N5) { 4785 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 4786 return getNode(Opcode, DL, VT, Ops); 4787 } 4788 4789 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 4790 /// the incoming stack arguments to be loaded from the stack. 4791 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 4792 SmallVector<SDValue, 8> ArgChains; 4793 4794 // Include the original chain at the beginning of the list. 
When this is 4795 // used by target LowerCall hooks, this helps legalize find the 4796 // CALLSEQ_BEGIN node. 4797 ArgChains.push_back(Chain); 4798 4799 // Add a chain value for each stack argument. 4800 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 4801 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 4802 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 4803 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 4804 if (FI->getIndex() < 0) 4805 ArgChains.push_back(SDValue(L, 1)); 4806 4807 // Build a tokenfactor for all the chains. 4808 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 4809 } 4810 4811 /// getMemsetValue - Vectorized representation of the memset value 4812 /// operand. 4813 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 4814 const SDLoc &dl) { 4815 assert(!Value.isUndef()); 4816 4817 unsigned NumBits = VT.getScalarSizeInBits(); 4818 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 4819 assert(C->getAPIntValue().getBitWidth() == 8); 4820 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 4821 if (VT.isInteger()) 4822 return DAG.getConstant(Val, dl, VT); 4823 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 4824 VT); 4825 } 4826 4827 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 4828 EVT IntVT = VT.getScalarType(); 4829 if (!IntVT.isInteger()) 4830 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 4831 4832 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 4833 if (NumBits > 8) { 4834 // Use a multiplication with 0x010101... to extend the input to the 4835 // required length. 4836 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 4837 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 4838 DAG.getConstant(Magic, dl, IntVT)); 4839 } 4840 4841 if (VT != Value.getValueType() && !VT.isInteger()) 4842 Value = DAG.getBitcast(VT.getScalarType(), Value); 4843 if (VT != Value.getValueType()) 4844 Value = DAG.getSplatBuildVector(VT, dl, Value); 4845 4846 return Value; 4847 } 4848 4849 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 4850 /// used when a memcpy is turned into a memset when the source is a constant 4851 /// string ptr. 4852 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 4853 const TargetLowering &TLI, 4854 const ConstantDataArraySlice &Slice) { 4855 // Handle vector with all elements zero. 4856 if (Slice.Array == nullptr) { 4857 if (VT.isInteger()) 4858 return DAG.getConstant(0, dl, VT); 4859 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 4860 return DAG.getConstantFP(0.0, dl, VT); 4861 else if (VT.isVector()) { 4862 unsigned NumElts = VT.getVectorNumElements(); 4863 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? 
MVT::i32 : MVT::i64; 4864 return DAG.getNode(ISD::BITCAST, dl, VT, 4865 DAG.getConstant(0, dl, 4866 EVT::getVectorVT(*DAG.getContext(), 4867 EltVT, NumElts))); 4868 } else 4869 llvm_unreachable("Expected type!"); 4870 } 4871 4872 assert(!VT.isVector() && "Can't handle vector type here!"); 4873 unsigned NumVTBits = VT.getSizeInBits(); 4874 unsigned NumVTBytes = NumVTBits / 8; 4875 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 4876 4877 APInt Val(NumVTBits, 0); 4878 if (DAG.getDataLayout().isLittleEndian()) { 4879 for (unsigned i = 0; i != NumBytes; ++i) 4880 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 4881 } else { 4882 for (unsigned i = 0; i != NumBytes; ++i) 4883 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 4884 } 4885 4886 // If the "cost" of materializing the integer immediate is less than the cost 4887 // of a load, then it is cost effective to turn the load into the immediate. 4888 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 4889 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 4890 return DAG.getConstant(Val, dl, VT); 4891 return SDValue(nullptr, 0); 4892 } 4893 4894 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset, 4895 const SDLoc &DL) { 4896 EVT VT = Base.getValueType(); 4897 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT)); 4898 } 4899 4900 /// Returns true if memcpy source is constant data. 4901 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 4902 uint64_t SrcDelta = 0; 4903 GlobalAddressSDNode *G = nullptr; 4904 if (Src.getOpcode() == ISD::GlobalAddress) 4905 G = cast<GlobalAddressSDNode>(Src); 4906 else if (Src.getOpcode() == ISD::ADD && 4907 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 4908 Src.getOperand(1).getOpcode() == ISD::Constant) { 4909 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 4910 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 4911 } 4912 if (!G) 4913 return false; 4914 4915 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 4916 SrcDelta + G->getOffset()); 4917 } 4918 4919 /// Determines the optimal series of memory ops to replace the memset / memcpy. 4920 /// Return true if the number of memory ops is below the threshold (Limit). 4921 /// It returns the types of the sequence of memory ops to perform 4922 /// memset / memcpy by reference. 4923 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps, 4924 unsigned Limit, uint64_t Size, 4925 unsigned DstAlign, unsigned SrcAlign, 4926 bool IsMemset, 4927 bool ZeroMemset, 4928 bool MemcpyStrSrc, 4929 bool AllowOverlap, 4930 unsigned DstAS, unsigned SrcAS, 4931 SelectionDAG &DAG, 4932 const TargetLowering &TLI) { 4933 assert((SrcAlign == 0 || SrcAlign >= DstAlign) && 4934 "Expecting memcpy / memset source to meet alignment requirement!"); 4935 // If 'SrcAlign' is zero, that means the memory operation does not need to 4936 // load the value, i.e. memset or memcpy from constant string. Otherwise, 4937 // it's the inferred alignment of the source. 'DstAlign', on the other hand, 4938 // is the specified alignment of the memory operation. If it is zero, that 4939 // means it's possible to change the alignment of the destination. 4940 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does 4941 // not need to be loaded. 
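// Illustrative sketch, not from the original source: on a hypothetical target
// whose getOptimalMemOpType returns MVT::i32, a request with Size == 7 yields
// MemOps == { i32, i16, i8 }; the overlapping-access shortcut below only
// applies to types of 64 bits or more.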
4942 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign, 4943 IsMemset, ZeroMemset, MemcpyStrSrc, 4944 DAG.getMachineFunction()); 4945 4946 if (VT == MVT::Other) { 4947 // Use the largest integer type whose alignment constraints are satisfied. 4948 // We only need to check DstAlign here as SrcAlign is always greater or 4949 // equal to DstAlign (or zero). 4950 VT = MVT::i64; 4951 while (DstAlign && DstAlign < VT.getSizeInBits() / 8 && 4952 !TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign)) 4953 VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1); 4954 assert(VT.isInteger()); 4955 4956 // Find the largest legal integer type. 4957 MVT LVT = MVT::i64; 4958 while (!TLI.isTypeLegal(LVT)) 4959 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1); 4960 assert(LVT.isInteger()); 4961 4962 // If the type we've chosen is larger than the largest legal integer type 4963 // then use that instead. 4964 if (VT.bitsGT(LVT)) 4965 VT = LVT; 4966 } 4967 4968 unsigned NumMemOps = 0; 4969 while (Size != 0) { 4970 unsigned VTSize = VT.getSizeInBits() / 8; 4971 while (VTSize > Size) { 4972 // For now, only use non-vector load / store's for the left-over pieces. 4973 EVT NewVT = VT; 4974 unsigned NewVTSize; 4975 4976 bool Found = false; 4977 if (VT.isVector() || VT.isFloatingPoint()) { 4978 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32; 4979 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) && 4980 TLI.isSafeMemOpType(NewVT.getSimpleVT())) 4981 Found = true; 4982 else if (NewVT == MVT::i64 && 4983 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) && 4984 TLI.isSafeMemOpType(MVT::f64)) { 4985 // i64 is usually not legal on 32-bit targets, but f64 may be. 4986 NewVT = MVT::f64; 4987 Found = true; 4988 } 4989 } 4990 4991 if (!Found) { 4992 do { 4993 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1); 4994 if (NewVT == MVT::i8) 4995 break; 4996 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT())); 4997 } 4998 NewVTSize = NewVT.getSizeInBits() / 8; 4999 5000 // If the new VT cannot cover all of the remaining bits, then consider 5001 // issuing a (or a pair of) unaligned and overlapping load / store. 5002 // FIXME: Only does this for 64-bit or more since we don't have proper 5003 // cost model for unaligned load / store. 5004 bool Fast; 5005 if (NumMemOps && AllowOverlap && 5006 VTSize >= 8 && NewVTSize < Size && 5007 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast) 5008 VTSize = Size; 5009 else { 5010 VT = NewVT; 5011 VTSize = NewVTSize; 5012 } 5013 } 5014 5015 if (++NumMemOps > Limit) 5016 return false; 5017 5018 MemOps.push_back(VT); 5019 Size -= VTSize; 5020 } 5021 5022 return true; 5023 } 5024 5025 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { 5026 // On Darwin, -Os means optimize for size without hurting performance, so 5027 // only really optimize for size when -Oz (MinSize) is used. 5028 if (MF.getTarget().getTargetTriple().isOSDarwin()) 5029 return MF.getFunction()->optForMinSize(); 5030 return MF.getFunction()->optForSize(); 5031 } 5032 5033 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5034 SDValue Chain, SDValue Dst, SDValue Src, 5035 uint64_t Size, unsigned Align, 5036 bool isVol, bool AlwaysInline, 5037 MachinePointerInfo DstPtrInfo, 5038 MachinePointerInfo SrcPtrInfo) { 5039 // Turn a memcpy of undef to nop. 5040 if (Src.isUndef()) 5041 return Chain; 5042 5043 // Expand memcpy to a series of load and store ops if the size operand falls 5044 // below a certain threshold. 
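// (The "threshold" here is the target's getMaxStoresPerMemcpy limit; when
// AlwaysInline is set, the Limit computed below is effectively unbounded.)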
5045 // TODO: In the AlwaysInline case, if the size is big then generate a loop 5046 // rather than maybe a humongous number of loads and stores. 5047 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5048 const DataLayout &DL = DAG.getDataLayout(); 5049 LLVMContext &C = *DAG.getContext(); 5050 std::vector<EVT> MemOps; 5051 bool DstAlignCanChange = false; 5052 MachineFunction &MF = DAG.getMachineFunction(); 5053 MachineFrameInfo &MFI = MF.getFrameInfo(); 5054 bool OptSize = shouldLowerMemFuncForSize(MF); 5055 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5056 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5057 DstAlignCanChange = true; 5058 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5059 if (Align > SrcAlign) 5060 SrcAlign = Align; 5061 ConstantDataArraySlice Slice; 5062 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); 5063 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 5064 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 5065 5066 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 5067 (DstAlignCanChange ? 0 : Align), 5068 (isZeroConstant ? 0 : SrcAlign), 5069 false, false, CopyFromConstant, true, 5070 DstPtrInfo.getAddrSpace(), 5071 SrcPtrInfo.getAddrSpace(), 5072 DAG, TLI)) 5073 return SDValue(); 5074 5075 if (DstAlignCanChange) { 5076 Type *Ty = MemOps[0].getTypeForEVT(C); 5077 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5078 5079 // Don't promote to an alignment that would require dynamic stack 5080 // realignment. 5081 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 5082 if (!TRI->needsStackRealignment(MF)) 5083 while (NewAlign > Align && 5084 DL.exceedsNaturalStackAlignment(NewAlign)) 5085 NewAlign /= 2; 5086 5087 if (NewAlign > Align) { 5088 // Give the stack frame object a larger alignment if needed. 5089 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5090 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5091 Align = NewAlign; 5092 } 5093 } 5094 5095 MachineMemOperand::Flags MMOFlags = 5096 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5097 SmallVector<SDValue, 8> OutChains; 5098 unsigned NumMemOps = MemOps.size(); 5099 uint64_t SrcOff = 0, DstOff = 0; 5100 for (unsigned i = 0; i != NumMemOps; ++i) { 5101 EVT VT = MemOps[i]; 5102 unsigned VTSize = VT.getSizeInBits() / 8; 5103 SDValue Value, Store; 5104 5105 if (VTSize > Size) { 5106 // Issuing an unaligned load / store pair that overlaps with the previous 5107 // pair. Adjust the offset accordingly. 5108 assert(i == NumMemOps-1 && i != 0); 5109 SrcOff -= VTSize - Size; 5110 DstOff -= VTSize - Size; 5111 } 5112 5113 if (CopyFromConstant && 5114 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 5115 // It's unlikely a store of a vector immediate can be done in a single 5116 // instruction. It would require a load from a constantpool first. 5117 // We only handle zero vectors here. 5118 // FIXME: Handle other cases where store of vector immediate is done in 5119 // a single instruction. 5120 ConstantDataArraySlice SubSlice; 5121 if (SrcOff < Slice.Length) { 5122 SubSlice = Slice; 5123 SubSlice.move(SrcOff); 5124 } else { 5125 // This is an out-of-bounds access and hence UB. Pretend we read zero. 
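// (A null Slice.Array makes getMemsetStringVal below synthesize an all-zero
// value of the requested type.)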
5126 SubSlice.Array = nullptr; 5127 SubSlice.Offset = 0; 5128 SubSlice.Length = VTSize; 5129 } 5130 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); 5131 if (Value.getNode()) 5132 Store = DAG.getStore(Chain, dl, Value, 5133 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5134 DstPtrInfo.getWithOffset(DstOff), Align, 5135 MMOFlags); 5136 } 5137 5138 if (!Store.getNode()) { 5139 // The type might not be legal for the target. This should only happen 5140 // if the type is smaller than a legal type, as on PPC, so the right 5141 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 5142 // to Load/Store if NVT==VT. 5143 // FIXME does the case above also need this? 5144 EVT NVT = TLI.getTypeToTransformTo(C, VT); 5145 assert(NVT.bitsGE(VT)); 5146 5147 bool isDereferenceable = 5148 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 5149 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 5150 if (isDereferenceable) 5151 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 5152 5153 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 5154 DAG.getMemBasePlusOffset(Src, SrcOff, dl), 5155 SrcPtrInfo.getWithOffset(SrcOff), VT, 5156 MinAlign(SrcAlign, SrcOff), SrcMMOFlags); 5157 OutChains.push_back(Value.getValue(1)); 5158 Store = DAG.getTruncStore( 5159 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5160 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags); 5161 } 5162 OutChains.push_back(Store); 5163 SrcOff += VTSize; 5164 DstOff += VTSize; 5165 Size -= VTSize; 5166 } 5167 5168 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5169 } 5170 5171 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5172 SDValue Chain, SDValue Dst, SDValue Src, 5173 uint64_t Size, unsigned Align, 5174 bool isVol, bool AlwaysInline, 5175 MachinePointerInfo DstPtrInfo, 5176 MachinePointerInfo SrcPtrInfo) { 5177 // Turn a memmove of undef to nop. 5178 if (Src.isUndef()) 5179 return Chain; 5180 5181 // Expand memmove to a series of load and store ops if the size operand falls 5182 // below a certain threshold. 5183 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5184 const DataLayout &DL = DAG.getDataLayout(); 5185 LLVMContext &C = *DAG.getContext(); 5186 std::vector<EVT> MemOps; 5187 bool DstAlignCanChange = false; 5188 MachineFunction &MF = DAG.getMachineFunction(); 5189 MachineFrameInfo &MFI = MF.getFrameInfo(); 5190 bool OptSize = shouldLowerMemFuncForSize(MF); 5191 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5192 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5193 DstAlignCanChange = true; 5194 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5195 if (Align > SrcAlign) 5196 SrcAlign = Align; 5197 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 5198 5199 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 5200 (DstAlignCanChange ? 0 : Align), SrcAlign, 5201 false, false, false, false, 5202 DstPtrInfo.getAddrSpace(), 5203 SrcPtrInfo.getAddrSpace(), 5204 DAG, TLI)) 5205 return SDValue(); 5206 5207 if (DstAlignCanChange) { 5208 Type *Ty = MemOps[0].getTypeForEVT(C); 5209 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5210 if (NewAlign > Align) { 5211 // Give the stack frame object a larger alignment if needed. 5212 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5213 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5214 Align = NewAlign; 5215 } 5216 } 5217 5218 MachineMemOperand::Flags MMOFlags = 5219 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5220 uint64_t SrcOff = 0, DstOff = 0;
5221 SmallVector<SDValue, 8> LoadValues;
5222 SmallVector<SDValue, 8> LoadChains;
5223 SmallVector<SDValue, 8> OutChains;
5224 unsigned NumMemOps = MemOps.size();
5225 for (unsigned i = 0; i < NumMemOps; i++) {
5226 EVT VT = MemOps[i];
5227 unsigned VTSize = VT.getSizeInBits() / 8;
5228 SDValue Value;
5229
5230 bool isDereferenceable =
5231 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5232 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5233 if (isDereferenceable)
5234 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5235
5236 Value =
5237 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5238 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
5239 LoadValues.push_back(Value);
5240 LoadChains.push_back(Value.getValue(1));
5241 SrcOff += VTSize;
5242 }
5243 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
5244 OutChains.clear();
5245 for (unsigned i = 0; i < NumMemOps; i++) {
5246 EVT VT = MemOps[i];
5247 unsigned VTSize = VT.getSizeInBits() / 8;
5248 SDValue Store;
5249
5250 Store = DAG.getStore(Chain, dl, LoadValues[i],
5251 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5252 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
5253 OutChains.push_back(Store);
5254 DstOff += VTSize;
5255 }
5256
5257 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
5258 }
5259
5260 /// \brief Lower a call to the 'memset' intrinsic into a series of store
5261 /// operations.
5262 ///
5263 /// \param DAG Selection DAG where lowered code is placed.
5264 /// \param dl Link to the corresponding IR location.
5265 /// \param Chain Control flow dependency.
5266 /// \param Dst Pointer to destination memory location.
5267 /// \param Src Value of the byte to write into memory.
5268 /// \param Size Number of bytes to write.
5269 /// \param Align Alignment of the destination in bytes.
5270 /// \param isVol True if destination is volatile.
5271 /// \param DstPtrInfo IR information on the memory pointer.
5272 /// \returns The new head of the control flow if lowering was successful, an
5273 /// empty SDValue otherwise.
5274 ///
5275 /// The function tries to replace the 'llvm.memset' intrinsic with several
5276 /// store operations and value calculation code. This is usually profitable
5277 /// for small memory sizes.
5278 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
5279 SDValue Chain, SDValue Dst, SDValue Src,
5280 uint64_t Size, unsigned Align, bool isVol,
5281 MachinePointerInfo DstPtrInfo) {
5282 // Turn a memset of undef to nop.
5283 if (Src.isUndef())
5284 return Chain;
5285
5286 // Expand memset to a series of store ops if the size operand
5287 // falls below a certain threshold.
5288 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5289 std::vector<EVT> MemOps;
5290 bool DstAlignCanChange = false;
5291 MachineFunction &MF = DAG.getMachineFunction();
5292 MachineFrameInfo &MFI = MF.getFrameInfo();
5293 bool OptSize = shouldLowerMemFuncForSize(MF);
5294 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5295 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5296 DstAlignCanChange = true;
5297 bool IsZeroVal =
5298 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
5299 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
5300 Size, (DstAlignCanChange ?
0 : Align), 0, 5301 true, IsZeroVal, false, true, 5302 DstPtrInfo.getAddrSpace(), ~0u, 5303 DAG, TLI)) 5304 return SDValue(); 5305 5306 if (DstAlignCanChange) { 5307 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 5308 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 5309 if (NewAlign > Align) { 5310 // Give the stack frame object a larger alignment if needed. 5311 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5312 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5313 Align = NewAlign; 5314 } 5315 } 5316 5317 SmallVector<SDValue, 8> OutChains; 5318 uint64_t DstOff = 0; 5319 unsigned NumMemOps = MemOps.size(); 5320 5321 // Find the largest store and generate the bit pattern for it. 5322 EVT LargestVT = MemOps[0]; 5323 for (unsigned i = 1; i < NumMemOps; i++) 5324 if (MemOps[i].bitsGT(LargestVT)) 5325 LargestVT = MemOps[i]; 5326 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 5327 5328 for (unsigned i = 0; i < NumMemOps; i++) { 5329 EVT VT = MemOps[i]; 5330 unsigned VTSize = VT.getSizeInBits() / 8; 5331 if (VTSize > Size) { 5332 // Issuing an unaligned load / store pair that overlaps with the previous 5333 // pair. Adjust the offset accordingly. 5334 assert(i == NumMemOps-1 && i != 0); 5335 DstOff -= VTSize - Size; 5336 } 5337 5338 // If this store is smaller than the largest store see whether we can get 5339 // the smaller value for free with a truncate. 5340 SDValue Value = MemSetValue; 5341 if (VT.bitsLT(LargestVT)) { 5342 if (!LargestVT.isVector() && !VT.isVector() && 5343 TLI.isTruncateFree(LargestVT, VT)) 5344 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 5345 else 5346 Value = getMemsetValue(Src, VT, DAG, dl); 5347 } 5348 assert(Value.getValueType() == VT && "Value with wrong type."); 5349 SDValue Store = DAG.getStore( 5350 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5351 DstPtrInfo.getWithOffset(DstOff), Align, 5352 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 5353 OutChains.push_back(Store); 5354 DstOff += VT.getSizeInBits() / 8; 5355 Size -= VTSize; 5356 } 5357 5358 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5359 } 5360 5361 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 5362 unsigned AS) { 5363 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 5364 // pointer operands can be losslessly bitcasted to pointers of address space 0 5365 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 5366 report_fatal_error("cannot lower memory intrinsic in address space " + 5367 Twine(AS)); 5368 } 5369 } 5370 5371 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 5372 SDValue Src, SDValue Size, unsigned Align, 5373 bool isVol, bool AlwaysInline, bool isTailCall, 5374 MachinePointerInfo DstPtrInfo, 5375 MachinePointerInfo SrcPtrInfo) { 5376 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5377 5378 // Check to see if we should lower the memcpy to loads and stores first. 5379 // For cases within the target-specified limits, this is the best choice. 5380 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5381 if (ConstantSize) { 5382 // Memcpy with size zero? Just return the original chain. 
5383 if (ConstantSize->isNullValue()) 5384 return Chain; 5385 5386 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 5387 ConstantSize->getZExtValue(),Align, 5388 isVol, false, DstPtrInfo, SrcPtrInfo); 5389 if (Result.getNode()) 5390 return Result; 5391 } 5392 5393 // Then check to see if we should lower the memcpy with target-specific 5394 // code. If the target chooses to do this, this is the next best. 5395 if (TSI) { 5396 SDValue Result = TSI->EmitTargetCodeForMemcpy( 5397 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 5398 DstPtrInfo, SrcPtrInfo); 5399 if (Result.getNode()) 5400 return Result; 5401 } 5402 5403 // If we really need inline code and the target declined to provide it, 5404 // use a (potentially long) sequence of loads and stores. 5405 if (AlwaysInline) { 5406 assert(ConstantSize && "AlwaysInline requires a constant size!"); 5407 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 5408 ConstantSize->getZExtValue(), Align, isVol, 5409 true, DstPtrInfo, SrcPtrInfo); 5410 } 5411 5412 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5413 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 5414 5415 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 5416 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 5417 // respect volatile, so they may do things like read or write memory 5418 // beyond the given memory regions. But fixing this isn't easy, and most 5419 // people don't care. 5420 5421 // Emit a library call. 5422 TargetLowering::ArgListTy Args; 5423 TargetLowering::ArgListEntry Entry; 5424 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 5425 Entry.Node = Dst; Args.push_back(Entry); 5426 Entry.Node = Src; Args.push_back(Entry); 5427 Entry.Node = Size; Args.push_back(Entry); 5428 // FIXME: pass in SDLoc 5429 TargetLowering::CallLoweringInfo CLI(*this); 5430 CLI.setDebugLoc(dl) 5431 .setChain(Chain) 5432 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 5433 Dst.getValueType().getTypeForEVT(*getContext()), 5434 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 5435 TLI->getPointerTy(getDataLayout())), 5436 std::move(Args)) 5437 .setDiscardResult() 5438 .setTailCall(isTailCall); 5439 5440 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5441 return CallResult.second; 5442 } 5443 5444 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 5445 SDValue Src, SDValue Size, unsigned Align, 5446 bool isVol, bool isTailCall, 5447 MachinePointerInfo DstPtrInfo, 5448 MachinePointerInfo SrcPtrInfo) { 5449 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5450 5451 // Check to see if we should lower the memmove to loads and stores first. 5452 // For cases within the target-specified limits, this is the best choice. 5453 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5454 if (ConstantSize) { 5455 // Memmove with size zero? Just return the original chain. 5456 if (ConstantSize->isNullValue()) 5457 return Chain; 5458 5459 SDValue Result = 5460 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 5461 ConstantSize->getZExtValue(), Align, isVol, 5462 false, DstPtrInfo, SrcPtrInfo); 5463 if (Result.getNode()) 5464 return Result; 5465 } 5466 5467 // Then check to see if we should lower the memmove with target-specific 5468 // code. If the target chooses to do this, this is the next best. 
5469 if (TSI) { 5470 SDValue Result = TSI->EmitTargetCodeForMemmove( 5471 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 5472 if (Result.getNode()) 5473 return Result; 5474 } 5475 5476 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5477 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 5478 5479 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 5480 // not be safe. See memcpy above for more details. 5481 5482 // Emit a library call. 5483 TargetLowering::ArgListTy Args; 5484 TargetLowering::ArgListEntry Entry; 5485 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 5486 Entry.Node = Dst; Args.push_back(Entry); 5487 Entry.Node = Src; Args.push_back(Entry); 5488 Entry.Node = Size; Args.push_back(Entry); 5489 // FIXME: pass in SDLoc 5490 TargetLowering::CallLoweringInfo CLI(*this); 5491 CLI.setDebugLoc(dl) 5492 .setChain(Chain) 5493 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 5494 Dst.getValueType().getTypeForEVT(*getContext()), 5495 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 5496 TLI->getPointerTy(getDataLayout())), 5497 std::move(Args)) 5498 .setDiscardResult() 5499 .setTailCall(isTailCall); 5500 5501 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5502 return CallResult.second; 5503 } 5504 5505 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 5506 SDValue Src, SDValue Size, unsigned Align, 5507 bool isVol, bool isTailCall, 5508 MachinePointerInfo DstPtrInfo) { 5509 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 5510 5511 // Check to see if we should lower the memset to stores first. 5512 // For cases within the target-specified limits, this is the best choice. 5513 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 5514 if (ConstantSize) { 5515 // Memset with size zero? Just return the original chain. 5516 if (ConstantSize->isNullValue()) 5517 return Chain; 5518 5519 SDValue Result = 5520 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 5521 Align, isVol, DstPtrInfo); 5522 5523 if (Result.getNode()) 5524 return Result; 5525 } 5526 5527 // Then check to see if we should lower the memset with target-specific 5528 // code. If the target chooses to do this, this is the next best. 5529 if (TSI) { 5530 SDValue Result = TSI->EmitTargetCodeForMemset( 5531 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); 5532 if (Result.getNode()) 5533 return Result; 5534 } 5535 5536 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 5537 5538 // Emit a library call. 
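// Sketch of what the code below builds, assuming the usual C signature: a
// call equivalent to memset(dst, value, size) whose return value is discarded
// and which may be emitted as a tail call if requested.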
5539 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext()); 5540 TargetLowering::ArgListTy Args; 5541 TargetLowering::ArgListEntry Entry; 5542 Entry.Node = Dst; Entry.Ty = IntPtrTy; 5543 Args.push_back(Entry); 5544 Entry.Node = Src; 5545 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 5546 Args.push_back(Entry); 5547 Entry.Node = Size; 5548 Entry.Ty = IntPtrTy; 5549 Args.push_back(Entry); 5550 5551 // FIXME: pass in SDLoc 5552 TargetLowering::CallLoweringInfo CLI(*this); 5553 CLI.setDebugLoc(dl) 5554 .setChain(Chain) 5555 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 5556 Dst.getValueType().getTypeForEVT(*getContext()), 5557 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 5558 TLI->getPointerTy(getDataLayout())), 5559 std::move(Args)) 5560 .setDiscardResult() 5561 .setTailCall(isTailCall); 5562 5563 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 5564 return CallResult.second; 5565 } 5566 5567 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5568 SDVTList VTList, ArrayRef<SDValue> Ops, 5569 MachineMemOperand *MMO) { 5570 FoldingSetNodeID ID; 5571 ID.AddInteger(MemVT.getRawBits()); 5572 AddNodeIDNode(ID, Opcode, VTList, Ops); 5573 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5574 void* IP = nullptr; 5575 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5576 cast<AtomicSDNode>(E)->refineAlignment(MMO); 5577 return SDValue(E, 0); 5578 } 5579 5580 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5581 VTList, MemVT, MMO); 5582 createOperands(N, Ops); 5583 5584 CSEMap.InsertNode(N, IP); 5585 InsertNode(N); 5586 return SDValue(N, 0); 5587 } 5588 5589 SDValue SelectionDAG::getAtomicCmpSwap( 5590 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, 5591 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo, 5592 unsigned Alignment, AtomicOrdering SuccessOrdering, 5593 AtomicOrdering FailureOrdering, SyncScope::ID SSID) { 5594 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 5595 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 5596 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 5597 5598 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5599 Alignment = getEVTAlignment(MemVT); 5600 5601 MachineFunction &MF = getMachineFunction(); 5602 5603 // FIXME: Volatile isn't really correct; we should keep track of atomic 5604 // orderings in the memoperand. 
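// A compare-and-swap both reads and writes the addressed location, so the
// memory operand below is marked as both a load and a store.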
5605 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad | 5606 MachineMemOperand::MOStore; 5607 MachineMemOperand *MMO = 5608 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, 5609 AAMDNodes(), nullptr, SSID, SuccessOrdering, 5610 FailureOrdering); 5611 5612 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO); 5613 } 5614 5615 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 5616 EVT MemVT, SDVTList VTs, SDValue Chain, 5617 SDValue Ptr, SDValue Cmp, SDValue Swp, 5618 MachineMemOperand *MMO) { 5619 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 5620 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 5621 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 5622 5623 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 5624 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5625 } 5626 5627 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5628 SDValue Chain, SDValue Ptr, SDValue Val, 5629 const Value *PtrVal, unsigned Alignment, 5630 AtomicOrdering Ordering, 5631 SyncScope::ID SSID) { 5632 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5633 Alignment = getEVTAlignment(MemVT); 5634 5635 MachineFunction &MF = getMachineFunction(); 5636 // An atomic store does not load. An atomic load does not store. 5637 // (An atomicrmw obviously both loads and stores.) 5638 // For now, atomics are considered to be volatile always, and they are 5639 // chained as such. 5640 // FIXME: Volatile isn't really correct; we should keep track of atomic 5641 // orderings in the memoperand. 5642 auto Flags = MachineMemOperand::MOVolatile; 5643 if (Opcode != ISD::ATOMIC_STORE) 5644 Flags |= MachineMemOperand::MOLoad; 5645 if (Opcode != ISD::ATOMIC_LOAD) 5646 Flags |= MachineMemOperand::MOStore; 5647 5648 MachineMemOperand *MMO = 5649 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 5650 MemVT.getStoreSize(), Alignment, AAMDNodes(), 5651 nullptr, SSID, Ordering); 5652 5653 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO); 5654 } 5655 5656 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5657 SDValue Chain, SDValue Ptr, SDValue Val, 5658 MachineMemOperand *MMO) { 5659 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 5660 Opcode == ISD::ATOMIC_LOAD_SUB || 5661 Opcode == ISD::ATOMIC_LOAD_AND || 5662 Opcode == ISD::ATOMIC_LOAD_OR || 5663 Opcode == ISD::ATOMIC_LOAD_XOR || 5664 Opcode == ISD::ATOMIC_LOAD_NAND || 5665 Opcode == ISD::ATOMIC_LOAD_MIN || 5666 Opcode == ISD::ATOMIC_LOAD_MAX || 5667 Opcode == ISD::ATOMIC_LOAD_UMIN || 5668 Opcode == ISD::ATOMIC_LOAD_UMAX || 5669 Opcode == ISD::ATOMIC_SWAP || 5670 Opcode == ISD::ATOMIC_STORE) && 5671 "Invalid Atomic Op"); 5672 5673 EVT VT = Val.getValueType(); 5674 5675 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : 5676 getVTList(VT, MVT::Other); 5677 SDValue Ops[] = {Chain, Ptr, Val}; 5678 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5679 } 5680 5681 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 5682 EVT VT, SDValue Chain, SDValue Ptr, 5683 MachineMemOperand *MMO) { 5684 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 5685 5686 SDVTList VTs = getVTList(VT, MVT::Other); 5687 SDValue Ops[] = {Chain, Ptr}; 5688 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 5689 } 5690 5691 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 
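/// For example, getMergeValues({A, B}, dl) produces one node with two results
/// whose types are A's and B's value types; a single-operand list is simply
/// returned unchanged.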
5692 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 5693 if (Ops.size() == 1) 5694 return Ops[0]; 5695 5696 SmallVector<EVT, 4> VTs; 5697 VTs.reserve(Ops.size()); 5698 for (unsigned i = 0; i < Ops.size(); ++i) 5699 VTs.push_back(Ops[i].getValueType()); 5700 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 5701 } 5702 5703 SDValue SelectionDAG::getMemIntrinsicNode( 5704 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 5705 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, bool Vol, 5706 bool ReadMem, bool WriteMem, unsigned Size) { 5707 if (Align == 0) // Ensure that codegen never sees alignment 0 5708 Align = getEVTAlignment(MemVT); 5709 5710 MachineFunction &MF = getMachineFunction(); 5711 auto Flags = MachineMemOperand::MONone; 5712 if (WriteMem) 5713 Flags |= MachineMemOperand::MOStore; 5714 if (ReadMem) 5715 Flags |= MachineMemOperand::MOLoad; 5716 if (Vol) 5717 Flags |= MachineMemOperand::MOVolatile; 5718 if (!Size) 5719 Size = MemVT.getStoreSize(); 5720 MachineMemOperand *MMO = 5721 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align); 5722 5723 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 5724 } 5725 5726 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 5727 SDVTList VTList, 5728 ArrayRef<SDValue> Ops, EVT MemVT, 5729 MachineMemOperand *MMO) { 5730 assert((Opcode == ISD::INTRINSIC_VOID || 5731 Opcode == ISD::INTRINSIC_W_CHAIN || 5732 Opcode == ISD::PREFETCH || 5733 Opcode == ISD::LIFETIME_START || 5734 Opcode == ISD::LIFETIME_END || 5735 ((int)Opcode <= std::numeric_limits<int>::max() && 5736 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 5737 "Opcode is not a memory-accessing opcode!"); 5738 5739 // Memoize the node unless it returns a flag. 5740 MemIntrinsicSDNode *N; 5741 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 5742 FoldingSetNodeID ID; 5743 AddNodeIDNode(ID, Opcode, VTList, Ops); 5744 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5745 void *IP = nullptr; 5746 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5747 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 5748 return SDValue(E, 0); 5749 } 5750 5751 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5752 VTList, MemVT, MMO); 5753 createOperands(N, Ops); 5754 5755 CSEMap.InsertNode(N, IP); 5756 } else { 5757 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 5758 VTList, MemVT, MMO); 5759 createOperands(N, Ops); 5760 } 5761 InsertNode(N); 5762 return SDValue(N, 0); 5763 } 5764 5765 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 5766 /// MachinePointerInfo record from it. This is particularly useful because the 5767 /// code generator has many cases where it doesn't bother passing in a 5768 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 5769 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, 5770 int64_t Offset = 0) { 5771 // If this is FI+Offset, we can model it. 5772 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 5773 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 5774 FI->getIndex(), Offset); 5775 5776 // If this is (FI+Offset1)+Offset2, we can model it. 
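// For example, (add (FrameIndex 3), (Constant 8)) with an incoming Offset of
// 4 is modeled as fixed-stack slot #3 at offset 12.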
5777 if (Ptr.getOpcode() != ISD::ADD || 5778 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 5779 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 5780 return MachinePointerInfo(); 5781 5782 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 5783 return MachinePointerInfo::getFixedStack( 5784 DAG.getMachineFunction(), FI, 5785 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 5786 } 5787 5788 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 5789 /// MachinePointerInfo record from it. This is particularly useful because the 5790 /// code generator has many cases where it doesn't bother passing in a 5791 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 5792 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, 5793 SDValue OffsetOp) { 5794 // If the 'Offset' value isn't a constant, we can't handle this. 5795 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 5796 return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue()); 5797 if (OffsetOp.isUndef()) 5798 return InferPointerInfo(DAG, Ptr); 5799 return MachinePointerInfo(); 5800 } 5801 5802 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 5803 EVT VT, const SDLoc &dl, SDValue Chain, 5804 SDValue Ptr, SDValue Offset, 5805 MachinePointerInfo PtrInfo, EVT MemVT, 5806 unsigned Alignment, 5807 MachineMemOperand::Flags MMOFlags, 5808 const AAMDNodes &AAInfo, const MDNode *Ranges) { 5809 assert(Chain.getValueType() == MVT::Other && 5810 "Invalid chain type"); 5811 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5812 Alignment = getEVTAlignment(MemVT); 5813 5814 MMOFlags |= MachineMemOperand::MOLoad; 5815 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 5816 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 5817 // clients. 5818 if (PtrInfo.V.isNull()) 5819 PtrInfo = InferPointerInfo(*this, Ptr, Offset); 5820 5821 MachineFunction &MF = getMachineFunction(); 5822 MachineMemOperand *MMO = MF.getMachineMemOperand( 5823 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges); 5824 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 5825 } 5826 5827 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 5828 EVT VT, const SDLoc &dl, SDValue Chain, 5829 SDValue Ptr, SDValue Offset, EVT MemVT, 5830 MachineMemOperand *MMO) { 5831 if (VT == MemVT) { 5832 ExtType = ISD::NON_EXTLOAD; 5833 } else if (ExtType == ISD::NON_EXTLOAD) { 5834 assert(VT == MemVT && "Non-extending load from different memory type!"); 5835 } else { 5836 // Extending load. 5837 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 5838 "Should only be an extending load, not truncating!"); 5839 assert(VT.isInteger() == MemVT.isInteger() && 5840 "Cannot convert from FP to Int or Int -> FP!"); 5841 assert(VT.isVector() == MemVT.isVector() && 5842 "Cannot use an ext load to convert to or from a vector!"); 5843 assert((!VT.isVector() || 5844 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 5845 "Cannot use an ext load to change the number of vector elements!"); 5846 } 5847 5848 bool Indexed = AM != ISD::UNINDEXED; 5849 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 5850 5851 SDVTList VTs = Indexed ? 
5852 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
5853 SDValue Ops[] = { Chain, Ptr, Offset };
5854 FoldingSetNodeID ID;
5855 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
5856 ID.AddInteger(MemVT.getRawBits());
5857 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
5858 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
5859 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5860 void *IP = nullptr;
5861 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5862 cast<LoadSDNode>(E)->refineAlignment(MMO);
5863 return SDValue(E, 0);
5864 }
5865 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
5866 ExtType, MemVT, MMO);
5867 createOperands(N, Ops);
5868
5869 CSEMap.InsertNode(N, IP);
5870 InsertNode(N);
5871 return SDValue(N, 0);
5872 }
5873
5874 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5875 SDValue Ptr, MachinePointerInfo PtrInfo,
5876 unsigned Alignment,
5877 MachineMemOperand::Flags MMOFlags,
5878 const AAMDNodes &AAInfo, const MDNode *Ranges) {
5879 SDValue Undef = getUNDEF(Ptr.getValueType());
5880 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5881 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
5882 }
5883
5884 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5885 SDValue Ptr, MachineMemOperand *MMO) {
5886 SDValue Undef = getUNDEF(Ptr.getValueType());
5887 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5888 VT, MMO);
5889 }
5890
5891 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5892 EVT VT, SDValue Chain, SDValue Ptr,
5893 MachinePointerInfo PtrInfo, EVT MemVT,
5894 unsigned Alignment,
5895 MachineMemOperand::Flags MMOFlags,
5896 const AAMDNodes &AAInfo) {
5897 SDValue Undef = getUNDEF(Ptr.getValueType());
5898 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
5899 MemVT, Alignment, MMOFlags, AAInfo);
5900 }
5901
5902 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5903 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
5904 MachineMemOperand *MMO) {
5905 SDValue Undef = getUNDEF(Ptr.getValueType());
5906 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
5907 MemVT, MMO);
5908 }
5909
5910 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
5911 SDValue Base, SDValue Offset,
5912 ISD::MemIndexedMode AM) {
5913 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
5914 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
5915 // Don't propagate the invariant or dereferenceable flags.
5916 auto MMOFlags = 5917 LD->getMemOperand()->getFlags() & 5918 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 5919 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 5920 LD->getChain(), Base, Offset, LD->getPointerInfo(), 5921 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 5922 LD->getAAInfo()); 5923 } 5924 5925 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5926 SDValue Ptr, MachinePointerInfo PtrInfo, 5927 unsigned Alignment, 5928 MachineMemOperand::Flags MMOFlags, 5929 const AAMDNodes &AAInfo) { 5930 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 5931 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5932 Alignment = getEVTAlignment(Val.getValueType()); 5933 5934 MMOFlags |= MachineMemOperand::MOStore; 5935 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 5936 5937 if (PtrInfo.V.isNull()) 5938 PtrInfo = InferPointerInfo(*this, Ptr); 5939 5940 MachineFunction &MF = getMachineFunction(); 5941 MachineMemOperand *MMO = MF.getMachineMemOperand( 5942 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo); 5943 return getStore(Chain, dl, Val, Ptr, MMO); 5944 } 5945 5946 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5947 SDValue Ptr, MachineMemOperand *MMO) { 5948 assert(Chain.getValueType() == MVT::Other && 5949 "Invalid chain type"); 5950 EVT VT = Val.getValueType(); 5951 SDVTList VTs = getVTList(MVT::Other); 5952 SDValue Undef = getUNDEF(Ptr.getValueType()); 5953 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 5954 FoldingSetNodeID ID; 5955 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 5956 ID.AddInteger(VT.getRawBits()); 5957 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 5958 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 5959 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 5960 void *IP = nullptr; 5961 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 5962 cast<StoreSDNode>(E)->refineAlignment(MMO); 5963 return SDValue(E, 0); 5964 } 5965 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 5966 ISD::UNINDEXED, false, VT, MMO); 5967 createOperands(N, Ops); 5968 5969 CSEMap.InsertNode(N, IP); 5970 InsertNode(N); 5971 return SDValue(N, 0); 5972 } 5973 5974 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5975 SDValue Ptr, MachinePointerInfo PtrInfo, 5976 EVT SVT, unsigned Alignment, 5977 MachineMemOperand::Flags MMOFlags, 5978 const AAMDNodes &AAInfo) { 5979 assert(Chain.getValueType() == MVT::Other && 5980 "Invalid chain type"); 5981 if (Alignment == 0) // Ensure that codegen never sees alignment 0 5982 Alignment = getEVTAlignment(SVT); 5983 5984 MMOFlags |= MachineMemOperand::MOStore; 5985 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 5986 5987 if (PtrInfo.V.isNull()) 5988 PtrInfo = InferPointerInfo(*this, Ptr); 5989 5990 MachineFunction &MF = getMachineFunction(); 5991 MachineMemOperand *MMO = MF.getMachineMemOperand( 5992 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 5993 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 5994 } 5995 5996 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 5997 SDValue Ptr, EVT SVT, 5998 MachineMemOperand *MMO) { 5999 EVT VT = Val.getValueType(); 6000 6001 assert(Chain.getValueType() == MVT::Other && 6002 "Invalid chain type"); 6003 if (VT == SVT) 6004 return getStore(Chain, dl, Val, Ptr, MMO); 6005 6006 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 6007 
"Should only be a truncating store, not extending!"); 6008 assert(VT.isInteger() == SVT.isInteger() && 6009 "Can't do FP-INT conversion!"); 6010 assert(VT.isVector() == SVT.isVector() && 6011 "Cannot use trunc store to convert to or from a vector!"); 6012 assert((!VT.isVector() || 6013 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 6014 "Cannot use trunc store to change the number of vector elements!"); 6015 6016 SDVTList VTs = getVTList(MVT::Other); 6017 SDValue Undef = getUNDEF(Ptr.getValueType()); 6018 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6019 FoldingSetNodeID ID; 6020 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6021 ID.AddInteger(SVT.getRawBits()); 6022 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6023 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 6024 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6025 void *IP = nullptr; 6026 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6027 cast<StoreSDNode>(E)->refineAlignment(MMO); 6028 return SDValue(E, 0); 6029 } 6030 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6031 ISD::UNINDEXED, true, SVT, MMO); 6032 createOperands(N, Ops); 6033 6034 CSEMap.InsertNode(N, IP); 6035 InsertNode(N); 6036 return SDValue(N, 0); 6037 } 6038 6039 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 6040 SDValue Base, SDValue Offset, 6041 ISD::MemIndexedMode AM) { 6042 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 6043 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 6044 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 6045 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 6046 FoldingSetNodeID ID; 6047 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6048 ID.AddInteger(ST->getMemoryVT().getRawBits()); 6049 ID.AddInteger(ST->getRawSubclassData()); 6050 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 6051 void *IP = nullptr; 6052 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6053 return SDValue(E, 0); 6054 6055 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6056 ST->isTruncatingStore(), ST->getMemoryVT(), 6057 ST->getMemOperand()); 6058 createOperands(N, Ops); 6059 6060 CSEMap.InsertNode(N, IP); 6061 InsertNode(N); 6062 return SDValue(N, 0); 6063 } 6064 6065 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6066 SDValue Ptr, SDValue Mask, SDValue Src0, 6067 EVT MemVT, MachineMemOperand *MMO, 6068 ISD::LoadExtType ExtTy, bool isExpanding) { 6069 SDVTList VTs = getVTList(VT, MVT::Other); 6070 SDValue Ops[] = { Chain, Ptr, Mask, Src0 }; 6071 FoldingSetNodeID ID; 6072 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 6073 ID.AddInteger(VT.getRawBits()); 6074 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 6075 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO)); 6076 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6077 void *IP = nullptr; 6078 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6079 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 6080 return SDValue(E, 0); 6081 } 6082 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6083 ExtTy, isExpanding, MemVT, MMO); 6084 createOperands(N, Ops); 6085 6086 CSEMap.InsertNode(N, IP); 6087 InsertNode(N); 6088 return SDValue(N, 0); 6089 } 6090 6091 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 6092 SDValue Val, SDValue Ptr, SDValue Mask, 6093 EVT MemVT, MachineMemOperand *MMO, 6094 bool IsTruncating, bool IsCompressing) { 6095 assert(Chain.getValueType() 
== MVT::Other && 6096 "Invalid chain type"); 6097 EVT VT = Val.getValueType(); 6098 SDVTList VTs = getVTList(MVT::Other); 6099 SDValue Ops[] = { Chain, Ptr, Mask, Val }; 6100 FoldingSetNodeID ID; 6101 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 6102 ID.AddInteger(VT.getRawBits()); 6103 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 6104 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO)); 6105 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6106 void *IP = nullptr; 6107 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6108 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 6109 return SDValue(E, 0); 6110 } 6111 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6112 IsTruncating, IsCompressing, MemVT, MMO); 6113 createOperands(N, Ops); 6114 6115 CSEMap.InsertNode(N, IP); 6116 InsertNode(N); 6117 return SDValue(N, 0); 6118 } 6119 6120 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 6121 ArrayRef<SDValue> Ops, 6122 MachineMemOperand *MMO) { 6123 assert(Ops.size() == 5 && "Incompatible number of operands"); 6124 6125 FoldingSetNodeID ID; 6126 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 6127 ID.AddInteger(VT.getRawBits()); 6128 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 6129 dl.getIROrder(), VTs, VT, MMO)); 6130 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6131 void *IP = nullptr; 6132 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6133 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 6134 return SDValue(E, 0); 6135 } 6136 6137 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 6138 VTs, VT, MMO); 6139 createOperands(N, Ops); 6140 6141 assert(N->getValue().getValueType() == N->getValueType(0) && 6142 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 6143 assert(N->getMask().getValueType().getVectorNumElements() == 6144 N->getValueType(0).getVectorNumElements() && 6145 "Vector width mismatch between mask and data"); 6146 assert(N->getIndex().getValueType().getVectorNumElements() == 6147 N->getValueType(0).getVectorNumElements() && 6148 "Vector width mismatch between index and data"); 6149 6150 CSEMap.InsertNode(N, IP); 6151 InsertNode(N); 6152 return SDValue(N, 0); 6153 } 6154 6155 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 6156 ArrayRef<SDValue> Ops, 6157 MachineMemOperand *MMO) { 6158 assert(Ops.size() == 5 && "Incompatible number of operands"); 6159 6160 FoldingSetNodeID ID; 6161 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 6162 ID.AddInteger(VT.getRawBits()); 6163 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 6164 dl.getIROrder(), VTs, VT, MMO)); 6165 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6166 void *IP = nullptr; 6167 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6168 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 6169 return SDValue(E, 0); 6170 } 6171 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), 6172 VTs, VT, MMO); 6173 createOperands(N, Ops); 6174 6175 assert(N->getMask().getValueType().getVectorNumElements() == 6176 N->getValue().getValueType().getVectorNumElements() && 6177 "Vector width mismatch between mask and data"); 6178 assert(N->getIndex().getValueType().getVectorNumElements() == 6179 N->getValue().getValueType().getVectorNumElements() && 6180 "Vector width mismatch between index and data"); 6181 6182 CSEMap.InsertNode(N, IP); 6183 InsertNode(N); 6184 return SDValue(N, 0); 6185 } 6186 6187 SDValue 
SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 6188 SDValue Ptr, SDValue SV, unsigned Align) { 6189 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 6190 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 6191 } 6192 6193 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 6194 ArrayRef<SDUse> Ops) { 6195 switch (Ops.size()) { 6196 case 0: return getNode(Opcode, DL, VT); 6197 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 6198 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 6199 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 6200 default: break; 6201 } 6202 6203 // Copy from an SDUse array into an SDValue array for use with 6204 // the regular getNode logic. 6205 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 6206 return getNode(Opcode, DL, VT, NewOps); 6207 } 6208 6209 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 6210 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 6211 unsigned NumOps = Ops.size(); 6212 switch (NumOps) { 6213 case 0: return getNode(Opcode, DL, VT); 6214 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 6215 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 6216 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 6217 default: break; 6218 } 6219 6220 switch (Opcode) { 6221 default: break; 6222 case ISD::CONCAT_VECTORS: 6223 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF. 6224 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this)) 6225 return V; 6226 break; 6227 case ISD::SELECT_CC: 6228 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 6229 assert(Ops[0].getValueType() == Ops[1].getValueType() && 6230 "LHS and RHS of condition must have same type!"); 6231 assert(Ops[2].getValueType() == Ops[3].getValueType() && 6232 "True and False arms of SelectCC must have same type!"); 6233 assert(Ops[2].getValueType() == VT && 6234 "select_cc node must be of same type as true and false value!"); 6235 break; 6236 case ISD::BR_CC: 6237 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 6238 assert(Ops[2].getValueType() == Ops[3].getValueType() && 6239 "LHS/RHS of comparison should match types!"); 6240 break; 6241 } 6242 6243 // Memoize nodes. 
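// (As in the other getNode overloads, nodes producing MVT::Glue are not
// entered into the CSE map.)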
6244 SDNode *N; 6245 SDVTList VTs = getVTList(VT); 6246 6247 if (VT != MVT::Glue) { 6248 FoldingSetNodeID ID; 6249 AddNodeIDNode(ID, Opcode, VTs, Ops); 6250 void *IP = nullptr; 6251 6252 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 6253 return SDValue(E, 0); 6254 6255 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 6256 createOperands(N, Ops); 6257 6258 CSEMap.InsertNode(N, IP); 6259 } else { 6260 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 6261 createOperands(N, Ops); 6262 } 6263 6264 InsertNode(N); 6265 return SDValue(N, 0); 6266 } 6267 6268 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 6269 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 6270 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 6271 } 6272 6273 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6274 ArrayRef<SDValue> Ops) { 6275 if (VTList.NumVTs == 1) 6276 return getNode(Opcode, DL, VTList.VTs[0], Ops); 6277 6278 #if 0 6279 switch (Opcode) { 6280 // FIXME: figure out how to safely handle things like 6281 // int foo(int x) { return 1 << (x & 255); } 6282 // int bar() { return foo(256); } 6283 case ISD::SRA_PARTS: 6284 case ISD::SRL_PARTS: 6285 case ISD::SHL_PARTS: 6286 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 6287 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 6288 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 6289 else if (N3.getOpcode() == ISD::AND) 6290 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 6291 // If the and is only masking out bits that cannot effect the shift, 6292 // eliminate the and. 6293 unsigned NumBits = VT.getScalarSizeInBits()*2; 6294 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 6295 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 6296 } 6297 break; 6298 } 6299 #endif 6300 6301 // Memoize the node unless it returns a flag. 
6302 SDNode *N; 6303 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6304 FoldingSetNodeID ID; 6305 AddNodeIDNode(ID, Opcode, VTList, Ops); 6306 void *IP = nullptr; 6307 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 6308 return SDValue(E, 0); 6309 6310 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 6311 createOperands(N, Ops); 6312 CSEMap.InsertNode(N, IP); 6313 } else { 6314 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 6315 createOperands(N, Ops); 6316 } 6317 InsertNode(N); 6318 return SDValue(N, 0); 6319 } 6320 6321 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 6322 SDVTList VTList) { 6323 return getNode(Opcode, DL, VTList, None); 6324 } 6325 6326 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6327 SDValue N1) { 6328 SDValue Ops[] = { N1 }; 6329 return getNode(Opcode, DL, VTList, Ops); 6330 } 6331 6332 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6333 SDValue N1, SDValue N2) { 6334 SDValue Ops[] = { N1, N2 }; 6335 return getNode(Opcode, DL, VTList, Ops); 6336 } 6337 6338 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6339 SDValue N1, SDValue N2, SDValue N3) { 6340 SDValue Ops[] = { N1, N2, N3 }; 6341 return getNode(Opcode, DL, VTList, Ops); 6342 } 6343 6344 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6345 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 6346 SDValue Ops[] = { N1, N2, N3, N4 }; 6347 return getNode(Opcode, DL, VTList, Ops); 6348 } 6349 6350 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 6351 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 6352 SDValue N5) { 6353 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 6354 return getNode(Opcode, DL, VTList, Ops); 6355 } 6356 6357 SDVTList SelectionDAG::getVTList(EVT VT) { 6358 return makeVTList(SDNode::getValueTypeList(VT), 1); 6359 } 6360 6361 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 6362 FoldingSetNodeID ID; 6363 ID.AddInteger(2U); 6364 ID.AddInteger(VT1.getRawBits()); 6365 ID.AddInteger(VT2.getRawBits()); 6366 6367 void *IP = nullptr; 6368 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6369 if (!Result) { 6370 EVT *Array = Allocator.Allocate<EVT>(2); 6371 Array[0] = VT1; 6372 Array[1] = VT2; 6373 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 6374 VTListMap.InsertNode(Result, IP); 6375 } 6376 return Result->getSDVTList(); 6377 } 6378 6379 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 6380 FoldingSetNodeID ID; 6381 ID.AddInteger(3U); 6382 ID.AddInteger(VT1.getRawBits()); 6383 ID.AddInteger(VT2.getRawBits()); 6384 ID.AddInteger(VT3.getRawBits()); 6385 6386 void *IP = nullptr; 6387 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6388 if (!Result) { 6389 EVT *Array = Allocator.Allocate<EVT>(3); 6390 Array[0] = VT1; 6391 Array[1] = VT2; 6392 Array[2] = VT3; 6393 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 6394 VTListMap.InsertNode(Result, IP); 6395 } 6396 return Result->getSDVTList(); 6397 } 6398 6399 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 6400 FoldingSetNodeID ID; 6401 ID.AddInteger(4U); 6402 ID.AddInteger(VT1.getRawBits()); 6403 ID.AddInteger(VT2.getRawBits()); 6404 ID.AddInteger(VT3.getRawBits()); 6405 ID.AddInteger(VT4.getRawBits()); 6406 6407 void *IP = nullptr; 6408 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6409 if (!Result) { 
6410 EVT *Array = Allocator.Allocate<EVT>(4); 6411 Array[0] = VT1; 6412 Array[1] = VT2; 6413 Array[2] = VT3; 6414 Array[3] = VT4; 6415 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 6416 VTListMap.InsertNode(Result, IP); 6417 } 6418 return Result->getSDVTList(); 6419 } 6420 6421 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 6422 unsigned NumVTs = VTs.size(); 6423 FoldingSetNodeID ID; 6424 ID.AddInteger(NumVTs); 6425 for (unsigned index = 0; index < NumVTs; index++) { 6426 ID.AddInteger(VTs[index].getRawBits()); 6427 } 6428 6429 void *IP = nullptr; 6430 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 6431 if (!Result) { 6432 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 6433 std::copy(VTs.begin(), VTs.end(), Array); 6434 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 6435 VTListMap.InsertNode(Result, IP); 6436 } 6437 return Result->getSDVTList(); 6438 } 6439 6440 6441 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 6442 /// specified operands. If the resultant node already exists in the DAG, 6443 /// this does not modify the specified node, instead it returns the node that 6444 /// already exists. If the resultant node does not exist in the DAG, the 6445 /// input node is returned. As a degenerate case, if you specify the same 6446 /// input operands as the node already has, the input node is returned. 6447 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 6448 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 6449 6450 // Check to see if there is no change. 6451 if (Op == N->getOperand(0)) return N; 6452 6453 // See if the modified node already exists. 6454 void *InsertPos = nullptr; 6455 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 6456 return Existing; 6457 6458 // Nope it doesn't. Remove the node from its current place in the maps. 6459 if (InsertPos) 6460 if (!RemoveNodeFromCSEMaps(N)) 6461 InsertPos = nullptr; 6462 6463 // Now we update the operands. 6464 N->OperandList[0].set(Op); 6465 6466 // If this gets put into a CSE map, add it. 6467 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6468 return N; 6469 } 6470 6471 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 6472 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 6473 6474 // Check to see if there is no change. 6475 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 6476 return N; // No operands changed, just return the input node. 6477 6478 // See if the modified node already exists. 6479 void *InsertPos = nullptr; 6480 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 6481 return Existing; 6482 6483 // Nope it doesn't. Remove the node from its current place in the maps. 6484 if (InsertPos) 6485 if (!RemoveNodeFromCSEMaps(N)) 6486 InsertPos = nullptr; 6487 6488 // Now we update the operands. 6489 if (N->OperandList[0] != Op1) 6490 N->OperandList[0].set(Op1); 6491 if (N->OperandList[1] != Op2) 6492 N->OperandList[1].set(Op2); 6493 6494 // If this gets put into a CSE map, add it. 
6495 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6496 return N; 6497 } 6498 6499 SDNode *SelectionDAG:: 6500 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 6501 SDValue Ops[] = { Op1, Op2, Op3 }; 6502 return UpdateNodeOperands(N, Ops); 6503 } 6504 6505 SDNode *SelectionDAG:: 6506 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 6507 SDValue Op3, SDValue Op4) { 6508 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 6509 return UpdateNodeOperands(N, Ops); 6510 } 6511 6512 SDNode *SelectionDAG:: 6513 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 6514 SDValue Op3, SDValue Op4, SDValue Op5) { 6515 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 6516 return UpdateNodeOperands(N, Ops); 6517 } 6518 6519 SDNode *SelectionDAG:: 6520 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 6521 unsigned NumOps = Ops.size(); 6522 assert(N->getNumOperands() == NumOps && 6523 "Update with wrong number of operands"); 6524 6525 // If no operands changed just return the input node. 6526 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 6527 return N; 6528 6529 // See if the modified node already exists. 6530 void *InsertPos = nullptr; 6531 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 6532 return Existing; 6533 6534 // Nope it doesn't. Remove the node from its current place in the maps. 6535 if (InsertPos) 6536 if (!RemoveNodeFromCSEMaps(N)) 6537 InsertPos = nullptr; 6538 6539 // Now we update the operands. 6540 for (unsigned i = 0; i != NumOps; ++i) 6541 if (N->OperandList[i] != Ops[i]) 6542 N->OperandList[i].set(Ops[i]); 6543 6544 // If this gets put into a CSE map, add it. 6545 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 6546 return N; 6547 } 6548 6549 /// DropOperands - Release the operands and set this node to have 6550 /// zero operands. 6551 void SDNode::DropOperands() { 6552 // Unlike the code in MorphNodeTo that does this, we don't need to 6553 // watch for dead nodes here. 6554 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 6555 SDUse &Use = *I++; 6556 Use.set(SDValue()); 6557 } 6558 } 6559 6560 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 6561 /// machine opcode. 
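/// As a hedged illustration only (none of the names below come from this file; CurDAG stands for the selector's SelectionDAG handle and MyTarget::ADDri / Imm are hypothetical placeholders): during instruction selection a backend typically morphs a target-independent node in place into a machine instruction, e.g.
///
///   SDNode *New = CurDAG->SelectNodeTo(N, MyTarget::ADDri, MVT::i32,
///                                      N->getOperand(0), Imm);
///
/// If an equivalent machine node already exists, SelectNodeTo replaces all uses of N with it and removes the now-dead N (see the SDVTList overload below).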
6562 /// 6563 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6564 EVT VT) { 6565 SDVTList VTs = getVTList(VT); 6566 return SelectNodeTo(N, MachineOpc, VTs, None); 6567 } 6568 6569 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6570 EVT VT, SDValue Op1) { 6571 SDVTList VTs = getVTList(VT); 6572 SDValue Ops[] = { Op1 }; 6573 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6574 } 6575 6576 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6577 EVT VT, SDValue Op1, 6578 SDValue Op2) { 6579 SDVTList VTs = getVTList(VT); 6580 SDValue Ops[] = { Op1, Op2 }; 6581 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6582 } 6583 6584 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6585 EVT VT, SDValue Op1, 6586 SDValue Op2, SDValue Op3) { 6587 SDVTList VTs = getVTList(VT); 6588 SDValue Ops[] = { Op1, Op2, Op3 }; 6589 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6590 } 6591 6592 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6593 EVT VT, ArrayRef<SDValue> Ops) { 6594 SDVTList VTs = getVTList(VT); 6595 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6596 } 6597 6598 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6599 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 6600 SDVTList VTs = getVTList(VT1, VT2); 6601 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6602 } 6603 6604 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6605 EVT VT1, EVT VT2) { 6606 SDVTList VTs = getVTList(VT1, VT2); 6607 return SelectNodeTo(N, MachineOpc, VTs, None); 6608 } 6609 6610 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6611 EVT VT1, EVT VT2, EVT VT3, 6612 ArrayRef<SDValue> Ops) { 6613 SDVTList VTs = getVTList(VT1, VT2, VT3); 6614 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6615 } 6616 6617 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6618 EVT VT1, EVT VT2, 6619 SDValue Op1, SDValue Op2) { 6620 SDVTList VTs = getVTList(VT1, VT2); 6621 SDValue Ops[] = { Op1, Op2 }; 6622 return SelectNodeTo(N, MachineOpc, VTs, Ops); 6623 } 6624 6625 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 6626 SDVTList VTs,ArrayRef<SDValue> Ops) { 6627 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 6628 // Reset the NodeID to -1. 6629 New->setNodeId(-1); 6630 if (New != N) { 6631 ReplaceAllUsesWith(N, New); 6632 RemoveDeadNode(N); 6633 } 6634 return New; 6635 } 6636 6637 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away 6638 /// the line number information on the merged node since it is not possible to 6639 /// preserve the information that operation is associated with multiple lines. 6640 /// This will make the debugger working better at -O0, were there is a higher 6641 /// probability having other instructions associated with that line. 6642 /// 6643 /// For IROrder, we keep the smaller of the two 6644 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { 6645 DebugLoc NLoc = N->getDebugLoc(); 6646 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) { 6647 N->setDebugLoc(DebugLoc()); 6648 } 6649 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); 6650 N->setIROrder(Order); 6651 return N; 6652 } 6653 6654 /// MorphNodeTo - This *mutates* the specified node to have the specified 6655 /// return type, opcode, and operands. 6656 /// 6657 /// Note that MorphNodeTo returns the resultant node. 
If there is already a 6658 /// node of the specified opcode and operands, it returns that node instead of 6659 /// the current one. Note that the SDLoc need not be the same. 6660 /// 6661 /// Using MorphNodeTo is faster than creating a new node and swapping it in 6662 /// with ReplaceAllUsesWith both because it often avoids allocating a new 6663 /// node, and because it doesn't require CSE recalculation for any of 6664 /// the node's users. 6665 /// 6666 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG. 6667 /// As a consequence it isn't appropriate to use from within the DAG combiner or 6668 /// the legalizer which maintain worklists that would need to be updated when 6669 /// deleting things. 6670 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, 6671 SDVTList VTs, ArrayRef<SDValue> Ops) { 6672 // If an identical node already exists, use it. 6673 void *IP = nullptr; 6674 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { 6675 FoldingSetNodeID ID; 6676 AddNodeIDNode(ID, Opc, VTs, Ops); 6677 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) 6678 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); 6679 } 6680 6681 if (!RemoveNodeFromCSEMaps(N)) 6682 IP = nullptr; 6683 6684 // Start the morphing. 6685 N->NodeType = Opc; 6686 N->ValueList = VTs.VTs; 6687 N->NumValues = VTs.NumVTs; 6688 6689 // Clear the operands list, updating used nodes to remove this from their 6690 // use list. Keep track of any operands that become dead as a result. 6691 SmallPtrSet<SDNode*, 16> DeadNodeSet; 6692 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 6693 SDUse &Use = *I++; 6694 SDNode *Used = Use.getNode(); 6695 Use.set(SDValue()); 6696 if (Used->use_empty()) 6697 DeadNodeSet.insert(Used); 6698 } 6699 6700 // For MachineNode, initialize the memory references information. 6701 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) 6702 MN->setMemRefs(nullptr, nullptr); 6703 6704 // Swap for an appropriately sized array from the recycler. 6705 removeOperands(N); 6706 createOperands(N, Ops); 6707 6708 // Delete any nodes that are still dead after adding the uses for the 6709 // new operands. 6710 if (!DeadNodeSet.empty()) { 6711 SmallVector<SDNode *, 16> DeadNodes; 6712 for (SDNode *N : DeadNodeSet) 6713 if (N->use_empty()) 6714 DeadNodes.push_back(N); 6715 RemoveDeadNodes(DeadNodes); 6716 } 6717 6718 if (IP) 6719 CSEMap.InsertNode(N, IP); // Memoize the new node. 
6720 return N; 6721 } 6722 6723 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) { 6724 unsigned OrigOpc = Node->getOpcode(); 6725 unsigned NewOpc; 6726 bool IsUnary = false; 6727 bool IsTernary = false; 6728 switch (OrigOpc) { 6729 default: 6730 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!"); 6731 case ISD::STRICT_FADD: NewOpc = ISD::FADD; break; 6732 case ISD::STRICT_FSUB: NewOpc = ISD::FSUB; break; 6733 case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break; 6734 case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break; 6735 case ISD::STRICT_FREM: NewOpc = ISD::FREM; break; 6736 case ISD::STRICT_FMA: NewOpc = ISD::FMA; IsTernary = true; break; 6737 case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; IsUnary = true; break; 6738 case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break; 6739 case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break; 6740 case ISD::STRICT_FSIN: NewOpc = ISD::FSIN; IsUnary = true; break; 6741 case ISD::STRICT_FCOS: NewOpc = ISD::FCOS; IsUnary = true; break; 6742 case ISD::STRICT_FEXP: NewOpc = ISD::FEXP; IsUnary = true; break; 6743 case ISD::STRICT_FEXP2: NewOpc = ISD::FEXP2; IsUnary = true; break; 6744 case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; IsUnary = true; break; 6745 case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; IsUnary = true; break; 6746 case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; IsUnary = true; break; 6747 case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; IsUnary = true; break; 6748 case ISD::STRICT_FNEARBYINT: 6749 NewOpc = ISD::FNEARBYINT; 6750 IsUnary = true; 6751 break; 6752 } 6753 6754 // We're taking this node out of the chain, so we need to re-link things. 6755 SDValue InputChain = Node->getOperand(0); 6756 SDValue OutputChain = SDValue(Node, 1); 6757 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 6758 6759 SDVTList VTs = getVTList(Node->getOperand(1).getValueType()); 6760 SDNode *Res = nullptr; 6761 if (IsUnary) 6762 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1) }); 6763 else if (IsTernary) 6764 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1), 6765 Node->getOperand(2), 6766 Node->getOperand(3)}); 6767 else 6768 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1), 6769 Node->getOperand(2) }); 6770 6771 // MorphNodeTo can operate in two ways: if an existing node with the 6772 // specified operands exists, it can just return it. Otherwise, it 6773 // updates the node in place to have the requested operands. 6774 if (Res == Node) { 6775 // If we updated the node in place, reset the node ID. To the isel, 6776 // this should be just like a newly allocated machine node. 6777 Res->setNodeId(-1); 6778 } else { 6779 ReplaceAllUsesWith(Node, Res); 6780 RemoveDeadNode(Node); 6781 } 6782 6783 return Res; 6784 } 6785 6786 /// getMachineNode - These are used for target selectors to create a new node 6787 /// with specified return type(s), MachineInstr opcode, and operands. 6788 /// 6789 /// Note that getMachineNode returns the resultant node. If there is already a 6790 /// node of the specified opcode and operands, it returns that node instead of 6791 /// the current one. 
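///
/// A minimal usage sketch (hedged; the target opcode and surrounding names below are hypothetical, not taken from this file): a target's instruction selector might build a single-result machine add directly, e.g.
///
///   MachineSDNode *Add =
///       CurDAG->getMachineNode(MyTarget::ADDrr, DL, MVT::i32, LHS, RHS);
///
/// The multi-VT overloads below serve instructions that also define a chain or glue result. Because machine nodes are CSE'd like other nodes (unless their last result is MVT::Glue), calling this again with the same opcode, types, and operands returns the existing node rather than creating a duplicate.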
6792 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6793 EVT VT) { 6794 SDVTList VTs = getVTList(VT); 6795 return getMachineNode(Opcode, dl, VTs, None); 6796 } 6797 6798 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6799 EVT VT, SDValue Op1) { 6800 SDVTList VTs = getVTList(VT); 6801 SDValue Ops[] = { Op1 }; 6802 return getMachineNode(Opcode, dl, VTs, Ops); 6803 } 6804 6805 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6806 EVT VT, SDValue Op1, SDValue Op2) { 6807 SDVTList VTs = getVTList(VT); 6808 SDValue Ops[] = { Op1, Op2 }; 6809 return getMachineNode(Opcode, dl, VTs, Ops); 6810 } 6811 6812 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6813 EVT VT, SDValue Op1, SDValue Op2, 6814 SDValue Op3) { 6815 SDVTList VTs = getVTList(VT); 6816 SDValue Ops[] = { Op1, Op2, Op3 }; 6817 return getMachineNode(Opcode, dl, VTs, Ops); 6818 } 6819 6820 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6821 EVT VT, ArrayRef<SDValue> Ops) { 6822 SDVTList VTs = getVTList(VT); 6823 return getMachineNode(Opcode, dl, VTs, Ops); 6824 } 6825 6826 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6827 EVT VT1, EVT VT2, SDValue Op1, 6828 SDValue Op2) { 6829 SDVTList VTs = getVTList(VT1, VT2); 6830 SDValue Ops[] = { Op1, Op2 }; 6831 return getMachineNode(Opcode, dl, VTs, Ops); 6832 } 6833 6834 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6835 EVT VT1, EVT VT2, SDValue Op1, 6836 SDValue Op2, SDValue Op3) { 6837 SDVTList VTs = getVTList(VT1, VT2); 6838 SDValue Ops[] = { Op1, Op2, Op3 }; 6839 return getMachineNode(Opcode, dl, VTs, Ops); 6840 } 6841 6842 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6843 EVT VT1, EVT VT2, 6844 ArrayRef<SDValue> Ops) { 6845 SDVTList VTs = getVTList(VT1, VT2); 6846 return getMachineNode(Opcode, dl, VTs, Ops); 6847 } 6848 6849 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6850 EVT VT1, EVT VT2, EVT VT3, 6851 SDValue Op1, SDValue Op2) { 6852 SDVTList VTs = getVTList(VT1, VT2, VT3); 6853 SDValue Ops[] = { Op1, Op2 }; 6854 return getMachineNode(Opcode, dl, VTs, Ops); 6855 } 6856 6857 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6858 EVT VT1, EVT VT2, EVT VT3, 6859 SDValue Op1, SDValue Op2, 6860 SDValue Op3) { 6861 SDVTList VTs = getVTList(VT1, VT2, VT3); 6862 SDValue Ops[] = { Op1, Op2, Op3 }; 6863 return getMachineNode(Opcode, dl, VTs, Ops); 6864 } 6865 6866 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6867 EVT VT1, EVT VT2, EVT VT3, 6868 ArrayRef<SDValue> Ops) { 6869 SDVTList VTs = getVTList(VT1, VT2, VT3); 6870 return getMachineNode(Opcode, dl, VTs, Ops); 6871 } 6872 6873 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 6874 ArrayRef<EVT> ResultTys, 6875 ArrayRef<SDValue> Ops) { 6876 SDVTList VTs = getVTList(ResultTys); 6877 return getMachineNode(Opcode, dl, VTs, Ops); 6878 } 6879 6880 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 6881 SDVTList VTs, 6882 ArrayRef<SDValue> Ops) { 6883 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 6884 MachineSDNode *N; 6885 void *IP = nullptr; 6886 6887 if (DoCSE) { 6888 FoldingSetNodeID ID; 6889 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 6890 IP = nullptr; 6891 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 6892 return 
cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 6893 } 6894 } 6895 6896 // Allocate a new MachineSDNode. 6897 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 6898 createOperands(N, Ops); 6899 6900 if (DoCSE) 6901 CSEMap.InsertNode(N, IP); 6902 6903 InsertNode(N); 6904 return N; 6905 } 6906 6907 /// getTargetExtractSubreg - A convenience function for creating 6908 /// TargetOpcode::EXTRACT_SUBREG nodes. 6909 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 6910 SDValue Operand) { 6911 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 6912 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 6913 VT, Operand, SRIdxVal); 6914 return SDValue(Subreg, 0); 6915 } 6916 6917 /// getTargetInsertSubreg - A convenience function for creating 6918 /// TargetOpcode::INSERT_SUBREG nodes. 6919 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 6920 SDValue Operand, SDValue Subreg) { 6921 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 6922 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 6923 VT, Operand, Subreg, SRIdxVal); 6924 return SDValue(Result, 0); 6925 } 6926 6927 /// getNodeIfExists - Get the specified node if it's already available, or 6928 /// else return NULL. 6929 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 6930 ArrayRef<SDValue> Ops, 6931 const SDNodeFlags Flags) { 6932 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 6933 FoldingSetNodeID ID; 6934 AddNodeIDNode(ID, Opcode, VTList, Ops); 6935 void *IP = nullptr; 6936 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 6937 E->intersectFlagsWith(Flags); 6938 return E; 6939 } 6940 } 6941 return nullptr; 6942 } 6943 6944 /// getDbgValue - Creates a SDDbgValue node. 6945 /// 6946 /// SDNode 6947 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 6948 SDNode *N, unsigned R, bool IsIndirect, 6949 const DebugLoc &DL, unsigned O) { 6950 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6951 "Expected inlined-at fields to agree"); 6952 return new (DbgInfo->getAlloc()) 6953 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 6954 } 6955 6956 /// Constant 6957 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 6958 DIExpression *Expr, 6959 const Value *C, 6960 const DebugLoc &DL, unsigned O) { 6961 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6962 "Expected inlined-at fields to agree"); 6963 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 6964 } 6965 6966 /// FrameIndex 6967 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 6968 DIExpression *Expr, unsigned FI, 6969 const DebugLoc &DL, 6970 unsigned O) { 6971 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 6972 "Expected inlined-at fields to agree"); 6973 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, DL, O); 6974 } 6975 6976 namespace { 6977 6978 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 6979 /// pointed to by a use iterator is deleted, increment the use iterator 6980 /// so that it doesn't dangle. 6981 /// 6982 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 6983 SDNode::use_iterator &UI; 6984 SDNode::use_iterator &UE; 6985 6986 void NodeDeleted(SDNode *N, SDNode *E) override { 6987 // Increment the iterator as needed. 
6988 while (UI != UE && N == *UI) 6989 ++UI; 6990 } 6991 6992 public: 6993 RAUWUpdateListener(SelectionDAG &d, 6994 SDNode::use_iterator &ui, 6995 SDNode::use_iterator &ue) 6996 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 6997 }; 6998 6999 } // end anonymous namespace 7000 7001 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 7002 /// This can cause recursive merging of nodes in the DAG. 7003 /// 7004 /// This version assumes From has a single result value. 7005 /// 7006 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { 7007 SDNode *From = FromN.getNode(); 7008 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && 7009 "Cannot replace with this method!"); 7010 assert(From != To.getNode() && "Cannot replace uses of with self"); 7011 7012 // Preserve Debug Values 7013 TransferDbgValues(FromN, To); 7014 7015 // Iterate over all the existing uses of From. New uses will be added 7016 // to the beginning of the use list, which we avoid visiting. 7017 // This specifically avoids visiting uses of From that arise while the 7018 // replacement is happening, because any such uses would be the result 7019 // of CSE: If an existing node looks like From after one of its operands 7020 // is replaced by To, we don't want to replace of all its users with To 7021 // too. See PR3018 for more info. 7022 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 7023 RAUWUpdateListener Listener(*this, UI, UE); 7024 while (UI != UE) { 7025 SDNode *User = *UI; 7026 7027 // This node is about to morph, remove its old self from the CSE maps. 7028 RemoveNodeFromCSEMaps(User); 7029 7030 // A user can appear in a use list multiple times, and when this 7031 // happens the uses are usually next to each other in the list. 7032 // To help reduce the number of CSE recomputations, process all 7033 // the uses of this user that we can find this way. 7034 do { 7035 SDUse &Use = UI.getUse(); 7036 ++UI; 7037 Use.set(To); 7038 } while (UI != UE && *UI == User); 7039 7040 // Now that we have modified User, add it back to the CSE maps. If it 7041 // already exists there, recursively merge the results together. 7042 AddModifiedNodeToCSEMaps(User); 7043 } 7044 7045 // If we just RAUW'd the root, take note. 7046 if (FromN == getRoot()) 7047 setRoot(To); 7048 } 7049 7050 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 7051 /// This can cause recursive merging of nodes in the DAG. 7052 /// 7053 /// This version assumes that for each value of From, there is a 7054 /// corresponding value in To in the same position with the same type. 7055 /// 7056 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) { 7057 #ifndef NDEBUG 7058 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 7059 assert((!From->hasAnyUseOfValue(i) || 7060 From->getValueType(i) == To->getValueType(i)) && 7061 "Cannot use this version of ReplaceAllUsesWith!"); 7062 #endif 7063 7064 // Handle the trivial case. 7065 if (From == To) 7066 return; 7067 7068 // Preserve Debug Info. Only do this if there's a use. 7069 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 7070 if (From->hasAnyUseOfValue(i)) { 7071 assert((i < To->getNumValues()) && "Invalid To location"); 7072 TransferDbgValues(SDValue(From, i), SDValue(To, i)); 7073 } 7074 7075 // Iterate over just the existing users of From. See the comments in 7076 // the ReplaceAllUsesWith above. 
7077 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 7078 RAUWUpdateListener Listener(*this, UI, UE); 7079 while (UI != UE) { 7080 SDNode *User = *UI; 7081 7082 // This node is about to morph, remove its old self from the CSE maps. 7083 RemoveNodeFromCSEMaps(User); 7084 7085 // A user can appear in a use list multiple times, and when this 7086 // happens the uses are usually next to each other in the list. 7087 // To help reduce the number of CSE recomputations, process all 7088 // the uses of this user that we can find this way. 7089 do { 7090 SDUse &Use = UI.getUse(); 7091 ++UI; 7092 Use.setNode(To); 7093 } while (UI != UE && *UI == User); 7094 7095 // Now that we have modified User, add it back to the CSE maps. If it 7096 // already exists there, recursively merge the results together. 7097 AddModifiedNodeToCSEMaps(User); 7098 } 7099 7100 // If we just RAUW'd the root, take note. 7101 if (From == getRoot().getNode()) 7102 setRoot(SDValue(To, getRoot().getResNo())); 7103 } 7104 7105 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 7106 /// This can cause recursive merging of nodes in the DAG. 7107 /// 7108 /// This version can replace From with any result values. To must match the 7109 /// number and types of values returned by From. 7110 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 7111 if (From->getNumValues() == 1) // Handle the simple case efficiently. 7112 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 7113 7114 // Preserve Debug Info. 7115 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 7116 TransferDbgValues(SDValue(From, i), *To); 7117 7118 // Iterate over just the existing users of From. See the comments in 7119 // the ReplaceAllUsesWith above. 7120 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 7121 RAUWUpdateListener Listener(*this, UI, UE); 7122 while (UI != UE) { 7123 SDNode *User = *UI; 7124 7125 // This node is about to morph, remove its old self from the CSE maps. 7126 RemoveNodeFromCSEMaps(User); 7127 7128 // A user can appear in a use list multiple times, and when this 7129 // happens the uses are usually next to each other in the list. 7130 // To help reduce the number of CSE recomputations, process all 7131 // the uses of this user that we can find this way. 7132 do { 7133 SDUse &Use = UI.getUse(); 7134 const SDValue &ToOp = To[Use.getResNo()]; 7135 ++UI; 7136 Use.set(ToOp); 7137 } while (UI != UE && *UI == User); 7138 7139 // Now that we have modified User, add it back to the CSE maps. If it 7140 // already exists there, recursively merge the results together. 7141 AddModifiedNodeToCSEMaps(User); 7142 } 7143 7144 // If we just RAUW'd the root, take note. 7145 if (From == getRoot().getNode()) 7146 setRoot(SDValue(To[getRoot().getResNo()])); 7147 } 7148 7149 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 7150 /// uses of other values produced by From.getNode() alone. The Deleted 7151 /// vector is handled the same way as for ReplaceAllUsesWith. 7152 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 7153 // Handle the really simple, really trivial case efficiently. 7154 if (From == To) return; 7155 7156 // Handle the simple, trivial, case efficiently. 7157 if (From.getNode()->getNumValues() == 1) { 7158 ReplaceAllUsesWith(From, To); 7159 return; 7160 } 7161 7162 // Preserve Debug Info. 7163 TransferDbgValues(From, To); 7164 7165 // Iterate over just the existing users of From. 
See the comments in 7166 // the ReplaceAllUsesWith above. 7167 SDNode::use_iterator UI = From.getNode()->use_begin(), 7168 UE = From.getNode()->use_end(); 7169 RAUWUpdateListener Listener(*this, UI, UE); 7170 while (UI != UE) { 7171 SDNode *User = *UI; 7172 bool UserRemovedFromCSEMaps = false; 7173 7174 // A user can appear in a use list multiple times, and when this 7175 // happens the uses are usually next to each other in the list. 7176 // To help reduce the number of CSE recomputations, process all 7177 // the uses of this user that we can find this way. 7178 do { 7179 SDUse &Use = UI.getUse(); 7180 7181 // Skip uses of different values from the same node. 7182 if (Use.getResNo() != From.getResNo()) { 7183 ++UI; 7184 continue; 7185 } 7186 7187 // If this node hasn't been modified yet, it's still in the CSE maps, 7188 // so remove its old self from the CSE maps. 7189 if (!UserRemovedFromCSEMaps) { 7190 RemoveNodeFromCSEMaps(User); 7191 UserRemovedFromCSEMaps = true; 7192 } 7193 7194 ++UI; 7195 Use.set(To); 7196 } while (UI != UE && *UI == User); 7197 7198 // We are iterating over all uses of the From node, so if a use 7199 // doesn't use the specific value, no changes are made. 7200 if (!UserRemovedFromCSEMaps) 7201 continue; 7202 7203 // Now that we have modified User, add it back to the CSE maps. If it 7204 // already exists there, recursively merge the results together. 7205 AddModifiedNodeToCSEMaps(User); 7206 } 7207 7208 // If we just RAUW'd the root, take note. 7209 if (From == getRoot()) 7210 setRoot(To); 7211 } 7212 7213 namespace { 7214 7215 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 7216 /// to record information about a use. 7217 struct UseMemo { 7218 SDNode *User; 7219 unsigned Index; 7220 SDUse *Use; 7221 }; 7222 7223 /// operator< - Sort Memos by User. 7224 bool operator<(const UseMemo &L, const UseMemo &R) { 7225 return (intptr_t)L.User < (intptr_t)R.User; 7226 } 7227 7228 } // end anonymous namespace 7229 7230 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving 7231 /// uses of other values produced by From.getNode() alone. The same value 7232 /// may appear in both the From and To list. The Deleted vector is 7233 /// handled the same way as for ReplaceAllUsesWith. 7234 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From, 7235 const SDValue *To, 7236 unsigned Num){ 7237 // Handle the simple, trivial case efficiently. 7238 if (Num == 1) 7239 return ReplaceAllUsesOfValueWith(*From, *To); 7240 7241 TransferDbgValues(*From, *To); 7242 7243 // Read up all the uses and make records of them. This helps 7244 // processing new uses that are introduced during the 7245 // replacement process. 7246 SmallVector<UseMemo, 4> Uses; 7247 for (unsigned i = 0; i != Num; ++i) { 7248 unsigned FromResNo = From[i].getResNo(); 7249 SDNode *FromNode = From[i].getNode(); 7250 for (SDNode::use_iterator UI = FromNode->use_begin(), 7251 E = FromNode->use_end(); UI != E; ++UI) { 7252 SDUse &Use = UI.getUse(); 7253 if (Use.getResNo() == FromResNo) { 7254 UseMemo Memo = { *UI, i, &Use }; 7255 Uses.push_back(Memo); 7256 } 7257 } 7258 } 7259 7260 // Sort the uses, so that all the uses from a given User are together. 7261 std::sort(Uses.begin(), Uses.end()); 7262 7263 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size(); 7264 UseIndex != UseIndexEnd; ) { 7265 // We know that this user uses some value of From. If it is the right 7266 // value, update it. 
7267 SDNode *User = Uses[UseIndex].User; 7268 7269 // This node is about to morph, remove its old self from the CSE maps. 7270 RemoveNodeFromCSEMaps(User); 7271 7272 // The Uses array is sorted, so all the uses for a given User 7273 // are next to each other in the list. 7274 // To help reduce the number of CSE recomputations, process all 7275 // the uses of this user that we can find this way. 7276 do { 7277 unsigned i = Uses[UseIndex].Index; 7278 SDUse &Use = *Uses[UseIndex].Use; 7279 ++UseIndex; 7280 7281 Use.set(To[i]); 7282 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User); 7283 7284 // Now that we have modified User, add it back to the CSE maps. If it 7285 // already exists there, recursively merge the results together. 7286 AddModifiedNodeToCSEMaps(User); 7287 } 7288 } 7289 7290 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG 7291 /// based on their topological order. It returns the maximum id and leaves 7292 /// the nodes in the node list in topologically sorted order. 7293 unsigned SelectionDAG::AssignTopologicalOrder() { 7294 unsigned DAGSize = 0; 7295 7296 // SortedPos tracks the progress of the algorithm. Nodes before it are 7297 // sorted, nodes after it are unsorted. When the algorithm completes 7298 // it is at the end of the list. 7299 allnodes_iterator SortedPos = allnodes_begin(); 7300 7301 // Visit all the nodes. Move nodes with no operands to the front of 7302 // the list immediately. Annotate nodes that do have operands with their 7303 // operand count. Before we do this, the Node Id fields of the nodes 7304 // may contain arbitrary values. After, the Node Id fields for nodes 7305 // before SortedPos will contain the topological sort index, and the 7306 // Node Id fields for nodes at SortedPos and after will contain the 7307 // count of outstanding operands. 7308 for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) { 7309 SDNode *N = &*I++; 7310 checkForCycles(N, this); 7311 unsigned Degree = N->getNumOperands(); 7312 if (Degree == 0) { 7313 // A node with no operands, add it to the sorted list immediately. 7314 N->setNodeId(DAGSize++); 7315 allnodes_iterator Q(N); 7316 if (Q != SortedPos) 7317 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q)); 7318 assert(SortedPos != AllNodes.end() && "Overran node list"); 7319 ++SortedPos; 7320 } else { 7321 // Temporarily use the Node Id as scratch space for the degree count. 7322 N->setNodeId(Degree); 7323 } 7324 } 7325 7326 // Visit all the nodes. As we iterate, move nodes into sorted order, 7327 // such that by the time the end is reached all nodes will be sorted. 7328 for (SDNode &Node : allnodes()) { 7329 SDNode *N = &Node; 7330 checkForCycles(N, this); 7331 // N is in sorted position, so all its uses have one less operand 7332 // that needs to be sorted. 7333 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 7334 UI != UE; ++UI) { 7335 SDNode *P = *UI; 7336 unsigned Degree = P->getNodeId(); 7337 assert(Degree != 0 && "Invalid node degree"); 7338 --Degree; 7339 if (Degree == 0) { 7340 // All of P's operands are sorted, so P may be sorted now. 7341 P->setNodeId(DAGSize++); 7342 if (P->getIterator() != SortedPos) 7343 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P)); 7344 assert(SortedPos != AllNodes.end() && "Overran node list"); 7345 ++SortedPos; 7346 } else { 7347 // Update P's outstanding operand count.
7348 P->setNodeId(Degree); 7349 } 7350 } 7351 if (Node.getIterator() == SortedPos) { 7352 #ifndef NDEBUG 7353 allnodes_iterator I(N); 7354 SDNode *S = &*++I; 7355 dbgs() << "Overran sorted position:\n"; 7356 S->dumprFull(this); dbgs() << "\n"; 7357 dbgs() << "Checking if this is due to cycles\n"; 7358 checkForCycles(this, true); 7359 #endif 7360 llvm_unreachable(nullptr); 7361 } 7362 } 7363 7364 assert(SortedPos == AllNodes.end() && 7365 "Topological sort incomplete!"); 7366 assert(AllNodes.front().getOpcode() == ISD::EntryToken && 7367 "First node in topological sort is not the entry token!"); 7368 assert(AllNodes.front().getNodeId() == 0 && 7369 "First node in topological sort has non-zero id!"); 7370 assert(AllNodes.front().getNumOperands() == 0 && 7371 "First node in topological sort has operands!"); 7372 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 && 7373 "Last node in topologic sort has unexpected id!"); 7374 assert(AllNodes.back().use_empty() && 7375 "Last node in topologic sort has users!"); 7376 assert(DAGSize == allnodes_size() && "Node count mismatch!"); 7377 return DAGSize; 7378 } 7379 7380 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the 7381 /// value is produced by SD. 7382 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) { 7383 if (SD) { 7384 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue()); 7385 SD->setHasDebugValue(true); 7386 } 7387 DbgInfo->add(DB, SD, isParameter); 7388 } 7389 7390 /// TransferDbgValues - Transfer SDDbgValues. Called in replace nodes. 7391 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) { 7392 if (From == To || !From.getNode()->getHasDebugValue()) 7393 return; 7394 SDNode *FromNode = From.getNode(); 7395 SDNode *ToNode = To.getNode(); 7396 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode); 7397 SmallVector<SDDbgValue *, 2> ClonedDVs; 7398 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end(); 7399 I != E; ++I) { 7400 SDDbgValue *Dbg = *I; 7401 // Only add Dbgvalues attached to same ResNo. 7402 if (Dbg->getKind() == SDDbgValue::SDNODE && 7403 Dbg->getSDNode() == From.getNode() && 7404 Dbg->getResNo() == From.getResNo() && !Dbg->isInvalidated()) { 7405 assert(FromNode != ToNode && 7406 "Should not transfer Debug Values intranode"); 7407 SDDbgValue *Clone = getDbgValue(Dbg->getVariable(), Dbg->getExpression(), 7408 ToNode, To.getResNo(), Dbg->isIndirect(), 7409 Dbg->getDebugLoc(), Dbg->getOrder()); 7410 ClonedDVs.push_back(Clone); 7411 Dbg->setIsInvalidated(); 7412 } 7413 } 7414 for (SDDbgValue *I : ClonedDVs) 7415 AddDbgValue(I, ToNode, false); 7416 } 7417 7418 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, 7419 SDValue NewMemOp) { 7420 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node"); 7421 // The new memory operation must have the same position as the old load in 7422 // terms of memory dependency. Create a TokenFactor for the old load and new 7423 // memory operation and update uses of the old load's output chain to use that 7424 // TokenFactor. 
7425 SDValue OldChain = SDValue(OldLoad, 1); 7426 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 7427 if (!OldLoad->hasAnyUseOfValue(1)) 7428 return NewChain; 7429 7430 SDValue TokenFactor = 7431 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 7432 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 7433 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 7434 return TokenFactor; 7435 } 7436 7437 //===----------------------------------------------------------------------===// 7438 // SDNode Class 7439 //===----------------------------------------------------------------------===// 7440 7441 bool llvm::isNullConstant(SDValue V) { 7442 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7443 return Const != nullptr && Const->isNullValue(); 7444 } 7445 7446 bool llvm::isNullFPConstant(SDValue V) { 7447 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 7448 return Const != nullptr && Const->isZero() && !Const->isNegative(); 7449 } 7450 7451 bool llvm::isAllOnesConstant(SDValue V) { 7452 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7453 return Const != nullptr && Const->isAllOnesValue(); 7454 } 7455 7456 bool llvm::isOneConstant(SDValue V) { 7457 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 7458 return Const != nullptr && Const->isOne(); 7459 } 7460 7461 bool llvm::isBitwiseNot(SDValue V) { 7462 return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1)); 7463 } 7464 7465 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) { 7466 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 7467 return CN; 7468 7469 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 7470 BitVector UndefElements; 7471 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 7472 7473 // BuildVectors can truncate their operands. Ignore that case here. 7474 // FIXME: We blindly ignore splats which include undef which is overly 7475 // pessimistic. 
7476 if (CN && UndefElements.none() && 7477 CN->getValueType(0) == N.getValueType().getScalarType()) 7478 return CN; 7479 } 7480 7481 return nullptr; 7482 } 7483 7484 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) { 7485 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 7486 return CN; 7487 7488 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 7489 BitVector UndefElements; 7490 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 7491 7492 if (CN && UndefElements.none()) 7493 return CN; 7494 } 7495 7496 return nullptr; 7497 } 7498 7499 HandleSDNode::~HandleSDNode() { 7500 DropOperands(); 7501 } 7502 7503 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 7504 const DebugLoc &DL, 7505 const GlobalValue *GA, EVT VT, 7506 int64_t o, unsigned char TF) 7507 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 7508 TheGlobal = GA; 7509 } 7510 7511 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 7512 EVT VT, unsigned SrcAS, 7513 unsigned DestAS) 7514 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 7515 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 7516 7517 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 7518 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 7519 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 7520 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 7521 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 7522 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 7523 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 7524 7525 // We check here that the size of the memory operand fits within the size of 7526 // the MMO. This is because the MMO might indicate only a possible address 7527 // range instead of specifying the affected memory addresses precisely. 7528 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!"); 7529 } 7530 7531 /// Profile - Gather unique data for the node. 7532 /// 7533 void SDNode::Profile(FoldingSetNodeID &ID) const { 7534 AddNodeIDNode(ID, this); 7535 } 7536 7537 namespace { 7538 7539 struct EVTArray { 7540 std::vector<EVT> VTs; 7541 7542 EVTArray() { 7543 VTs.reserve(MVT::LAST_VALUETYPE); 7544 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 7545 VTs.push_back(MVT((MVT::SimpleValueType)i)); 7546 } 7547 }; 7548 7549 } // end anonymous namespace 7550 7551 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 7552 static ManagedStatic<EVTArray> SimpleVTArray; 7553 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 7554 7555 /// getValueTypeList - Return a pointer to the specified value type. 7556 /// 7557 const EVT *SDNode::getValueTypeList(EVT VT) { 7558 if (VT.isExtended()) { 7559 sys::SmartScopedLock<true> Lock(*VTMutex); 7560 return &(*EVTs->insert(VT).first); 7561 } else { 7562 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 7563 "Value type out of range!"); 7564 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 7565 } 7566 } 7567 7568 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 7569 /// indicated value. This method ignores uses of other values defined by this 7570 /// operation. 
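///
/// Illustrative sketch only (the transformation and names below are assumptions, not code from this file): a combine that wants to fold a load often checks that the loaded value (result 0) and its chain (result 1) each have exactly one user before rewriting them, e.g.
///
///   if (LoadNode->hasNUsesOfValue(1, 0) && LoadNode->hasNUsesOfValue(1, 1)) {
///     // Both results have a single user, so the load can be folded into
///     // that user (hypothetical transformation).
///   }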
7571 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { 7572 assert(Value < getNumValues() && "Bad value!"); 7573 7574 // TODO: Only iterate over uses of a given value of the node 7575 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { 7576 if (UI.getUse().getResNo() == Value) { 7577 if (NUses == 0) 7578 return false; 7579 --NUses; 7580 } 7581 } 7582 7583 // Found exactly the right number of uses? 7584 return NUses == 0; 7585 } 7586 7587 /// hasAnyUseOfValue - Return true if there are any use of the indicated 7588 /// value. This method ignores uses of other values defined by this operation. 7589 bool SDNode::hasAnyUseOfValue(unsigned Value) const { 7590 assert(Value < getNumValues() && "Bad value!"); 7591 7592 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) 7593 if (UI.getUse().getResNo() == Value) 7594 return true; 7595 7596 return false; 7597 } 7598 7599 /// isOnlyUserOf - Return true if this node is the only use of N. 7600 bool SDNode::isOnlyUserOf(const SDNode *N) const { 7601 bool Seen = false; 7602 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 7603 SDNode *User = *I; 7604 if (User == this) 7605 Seen = true; 7606 else 7607 return false; 7608 } 7609 7610 return Seen; 7611 } 7612 7613 /// Return true if the only users of N are contained in Nodes. 7614 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) { 7615 bool Seen = false; 7616 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 7617 SDNode *User = *I; 7618 if (llvm::any_of(Nodes, 7619 [&User](const SDNode *Node) { return User == Node; })) 7620 Seen = true; 7621 else 7622 return false; 7623 } 7624 7625 return Seen; 7626 } 7627 7628 /// isOperand - Return true if this node is an operand of N. 7629 bool SDValue::isOperandOf(const SDNode *N) const { 7630 for (const SDValue &Op : N->op_values()) 7631 if (*this == Op) 7632 return true; 7633 return false; 7634 } 7635 7636 bool SDNode::isOperandOf(const SDNode *N) const { 7637 for (const SDValue &Op : N->op_values()) 7638 if (this == Op.getNode()) 7639 return true; 7640 return false; 7641 } 7642 7643 /// reachesChainWithoutSideEffects - Return true if this operand (which must 7644 /// be a chain) reaches the specified operand without crossing any 7645 /// side-effecting instructions on any chain path. In practice, this looks 7646 /// through token factors and non-volatile loads. In order to remain efficient, 7647 /// this only looks a couple of nodes in, it does not do an exhaustive search. 7648 /// 7649 /// Note that we only need to examine chains when we're searching for 7650 /// side-effects; SelectionDAG requires that all side-effects are represented 7651 /// by chains, even if another operand would force a specific ordering. This 7652 /// constraint is necessary to allow transformations like splitting loads. 7653 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, 7654 unsigned Depth) const { 7655 if (*this == Dest) return true; 7656 7657 // Don't search too deeply, we just want to be able to see through 7658 // TokenFactor's etc. 7659 if (Depth == 0) return false; 7660 7661 // If this is a token factor, all inputs to the TF happen in parallel. 7662 if (getOpcode() == ISD::TokenFactor) { 7663 // First, try a shallow search. 7664 if (is_contained((*this)->ops(), Dest)) { 7665 // We found the chain we want as an operand of this TokenFactor. 
7666 // Essentially, we reach the chain without side-effects if we could 7667 // serialize the TokenFactor into a simple chain of operations with 7668 // Dest as the last operation. This is automatically true if the 7669 // chain has one use: there are no other ordering constraints. 7670 // If the chain has more than one use, we give up: some other 7671 // use of Dest might force a side-effect between Dest and the current 7672 // node. 7673 if (Dest.hasOneUse()) 7674 return true; 7675 } 7676 // Next, try a deep search: check whether every operand of the TokenFactor 7677 // reaches Dest. 7678 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 7679 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 7680 }); 7681 } 7682 7683 // Loads don't have side effects, look through them. 7684 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 7685 if (!Ld->isVolatile()) 7686 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 7687 } 7688 return false; 7689 } 7690 7691 bool SDNode::hasPredecessor(const SDNode *N) const { 7692 SmallPtrSet<const SDNode *, 32> Visited; 7693 SmallVector<const SDNode *, 16> Worklist; 7694 Worklist.push_back(this); 7695 return hasPredecessorHelper(N, Visited, Worklist); 7696 } 7697 7698 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 7699 this->Flags.intersectWith(Flags); 7700 } 7701 7702 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 7703 assert(N->getNumValues() == 1 && 7704 "Can't unroll a vector with multiple results!"); 7705 7706 EVT VT = N->getValueType(0); 7707 unsigned NE = VT.getVectorNumElements(); 7708 EVT EltVT = VT.getVectorElementType(); 7709 SDLoc dl(N); 7710 7711 SmallVector<SDValue, 8> Scalars; 7712 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 7713 7714 // If ResNE is 0, fully unroll the vector op. 7715 if (ResNE == 0) 7716 ResNE = NE; 7717 else if (NE > ResNE) 7718 NE = ResNE; 7719 7720 unsigned i; 7721 for (i= 0; i != NE; ++i) { 7722 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 7723 SDValue Operand = N->getOperand(j); 7724 EVT OperandVT = Operand.getValueType(); 7725 if (OperandVT.isVector()) { 7726 // A vector operand; extract a single element. 7727 EVT OperandEltVT = OperandVT.getVectorElementType(); 7728 Operands[j] = 7729 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand, 7730 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout()))); 7731 } else { 7732 // A scalar operand; just use it as is. 
7733 Operands[j] = Operand; 7734 } 7735 } 7736 7737 switch (N->getOpcode()) { 7738 default: { 7739 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 7740 N->getFlags())); 7741 break; 7742 } 7743 case ISD::VSELECT: 7744 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 7745 break; 7746 case ISD::SHL: 7747 case ISD::SRA: 7748 case ISD::SRL: 7749 case ISD::ROTL: 7750 case ISD::ROTR: 7751 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 7752 getShiftAmountOperand(Operands[0].getValueType(), 7753 Operands[1]))); 7754 break; 7755 case ISD::SIGN_EXTEND_INREG: 7756 case ISD::FP_ROUND_INREG: { 7757 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 7758 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 7759 Operands[0], 7760 getValueType(ExtVT))); 7761 } 7762 } 7763 } 7764 7765 for (; i < ResNE; ++i) 7766 Scalars.push_back(getUNDEF(EltVT)); 7767 7768 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 7769 return getBuildVector(VecVT, dl, Scalars); 7770 } 7771 7772 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 7773 LoadSDNode *Base, 7774 unsigned Bytes, 7775 int Dist) const { 7776 if (LD->isVolatile() || Base->isVolatile()) 7777 return false; 7778 if (LD->isIndexed() || Base->isIndexed()) 7779 return false; 7780 if (LD->getChain() != Base->getChain()) 7781 return false; 7782 EVT VT = LD->getValueType(0); 7783 if (VT.getSizeInBits() / 8 != Bytes) 7784 return false; 7785 7786 SDValue Loc = LD->getOperand(1); 7787 SDValue BaseLoc = Base->getOperand(1); 7788 7789 auto BaseLocDecomp = BaseIndexOffset::match(BaseLoc, *this); 7790 auto LocDecomp = BaseIndexOffset::match(Loc, *this); 7791 7792 int64_t Offset = 0; 7793 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset)) 7794 return (Dist * Bytes == Offset); 7795 return false; 7796 } 7797 7798 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if 7799 /// it cannot be inferred. 7800 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { 7801 // If this is a GlobalAddress + cst, return the alignment. 7802 const GlobalValue *GV; 7803 int64_t GVOffset = 0; 7804 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { 7805 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); 7806 KnownBits Known(PtrWidth); 7807 llvm::computeKnownBits(GV, Known, getDataLayout()); 7808 unsigned AlignBits = Known.countMinTrailingZeros(); 7809 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; 7810 if (Align) 7811 return MinAlign(Align, GVOffset); 7812 } 7813 7814 // If this is a direct reference to a stack slot, use information about the 7815 // stack slot's alignment. 7816 int FrameIdx = 1 << 31; 7817 int64_t FrameOffset = 0; 7818 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) { 7819 FrameIdx = FI->getIndex(); 7820 } else if (isBaseWithConstantOffset(Ptr) && 7821 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 7822 // Handle FI+Cst 7823 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 7824 FrameOffset = Ptr.getConstantOperandVal(1); 7825 } 7826 7827 if (FrameIdx != (1 << 31)) { 7828 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 7829 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx), 7830 FrameOffset); 7831 return FIInfoAlign; 7832 } 7833 7834 return 0; 7835 } 7836 7837 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type 7838 /// which is split (or expanded) into two not necessarily identical pieces. 
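///
/// A short sketch of the common vector case (hedged; "DAG", "Vec" and "DL" are placeholders for the caller's SelectionDAG, vector operand and debug location, and the concrete types are just an example):
///
///   EVT LoVT, HiVT;
///   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MVT::v8i32); // v4i32, v4i32
///   SDValue Lo, Hi;
///   std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL, LoVT, HiVT);
///
/// For non-vector types the two halves instead come from getTypeToTransformTo.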
7839 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const { 7840 // Currently all types are split in half. 7841 EVT LoVT, HiVT; 7842 if (!VT.isVector()) 7843 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); 7844 else 7845 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext()); 7846 7847 return std::make_pair(LoVT, HiVT); 7848 } 7849 7850 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the 7851 /// low/high part. 7852 std::pair<SDValue, SDValue> 7853 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, 7854 const EVT &HiVT) { 7855 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <= 7856 N.getValueType().getVectorNumElements() && 7857 "More vector elements requested than available!"); 7858 SDValue Lo, Hi; 7859 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, 7860 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout()))); 7861 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, 7862 getConstant(LoVT.getVectorNumElements(), DL, 7863 TLI->getVectorIdxTy(getDataLayout()))); 7864 return std::make_pair(Lo, Hi); 7865 } 7866 7867 void SelectionDAG::ExtractVectorElements(SDValue Op, 7868 SmallVectorImpl<SDValue> &Args, 7869 unsigned Start, unsigned Count) { 7870 EVT VT = Op.getValueType(); 7871 if (Count == 0) 7872 Count = VT.getVectorNumElements(); 7873 7874 EVT EltVT = VT.getVectorElementType(); 7875 EVT IdxTy = TLI->getVectorIdxTy(getDataLayout()); 7876 SDLoc SL(Op); 7877 for (unsigned i = Start, e = Start + Count; i != e; ++i) { 7878 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 7879 Op, getConstant(i, SL, IdxTy))); 7880 } 7881 } 7882 7883 // getAddressSpace - Return the address space this GlobalAddress belongs to. 7884 unsigned GlobalAddressSDNode::getAddressSpace() const { 7885 return getGlobal()->getType()->getAddressSpace(); 7886 } 7887 7888 Type *ConstantPoolSDNode::getType() const { 7889 if (isMachineConstantPoolEntry()) 7890 return Val.MachineCPVal->getType(); 7891 return Val.ConstVal->getType(); 7892 } 7893 7894 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef, 7895 unsigned &SplatBitSize, 7896 bool &HasAnyUndefs, 7897 unsigned MinSplatBits, 7898 bool IsBigEndian) const { 7899 EVT VT = getValueType(0); 7900 assert(VT.isVector() && "Expected a vector type"); 7901 unsigned VecWidth = VT.getSizeInBits(); 7902 if (MinSplatBits > VecWidth) 7903 return false; 7904 7905 // FIXME: The widths are based on this node's type, but build vectors can 7906 // truncate their operands. 7907 SplatValue = APInt(VecWidth, 0); 7908 SplatUndef = APInt(VecWidth, 0); 7909 7910 // Get the bits. Bits with undefined values (when the corresponding element 7911 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared 7912 // in SplatValue. If any of the values are not constant, give up and return 7913 // false. 7914 unsigned int NumOps = getNumOperands(); 7915 assert(NumOps > 0 && "isConstantSplat has 0-size build vector"); 7916 unsigned EltWidth = VT.getScalarSizeInBits(); 7917 7918 for (unsigned j = 0; j < NumOps; ++j) { 7919 unsigned i = IsBigEndian ? 
NumOps - 1 - j : j; 7920 SDValue OpVal = getOperand(i); 7921 unsigned BitPos = j * EltWidth; 7922 7923 if (OpVal.isUndef()) 7924 SplatUndef.setBits(BitPos, BitPos + EltWidth); 7925 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal)) 7926 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos); 7927 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal)) 7928 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos); 7929 else 7930 return false; 7931 } 7932 7933 // The build_vector is all constants or undefs. Find the smallest element 7934 // size that splats the vector. 7935 HasAnyUndefs = (SplatUndef != 0); 7936 7937 // FIXME: This does not work for vectors with elements less than 8 bits. 7938 while (VecWidth > 8) { 7939 unsigned HalfSize = VecWidth / 2; 7940 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize); 7941 APInt LowValue = SplatValue.trunc(HalfSize); 7942 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize); 7943 APInt LowUndef = SplatUndef.trunc(HalfSize); 7944 7945 // If the two halves do not match (ignoring undef bits), stop here. 7946 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) || 7947 MinSplatBits > HalfSize) 7948 break; 7949 7950 SplatValue = HighValue | LowValue; 7951 SplatUndef = HighUndef & LowUndef; 7952 7953 VecWidth = HalfSize; 7954 } 7955 7956 SplatBitSize = VecWidth; 7957 return true; 7958 } 7959 7960 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { 7961 if (UndefElements) { 7962 UndefElements->clear(); 7963 UndefElements->resize(getNumOperands()); 7964 } 7965 SDValue Splatted; 7966 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 7967 SDValue Op = getOperand(i); 7968 if (Op.isUndef()) { 7969 if (UndefElements) 7970 (*UndefElements)[i] = true; 7971 } else if (!Splatted) { 7972 Splatted = Op; 7973 } else if (Splatted != Op) { 7974 return SDValue(); 7975 } 7976 } 7977 7978 if (!Splatted) { 7979 assert(getOperand(0).isUndef() && 7980 "Can only have a splat without a constant for all undefs."); 7981 return getOperand(0); 7982 } 7983 7984 return Splatted; 7985 } 7986 7987 ConstantSDNode * 7988 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const { 7989 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements)); 7990 } 7991 7992 ConstantFPSDNode * 7993 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const { 7994 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements)); 7995 } 7996 7997 int32_t 7998 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, 7999 uint32_t BitWidth) const { 8000 if (ConstantFPSDNode *CN = 8001 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) { 8002 bool IsExact; 8003 APSInt IntVal(BitWidth); 8004 const APFloat &APF = CN->getValueAPF(); 8005 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) != 8006 APFloat::opOK || 8007 !IsExact) 8008 return -1; 8009 8010 return IntVal.exactLogBase2(); 8011 } 8012 return -1; 8013 } 8014 8015 bool BuildVectorSDNode::isConstant() const { 8016 for (const SDValue &Op : op_values()) { 8017 unsigned Opc = Op.getOpcode(); 8018 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP) 8019 return false; 8020 } 8021 return true; 8022 } 8023 8024 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { 8025 // Find the first non-undef value in the shuffle mask. 
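  // (Negative mask entries denote undef; a mask is a splat when every
  // non-negative entry selects the same source element, e.g. <1, -1, 1, 1>
  // splats element 1, whereas <0, 1, 0, 1> does not.)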
8026 unsigned i, e; 8027 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i) 8028 /* search */; 8029 8030 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!"); 8031 8032 // Make sure all remaining elements are either undef or the same as the first 8033 // non-undef value. 8034 for (int Idx = Mask[i]; i != e; ++i) 8035 if (Mask[i] >= 0 && Mask[i] != Idx) 8036 return false; 8037 return true; 8038 } 8039 8040 // \brief Returns the SDNode if it is a constant integer BuildVector 8041 // or constant integer. 8042 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) { 8043 if (isa<ConstantSDNode>(N)) 8044 return N.getNode(); 8045 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) 8046 return N.getNode(); 8047 // Treat a GlobalAddress supporting constant offset folding as a 8048 // constant integer. 8049 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N)) 8050 if (GA->getOpcode() == ISD::GlobalAddress && 8051 TLI->isOffsetFoldingLegal(GA)) 8052 return GA; 8053 return nullptr; 8054 } 8055 8056 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) { 8057 if (isa<ConstantFPSDNode>(N)) 8058 return N.getNode(); 8059 8060 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode())) 8061 return N.getNode(); 8062 8063 return nullptr; 8064 } 8065 8066 #ifndef NDEBUG 8067 static void checkForCyclesHelper(const SDNode *N, 8068 SmallPtrSetImpl<const SDNode*> &Visited, 8069 SmallPtrSetImpl<const SDNode*> &Checked, 8070 const llvm::SelectionDAG *DAG) { 8071 // If this node has already been checked, don't check it again. 8072 if (Checked.count(N)) 8073 return; 8074 8075 // If a node has already been visited on this depth-first walk, reject it as 8076 // a cycle. 8077 if (!Visited.insert(N).second) { 8078 errs() << "Detected cycle in SelectionDAG\n"; 8079 dbgs() << "Offending node:\n"; 8080 N->dumprFull(DAG); dbgs() << "\n"; 8081 abort(); 8082 } 8083 8084 for (const SDValue &Op : N->op_values()) 8085 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG); 8086 8087 Checked.insert(N); 8088 Visited.erase(N); 8089 } 8090 #endif 8091 8092 void llvm::checkForCycles(const llvm::SDNode *N, 8093 const llvm::SelectionDAG *DAG, 8094 bool force) { 8095 #ifndef NDEBUG 8096 bool check = force; 8097 #ifdef EXPENSIVE_CHECKS 8098 check = true; 8099 #endif // EXPENSIVE_CHECKS 8100 if (check) { 8101 assert(N && "Checking nonexistent SDNode"); 8102 SmallPtrSet<const SDNode*, 32> visited; 8103 SmallPtrSet<const SDNode*, 32> checked; 8104 checkForCyclesHelper(N, visited, checked, DAG); 8105 } 8106 #endif // !NDEBUG 8107 } 8108 8109 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) { 8110 checkForCycles(DAG->getRoot().getNode(), DAG, force); 8111 } 8112
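// Usage sketch (illustrative only; 'DAG' is assumed to be a live
// SelectionDAG and 'N' an SDNode of interest, both hypothetical here):
//   checkForCycles(&DAG, /*force=*/false);   // walks the DAG only when
//                                            // EXPENSIVE_CHECKS is enabled
//   checkForCycles(N, &DAG, /*force=*/true); // forces the walk from N in any
//                                            // build where NDEBUG is not set
// With NDEBUG defined, both calls compile down to empty functions, so they
// are safe to leave in place on hot paths.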