1 //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This implements the SelectionDAG class. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/CodeGen/SelectionDAG.h" 14 #include "SDNodeDbgValue.h" 15 #include "llvm/ADT/APFloat.h" 16 #include "llvm/ADT/APInt.h" 17 #include "llvm/ADT/APSInt.h" 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/BitVector.h" 20 #include "llvm/ADT/FoldingSet.h" 21 #include "llvm/ADT/None.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SmallPtrSet.h" 24 #include "llvm/ADT/SmallVector.h" 25 #include "llvm/ADT/Triple.h" 26 #include "llvm/ADT/Twine.h" 27 #include "llvm/Analysis/BlockFrequencyInfo.h" 28 #include "llvm/Analysis/MemoryLocation.h" 29 #include "llvm/Analysis/ProfileSummaryInfo.h" 30 #include "llvm/Analysis/ValueTracking.h" 31 #include "llvm/CodeGen/ISDOpcodes.h" 32 #include "llvm/CodeGen/MachineBasicBlock.h" 33 #include "llvm/CodeGen/MachineConstantPool.h" 34 #include "llvm/CodeGen/MachineFrameInfo.h" 35 #include "llvm/CodeGen/MachineFunction.h" 36 #include "llvm/CodeGen/MachineMemOperand.h" 37 #include "llvm/CodeGen/RuntimeLibcalls.h" 38 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h" 39 #include "llvm/CodeGen/SelectionDAGNodes.h" 40 #include "llvm/CodeGen/SelectionDAGTargetInfo.h" 41 #include "llvm/CodeGen/TargetLowering.h" 42 #include "llvm/CodeGen/TargetRegisterInfo.h" 43 #include "llvm/CodeGen/TargetSubtargetInfo.h" 44 #include "llvm/CodeGen/ValueTypes.h" 45 #include "llvm/IR/Constant.h" 46 #include "llvm/IR/Constants.h" 47 #include "llvm/IR/DataLayout.h" 48 #include "llvm/IR/DebugInfoMetadata.h" 49 #include "llvm/IR/DebugLoc.h" 50 #include "llvm/IR/DerivedTypes.h" 51 #include "llvm/IR/Function.h" 52 #include "llvm/IR/GlobalValue.h" 53 #include "llvm/IR/Metadata.h" 54 #include "llvm/IR/Type.h" 55 #include "llvm/IR/Value.h" 56 #include "llvm/Support/Casting.h" 57 #include "llvm/Support/CodeGen.h" 58 #include "llvm/Support/Compiler.h" 59 #include "llvm/Support/Debug.h" 60 #include "llvm/Support/ErrorHandling.h" 61 #include "llvm/Support/KnownBits.h" 62 #include "llvm/Support/MachineValueType.h" 63 #include "llvm/Support/ManagedStatic.h" 64 #include "llvm/Support/MathExtras.h" 65 #include "llvm/Support/Mutex.h" 66 #include "llvm/Support/raw_ostream.h" 67 #include "llvm/Target/TargetMachine.h" 68 #include "llvm/Target/TargetOptions.h" 69 #include "llvm/Transforms/Utils/SizeOpts.h" 70 #include <algorithm> 71 #include <cassert> 72 #include <cstdint> 73 #include <cstdlib> 74 #include <limits> 75 #include <set> 76 #include <string> 77 #include <utility> 78 #include <vector> 79 80 using namespace llvm; 81 82 /// makeVTList - Return an instance of the SDVTList struct initialized with the 83 /// specified members. 84 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) { 85 SDVTList Res = {VTs, NumVTs}; 86 return Res; 87 } 88 89 // Default null implementations of the callbacks. 
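// Derived listeners (such as DAGNodeDeletedListener below) override these
// hooks to observe node deletion, update, and insertion as the DAG is mutated.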
90 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {} 91 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {} 92 void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {} 93 94 void SelectionDAG::DAGNodeDeletedListener::anchor() {} 95 96 #define DEBUG_TYPE "selectiondag" 97 98 static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt", 99 cl::Hidden, cl::init(true), 100 cl::desc("Gang up loads and stores generated by inlining of memcpy")); 101 102 static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max", 103 cl::desc("Number limit for gluing ld/st of memcpy."), 104 cl::Hidden, cl::init(0)); 105 106 static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) { 107 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G);); 108 } 109 110 //===----------------------------------------------------------------------===// 111 // ConstantFPSDNode Class 112 //===----------------------------------------------------------------------===// 113 114 /// isExactlyValue - We don't rely on operator== working on double values, as 115 /// it returns true for things that are clearly not equal, like -0.0 and 0.0. 116 /// As such, this method can be used to do an exact bit-for-bit comparison of 117 /// two floating point values. 118 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const { 119 return getValueAPF().bitwiseIsEqual(V); 120 } 121 122 bool ConstantFPSDNode::isValueValidForType(EVT VT, 123 const APFloat& Val) { 124 assert(VT.isFloatingPoint() && "Can only convert between FP types"); 125 126 // convert modifies in place, so make a copy. 127 APFloat Val2 = APFloat(Val); 128 bool losesInfo; 129 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT), 130 APFloat::rmNearestTiesToEven, 131 &losesInfo); 132 return !losesInfo; 133 } 134 135 //===----------------------------------------------------------------------===// 136 // ISD Namespace 137 //===----------------------------------------------------------------------===// 138 139 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) { 140 auto *BV = dyn_cast<BuildVectorSDNode>(N); 141 if (!BV) 142 return false; 143 144 APInt SplatUndef; 145 unsigned SplatBitSize; 146 bool HasUndefs; 147 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits(); 148 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs, 149 EltSize) && 150 EltSize == SplatBitSize; 151 } 152 153 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be 154 // specializations of the more general isConstantSplatVector()? 155 156 bool ISD::isBuildVectorAllOnes(const SDNode *N) { 157 // Look through a bit convert. 158 while (N->getOpcode() == ISD::BITCAST) 159 N = N->getOperand(0).getNode(); 160 161 if (N->getOpcode() != ISD::BUILD_VECTOR) return false; 162 163 unsigned i = 0, e = N->getNumOperands(); 164 165 // Skip over all of the undef values. 166 while (i != e && N->getOperand(i).isUndef()) 167 ++i; 168 169 // Do not accept an all-undef vector. 170 if (i == e) return false; 171 172 // Do not accept build_vectors that aren't all constants or which have non-~0 173 // elements. We have to be a bit careful here, as the type of the constant 174 // may not be the same as the type of the vector elements due to type 175 // legalization (the elements are promoted to a legal type for the target and 176 // a vector of a type may be legal when the base element type is not). 
177 // We only want to check enough bits to cover the vector elements, because 178 // we care if the resultant vector is all ones, not whether the individual 179 // constants are. 180 SDValue NotZero = N->getOperand(i); 181 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); 182 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) { 183 if (CN->getAPIntValue().countTrailingOnes() < EltSize) 184 return false; 185 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) { 186 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize) 187 return false; 188 } else 189 return false; 190 191 // Okay, we have at least one ~0 value, check to see if the rest match or are 192 // undefs. Even with the above element type twiddling, this should be OK, as 193 // the same type legalization should have applied to all the elements. 194 for (++i; i != e; ++i) 195 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef()) 196 return false; 197 return true; 198 } 199 200 bool ISD::isBuildVectorAllZeros(const SDNode *N) { 201 // Look through a bit convert. 202 while (N->getOpcode() == ISD::BITCAST) 203 N = N->getOperand(0).getNode(); 204 205 if (N->getOpcode() != ISD::BUILD_VECTOR) return false; 206 207 bool IsAllUndef = true; 208 for (const SDValue &Op : N->op_values()) { 209 if (Op.isUndef()) 210 continue; 211 IsAllUndef = false; 212 // Do not accept build_vectors that aren't all constants or which have non-0 213 // elements. We have to be a bit careful here, as the type of the constant 214 // may not be the same as the type of the vector elements due to type 215 // legalization (the elements are promoted to a legal type for the target 216 // and a vector of a type may be legal when the base element type is not). 217 // We only want to check enough bits to cover the vector elements, because 218 // we care if the resultant vector is all zeros, not whether the individual 219 // constants are. 220 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); 221 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) { 222 if (CN->getAPIntValue().countTrailingZeros() < EltSize) 223 return false; 224 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) { 225 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize) 226 return false; 227 } else 228 return false; 229 } 230 231 // Do not accept an all-undef vector. 232 if (IsAllUndef) 233 return false; 234 return true; 235 } 236 237 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) { 238 if (N->getOpcode() != ISD::BUILD_VECTOR) 239 return false; 240 241 for (const SDValue &Op : N->op_values()) { 242 if (Op.isUndef()) 243 continue; 244 if (!isa<ConstantSDNode>(Op)) 245 return false; 246 } 247 return true; 248 } 249 250 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) { 251 if (N->getOpcode() != ISD::BUILD_VECTOR) 252 return false; 253 254 for (const SDValue &Op : N->op_values()) { 255 if (Op.isUndef()) 256 continue; 257 if (!isa<ConstantFPSDNode>(Op)) 258 return false; 259 } 260 return true; 261 } 262 263 bool ISD::allOperandsUndef(const SDNode *N) { 264 // Return false if the node has no operands. 265 // This is "logically inconsistent" with the definition of "all" but 266 // is probably the desired behavior. 
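  // For example, (build_vector undef, undef) reports true here, while a
  // zero-operand leaf node reports false.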
267 if (N->getNumOperands() == 0) 268 return false; 269 return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); }); 270 } 271 272 bool ISD::matchUnaryPredicate(SDValue Op, 273 std::function<bool(ConstantSDNode *)> Match, 274 bool AllowUndefs) { 275 // FIXME: Add support for scalar UNDEF cases? 276 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) 277 return Match(Cst); 278 279 // FIXME: Add support for vector UNDEF cases? 280 if (ISD::BUILD_VECTOR != Op.getOpcode()) 281 return false; 282 283 EVT SVT = Op.getValueType().getScalarType(); 284 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 285 if (AllowUndefs && Op.getOperand(i).isUndef()) { 286 if (!Match(nullptr)) 287 return false; 288 continue; 289 } 290 291 auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i)); 292 if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst)) 293 return false; 294 } 295 return true; 296 } 297 298 bool ISD::matchBinaryPredicate( 299 SDValue LHS, SDValue RHS, 300 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match, 301 bool AllowUndefs, bool AllowTypeMismatch) { 302 if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType()) 303 return false; 304 305 // TODO: Add support for scalar UNDEF cases? 306 if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS)) 307 if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS)) 308 return Match(LHSCst, RHSCst); 309 310 // TODO: Add support for vector UNDEF cases? 311 if (ISD::BUILD_VECTOR != LHS.getOpcode() || 312 ISD::BUILD_VECTOR != RHS.getOpcode()) 313 return false; 314 315 EVT SVT = LHS.getValueType().getScalarType(); 316 for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) { 317 SDValue LHSOp = LHS.getOperand(i); 318 SDValue RHSOp = RHS.getOperand(i); 319 bool LHSUndef = AllowUndefs && LHSOp.isUndef(); 320 bool RHSUndef = AllowUndefs && RHSOp.isUndef(); 321 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp); 322 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp); 323 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef)) 324 return false; 325 if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT || 326 LHSOp.getValueType() != RHSOp.getValueType())) 327 return false; 328 if (!Match(LHSCst, RHSCst)) 329 return false; 330 } 331 return true; 332 } 333 334 ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) { 335 switch (ExtType) { 336 case ISD::EXTLOAD: 337 return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND; 338 case ISD::SEXTLOAD: 339 return ISD::SIGN_EXTEND; 340 case ISD::ZEXTLOAD: 341 return ISD::ZERO_EXTEND; 342 default: 343 break; 344 } 345 346 llvm_unreachable("Invalid LoadExtType"); 347 } 348 349 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) { 350 // To perform this operation, we just need to swap the L and G bits of the 351 // operation. 352 unsigned OldL = (Operation >> 2) & 1; 353 unsigned OldG = (Operation >> 1) & 1; 354 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits 355 (OldL << 1) | // New G bit 356 (OldG << 2)); // New L bit. 357 } 358 359 static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) { 360 unsigned Operation = Op; 361 if (isIntegerLike) 362 Operation ^= 7; // Flip L, G, E bits, but not U. 363 else 364 Operation ^= 15; // Flip all of the condition bits. 365 366 if (Operation > ISD::SETTRUE2) 367 Operation &= ~8; // Don't let N and U bits get set. 
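  // Worked example: for an integer compare, SETULT (U|L) XOR'd with 7 above
  // becomes SETUGE (U|G|E), i.e. its logical inverse.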
368 369 return ISD::CondCode(Operation); 370 } 371 372 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) { 373 return getSetCCInverseImpl(Op, Type.isInteger()); 374 } 375 376 ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op, 377 bool isIntegerLike) { 378 return getSetCCInverseImpl(Op, isIntegerLike); 379 } 380 381 /// For an integer comparison, return 1 if the comparison is a signed operation 382 /// and 2 if the result is an unsigned comparison. Return zero if the operation 383 /// does not depend on the sign of the input (setne and seteq). 384 static int isSignedOp(ISD::CondCode Opcode) { 385 switch (Opcode) { 386 default: llvm_unreachable("Illegal integer setcc operation!"); 387 case ISD::SETEQ: 388 case ISD::SETNE: return 0; 389 case ISD::SETLT: 390 case ISD::SETLE: 391 case ISD::SETGT: 392 case ISD::SETGE: return 1; 393 case ISD::SETULT: 394 case ISD::SETULE: 395 case ISD::SETUGT: 396 case ISD::SETUGE: return 2; 397 } 398 } 399 400 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2, 401 EVT Type) { 402 bool IsInteger = Type.isInteger(); 403 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3) 404 // Cannot fold a signed integer setcc with an unsigned integer setcc. 405 return ISD::SETCC_INVALID; 406 407 unsigned Op = Op1 | Op2; // Combine all of the condition bits. 408 409 // If the N and U bits get set, then the resultant comparison DOES suddenly 410 // care about orderedness, and it is true when ordered. 411 if (Op > ISD::SETTRUE2) 412 Op &= ~16; // Clear the U bit if the N bit is set. 413 414 // Canonicalize illegal integer setcc's. 415 if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT 416 Op = ISD::SETNE; 417 418 return ISD::CondCode(Op); 419 } 420 421 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2, 422 EVT Type) { 423 bool IsInteger = Type.isInteger(); 424 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3) 425 // Cannot fold a signed setcc with an unsigned setcc. 426 return ISD::SETCC_INVALID; 427 428 // Combine all of the condition bits. 429 ISD::CondCode Result = ISD::CondCode(Op1 & Op2); 430 431 // Canonicalize illegal integer setcc's. 432 if (IsInteger) { 433 switch (Result) { 434 default: break; 435 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT 436 case ISD::SETOEQ: // SETEQ & SETU[LG]E 437 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE 438 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE 439 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE 440 } 441 } 442 443 return Result; 444 } 445 446 //===----------------------------------------------------------------------===// 447 // SDNode Profile Support 448 //===----------------------------------------------------------------------===// 449 450 /// AddNodeIDOpcode - Add the node opcode to the NodeID data. 451 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) { 452 ID.AddInteger(OpC); 453 } 454 455 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them 456 /// solely with their pointer. 457 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) { 458 ID.AddPointer(VTList.VTs); 459 } 460 461 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 
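/// Each operand contributes the pointer of the node it comes from plus the
/// result number it selects, so distinct results of one node hash differently.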
462 static void AddNodeIDOperands(FoldingSetNodeID &ID, 463 ArrayRef<SDValue> Ops) { 464 for (auto& Op : Ops) { 465 ID.AddPointer(Op.getNode()); 466 ID.AddInteger(Op.getResNo()); 467 } 468 } 469 470 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 471 static void AddNodeIDOperands(FoldingSetNodeID &ID, 472 ArrayRef<SDUse> Ops) { 473 for (auto& Op : Ops) { 474 ID.AddPointer(Op.getNode()); 475 ID.AddInteger(Op.getResNo()); 476 } 477 } 478 479 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC, 480 SDVTList VTList, ArrayRef<SDValue> OpList) { 481 AddNodeIDOpcode(ID, OpC); 482 AddNodeIDValueTypes(ID, VTList); 483 AddNodeIDOperands(ID, OpList); 484 } 485 486 /// If this is an SDNode with special info, add this info to the NodeID data. 487 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) { 488 switch (N->getOpcode()) { 489 case ISD::TargetExternalSymbol: 490 case ISD::ExternalSymbol: 491 case ISD::MCSymbol: 492 llvm_unreachable("Should only be used on nodes with operands"); 493 default: break; // Normal nodes don't need extra info. 494 case ISD::TargetConstant: 495 case ISD::Constant: { 496 const ConstantSDNode *C = cast<ConstantSDNode>(N); 497 ID.AddPointer(C->getConstantIntValue()); 498 ID.AddBoolean(C->isOpaque()); 499 break; 500 } 501 case ISD::TargetConstantFP: 502 case ISD::ConstantFP: 503 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue()); 504 break; 505 case ISD::TargetGlobalAddress: 506 case ISD::GlobalAddress: 507 case ISD::TargetGlobalTLSAddress: 508 case ISD::GlobalTLSAddress: { 509 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N); 510 ID.AddPointer(GA->getGlobal()); 511 ID.AddInteger(GA->getOffset()); 512 ID.AddInteger(GA->getTargetFlags()); 513 break; 514 } 515 case ISD::BasicBlock: 516 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock()); 517 break; 518 case ISD::Register: 519 ID.AddInteger(cast<RegisterSDNode>(N)->getReg()); 520 break; 521 case ISD::RegisterMask: 522 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask()); 523 break; 524 case ISD::SRCVALUE: 525 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue()); 526 break; 527 case ISD::FrameIndex: 528 case ISD::TargetFrameIndex: 529 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex()); 530 break; 531 case ISD::LIFETIME_START: 532 case ISD::LIFETIME_END: 533 if (cast<LifetimeSDNode>(N)->hasOffset()) { 534 ID.AddInteger(cast<LifetimeSDNode>(N)->getSize()); 535 ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset()); 536 } 537 break; 538 case ISD::JumpTable: 539 case ISD::TargetJumpTable: 540 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex()); 541 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags()); 542 break; 543 case ISD::ConstantPool: 544 case ISD::TargetConstantPool: { 545 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N); 546 ID.AddInteger(CP->getAlignment()); 547 ID.AddInteger(CP->getOffset()); 548 if (CP->isMachineConstantPoolEntry()) 549 CP->getMachineCPVal()->addSelectionDAGCSEId(ID); 550 else 551 ID.AddPointer(CP->getConstVal()); 552 ID.AddInteger(CP->getTargetFlags()); 553 break; 554 } 555 case ISD::TargetIndex: { 556 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N); 557 ID.AddInteger(TI->getIndex()); 558 ID.AddInteger(TI->getOffset()); 559 ID.AddInteger(TI->getTargetFlags()); 560 break; 561 } 562 case ISD::LOAD: { 563 const LoadSDNode *LD = cast<LoadSDNode>(N); 564 ID.AddInteger(LD->getMemoryVT().getRawBits()); 565 ID.AddInteger(LD->getRawSubclassData()); 566 
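    // The address space is hashed as well, so memory nodes that differ only in
    // address space are never CSE'd together.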
ID.AddInteger(LD->getPointerInfo().getAddrSpace()); 567 break; 568 } 569 case ISD::STORE: { 570 const StoreSDNode *ST = cast<StoreSDNode>(N); 571 ID.AddInteger(ST->getMemoryVT().getRawBits()); 572 ID.AddInteger(ST->getRawSubclassData()); 573 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 574 break; 575 } 576 case ISD::MLOAD: { 577 const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N); 578 ID.AddInteger(MLD->getMemoryVT().getRawBits()); 579 ID.AddInteger(MLD->getRawSubclassData()); 580 ID.AddInteger(MLD->getPointerInfo().getAddrSpace()); 581 break; 582 } 583 case ISD::MSTORE: { 584 const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N); 585 ID.AddInteger(MST->getMemoryVT().getRawBits()); 586 ID.AddInteger(MST->getRawSubclassData()); 587 ID.AddInteger(MST->getPointerInfo().getAddrSpace()); 588 break; 589 } 590 case ISD::MGATHER: { 591 const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N); 592 ID.AddInteger(MG->getMemoryVT().getRawBits()); 593 ID.AddInteger(MG->getRawSubclassData()); 594 ID.AddInteger(MG->getPointerInfo().getAddrSpace()); 595 break; 596 } 597 case ISD::MSCATTER: { 598 const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N); 599 ID.AddInteger(MS->getMemoryVT().getRawBits()); 600 ID.AddInteger(MS->getRawSubclassData()); 601 ID.AddInteger(MS->getPointerInfo().getAddrSpace()); 602 break; 603 } 604 case ISD::ATOMIC_CMP_SWAP: 605 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 606 case ISD::ATOMIC_SWAP: 607 case ISD::ATOMIC_LOAD_ADD: 608 case ISD::ATOMIC_LOAD_SUB: 609 case ISD::ATOMIC_LOAD_AND: 610 case ISD::ATOMIC_LOAD_CLR: 611 case ISD::ATOMIC_LOAD_OR: 612 case ISD::ATOMIC_LOAD_XOR: 613 case ISD::ATOMIC_LOAD_NAND: 614 case ISD::ATOMIC_LOAD_MIN: 615 case ISD::ATOMIC_LOAD_MAX: 616 case ISD::ATOMIC_LOAD_UMIN: 617 case ISD::ATOMIC_LOAD_UMAX: 618 case ISD::ATOMIC_LOAD: 619 case ISD::ATOMIC_STORE: { 620 const AtomicSDNode *AT = cast<AtomicSDNode>(N); 621 ID.AddInteger(AT->getMemoryVT().getRawBits()); 622 ID.AddInteger(AT->getRawSubclassData()); 623 ID.AddInteger(AT->getPointerInfo().getAddrSpace()); 624 break; 625 } 626 case ISD::PREFETCH: { 627 const MemSDNode *PF = cast<MemSDNode>(N); 628 ID.AddInteger(PF->getPointerInfo().getAddrSpace()); 629 break; 630 } 631 case ISD::VECTOR_SHUFFLE: { 632 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 633 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements(); 634 i != e; ++i) 635 ID.AddInteger(SVN->getMaskElt(i)); 636 break; 637 } 638 case ISD::TargetBlockAddress: 639 case ISD::BlockAddress: { 640 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N); 641 ID.AddPointer(BA->getBlockAddress()); 642 ID.AddInteger(BA->getOffset()); 643 ID.AddInteger(BA->getTargetFlags()); 644 break; 645 } 646 } // end switch (N->getOpcode()) 647 648 // Target specific memory nodes could also have address spaces to check. 649 if (N->isTargetMemoryOpcode()) 650 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace()); 651 } 652 653 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID 654 /// data. 655 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) { 656 AddNodeIDOpcode(ID, N->getOpcode()); 657 // Add the return value info. 658 AddNodeIDValueTypes(ID, N->getVTList()); 659 // Add the operand info. 660 AddNodeIDOperands(ID, N->ops()); 661 662 // Handle SDNode leafs with special info. 
663 AddNodeIDCustom(ID, N); 664 } 665 666 //===----------------------------------------------------------------------===// 667 // SelectionDAG Class 668 //===----------------------------------------------------------------------===// 669 670 /// doNotCSE - Return true if CSE should not be performed for this node. 671 static bool doNotCSE(SDNode *N) { 672 if (N->getValueType(0) == MVT::Glue) 673 return true; // Never CSE anything that produces a flag. 674 675 switch (N->getOpcode()) { 676 default: break; 677 case ISD::HANDLENODE: 678 case ISD::EH_LABEL: 679 return true; // Never CSE these nodes. 680 } 681 682 // Check that remaining values produced are not flags. 683 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i) 684 if (N->getValueType(i) == MVT::Glue) 685 return true; // Never CSE anything that produces a flag. 686 687 return false; 688 } 689 690 /// RemoveDeadNodes - This method deletes all unreachable nodes in the 691 /// SelectionDAG. 692 void SelectionDAG::RemoveDeadNodes() { 693 // Create a dummy node (which is not added to allnodes), that adds a reference 694 // to the root node, preventing it from being deleted. 695 HandleSDNode Dummy(getRoot()); 696 697 SmallVector<SDNode*, 128> DeadNodes; 698 699 // Add all obviously-dead nodes to the DeadNodes worklist. 700 for (SDNode &Node : allnodes()) 701 if (Node.use_empty()) 702 DeadNodes.push_back(&Node); 703 704 RemoveDeadNodes(DeadNodes); 705 706 // If the root changed (e.g. it was a dead load, update the root). 707 setRoot(Dummy.getValue()); 708 } 709 710 /// RemoveDeadNodes - This method deletes the unreachable nodes in the 711 /// given list, and any nodes that become unreachable as a result. 712 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) { 713 714 // Process the worklist, deleting the nodes and adding their uses to the 715 // worklist. 716 while (!DeadNodes.empty()) { 717 SDNode *N = DeadNodes.pop_back_val(); 718 // Skip to next node if we've already managed to delete the node. This could 719 // happen if replacing a node causes a node previously added to the node to 720 // be deleted. 721 if (N->getOpcode() == ISD::DELETED_NODE) 722 continue; 723 724 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 725 DUL->NodeDeleted(N, nullptr); 726 727 // Take the node out of the appropriate CSE map. 728 RemoveNodeFromCSEMaps(N); 729 730 // Next, brutally remove the operand list. This is safe to do, as there are 731 // no cycles in the graph. 732 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 733 SDUse &Use = *I++; 734 SDNode *Operand = Use.getNode(); 735 Use.set(SDValue()); 736 737 // Now that we removed this operand, see if there are no uses of it left. 738 if (Operand->use_empty()) 739 DeadNodes.push_back(Operand); 740 } 741 742 DeallocateNode(N); 743 } 744 } 745 746 void SelectionDAG::RemoveDeadNode(SDNode *N){ 747 SmallVector<SDNode*, 16> DeadNodes(1, N); 748 749 // Create a dummy node that adds a reference to the root node, preventing 750 // it from being deleted. (This matters if the root is an operand of the 751 // dead node.) 752 HandleSDNode Dummy(getRoot()); 753 754 RemoveDeadNodes(DeadNodes); 755 } 756 757 void SelectionDAG::DeleteNode(SDNode *N) { 758 // First take this out of the appropriate CSE map. 759 RemoveNodeFromCSEMaps(N); 760 761 // Finally, remove uses due to operands of this node, remove from the 762 // AllNodes list, and delete the node. 
763 DeleteNodeNotInCSEMaps(N); 764 } 765 766 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) { 767 assert(N->getIterator() != AllNodes.begin() && 768 "Cannot delete the entry node!"); 769 assert(N->use_empty() && "Cannot delete a node that is not dead!"); 770 771 // Drop all of the operands and decrement used node's use counts. 772 N->DropOperands(); 773 774 DeallocateNode(N); 775 } 776 777 void SDDbgInfo::erase(const SDNode *Node) { 778 DbgValMapType::iterator I = DbgValMap.find(Node); 779 if (I == DbgValMap.end()) 780 return; 781 for (auto &Val: I->second) 782 Val->setIsInvalidated(); 783 DbgValMap.erase(I); 784 } 785 786 void SelectionDAG::DeallocateNode(SDNode *N) { 787 // If we have operands, deallocate them. 788 removeOperands(N); 789 790 NodeAllocator.Deallocate(AllNodes.remove(N)); 791 792 // Set the opcode to DELETED_NODE to help catch bugs when node 793 // memory is reallocated. 794 // FIXME: There are places in SDag that have grown a dependency on the opcode 795 // value in the released node. 796 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType)); 797 N->NodeType = ISD::DELETED_NODE; 798 799 // If any of the SDDbgValue nodes refer to this SDNode, invalidate 800 // them and forget about that node. 801 DbgInfo->erase(N); 802 } 803 804 #ifndef NDEBUG 805 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid. 806 static void VerifySDNode(SDNode *N) { 807 switch (N->getOpcode()) { 808 default: 809 break; 810 case ISD::BUILD_PAIR: { 811 EVT VT = N->getValueType(0); 812 assert(N->getNumValues() == 1 && "Too many results!"); 813 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) && 814 "Wrong return type!"); 815 assert(N->getNumOperands() == 2 && "Wrong number of operands!"); 816 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() && 817 "Mismatched operand types!"); 818 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() && 819 "Wrong operand type!"); 820 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() && 821 "Wrong return type size"); 822 break; 823 } 824 case ISD::BUILD_VECTOR: { 825 assert(N->getNumValues() == 1 && "Too many results!"); 826 assert(N->getValueType(0).isVector() && "Wrong return type!"); 827 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() && 828 "Wrong number of operands!"); 829 EVT EltVT = N->getValueType(0).getVectorElementType(); 830 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) { 831 assert((I->getValueType() == EltVT || 832 (EltVT.isInteger() && I->getValueType().isInteger() && 833 EltVT.bitsLE(I->getValueType()))) && 834 "Wrong operand type!"); 835 assert(I->getValueType() == N->getOperand(0).getValueType() && 836 "Operands must all have the same type"); 837 } 838 break; 839 } 840 } 841 } 842 #endif // NDEBUG 843 844 /// Insert a newly allocated node into the DAG. 845 /// 846 /// Handles insertion into the all nodes list and CSE map, as well as 847 /// verification and other common operations when a new node is allocated. 848 void SelectionDAG::InsertNode(SDNode *N) { 849 AllNodes.push_back(N); 850 #ifndef NDEBUG 851 N->PersistentId = NextPersistentId++; 852 VerifySDNode(N); 853 #endif 854 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 855 DUL->NodeInserted(N); 856 } 857 858 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that 859 /// correspond to it. This is useful when we're about to delete or repurpose 860 /// the node. 
We don't want future request for structurally identical nodes 861 /// to return N anymore. 862 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) { 863 bool Erased = false; 864 switch (N->getOpcode()) { 865 case ISD::HANDLENODE: return false; // noop. 866 case ISD::CONDCODE: 867 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] && 868 "Cond code doesn't exist!"); 869 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr; 870 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr; 871 break; 872 case ISD::ExternalSymbol: 873 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol()); 874 break; 875 case ISD::TargetExternalSymbol: { 876 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N); 877 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>( 878 ESN->getSymbol(), ESN->getTargetFlags())); 879 break; 880 } 881 case ISD::MCSymbol: { 882 auto *MCSN = cast<MCSymbolSDNode>(N); 883 Erased = MCSymbols.erase(MCSN->getMCSymbol()); 884 break; 885 } 886 case ISD::VALUETYPE: { 887 EVT VT = cast<VTSDNode>(N)->getVT(); 888 if (VT.isExtended()) { 889 Erased = ExtendedValueTypeNodes.erase(VT); 890 } else { 891 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr; 892 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr; 893 } 894 break; 895 } 896 default: 897 // Remove it from the CSE Map. 898 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!"); 899 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!"); 900 Erased = CSEMap.RemoveNode(N); 901 break; 902 } 903 #ifndef NDEBUG 904 // Verify that the node was actually in one of the CSE maps, unless it has a 905 // flag result (which cannot be CSE'd) or is one of the special cases that are 906 // not subject to CSE. 907 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue && 908 !N->isMachineOpcode() && !doNotCSE(N)) { 909 N->dump(this); 910 dbgs() << "\n"; 911 llvm_unreachable("Node is not in map!"); 912 } 913 #endif 914 return Erased; 915 } 916 917 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE 918 /// maps and modified in place. Add it back to the CSE maps, unless an identical 919 /// node already exists, in which case transfer all its users to the existing 920 /// node. This transfer can potentially trigger recursive merging. 921 void 922 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) { 923 // For node types that aren't CSE'd, just act as if no identical node 924 // already exists. 925 if (!doNotCSE(N)) { 926 SDNode *Existing = CSEMap.GetOrInsertNode(N); 927 if (Existing != N) { 928 // If there was already an existing matching node, use ReplaceAllUsesWith 929 // to replace the dead one with the existing one. This can cause 930 // recursive merging of other unrelated nodes down the line. 931 ReplaceAllUsesWith(N, Existing); 932 933 // N is now dead. Inform the listeners and delete it. 934 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 935 DUL->NodeDeleted(N, Existing); 936 DeleteNodeNotInCSEMaps(N); 937 return; 938 } 939 } 940 941 // If the node doesn't already exist, we updated it. Inform listeners. 942 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 943 DUL->NodeUpdated(N); 944 } 945 946 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 947 /// were replaced with those specified. If this node is never memoized, 948 /// return null, otherwise return a pointer to the slot it would take. 
If a 949 /// node already exists with these operands, the slot will be non-null. 950 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op, 951 void *&InsertPos) { 952 if (doNotCSE(N)) 953 return nullptr; 954 955 SDValue Ops[] = { Op }; 956 FoldingSetNodeID ID; 957 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 958 AddNodeIDCustom(ID, N); 959 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 960 if (Node) 961 Node->intersectFlagsWith(N->getFlags()); 962 return Node; 963 } 964 965 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 966 /// were replaced with those specified. If this node is never memoized, 967 /// return null, otherwise return a pointer to the slot it would take. If a 968 /// node already exists with these operands, the slot will be non-null. 969 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, 970 SDValue Op1, SDValue Op2, 971 void *&InsertPos) { 972 if (doNotCSE(N)) 973 return nullptr; 974 975 SDValue Ops[] = { Op1, Op2 }; 976 FoldingSetNodeID ID; 977 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 978 AddNodeIDCustom(ID, N); 979 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 980 if (Node) 981 Node->intersectFlagsWith(N->getFlags()); 982 return Node; 983 } 984 985 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 986 /// were replaced with those specified. If this node is never memoized, 987 /// return null, otherwise return a pointer to the slot it would take. If a 988 /// node already exists with these operands, the slot will be non-null. 989 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops, 990 void *&InsertPos) { 991 if (doNotCSE(N)) 992 return nullptr; 993 994 FoldingSetNodeID ID; 995 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 996 AddNodeIDCustom(ID, N); 997 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 998 if (Node) 999 Node->intersectFlagsWith(N->getFlags()); 1000 return Node; 1001 } 1002 1003 unsigned SelectionDAG::getEVTAlignment(EVT VT) const { 1004 Type *Ty = VT == MVT::iPTR ? 1005 PointerType::get(Type::getInt8Ty(*getContext()), 0) : 1006 VT.getTypeForEVT(*getContext()); 1007 1008 return getDataLayout().getABITypeAlignment(Ty); 1009 } 1010 1011 // EntryNode could meaningfully have debug info if we can find it... 
1012 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL) 1013 : TM(tm), OptLevel(OL), 1014 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)), 1015 Root(getEntryNode()) { 1016 InsertNode(&EntryNode); 1017 DbgInfo = new SDDbgInfo(); 1018 } 1019 1020 void SelectionDAG::init(MachineFunction &NewMF, 1021 OptimizationRemarkEmitter &NewORE, 1022 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, 1023 LegacyDivergenceAnalysis * Divergence, 1024 ProfileSummaryInfo *PSIin, 1025 BlockFrequencyInfo *BFIin) { 1026 MF = &NewMF; 1027 SDAGISelPass = PassPtr; 1028 ORE = &NewORE; 1029 TLI = getSubtarget().getTargetLowering(); 1030 TSI = getSubtarget().getSelectionDAGInfo(); 1031 LibInfo = LibraryInfo; 1032 Context = &MF->getFunction().getContext(); 1033 DA = Divergence; 1034 PSI = PSIin; 1035 BFI = BFIin; 1036 } 1037 1038 SelectionDAG::~SelectionDAG() { 1039 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners"); 1040 allnodes_clear(); 1041 OperandRecycler.clear(OperandAllocator); 1042 delete DbgInfo; 1043 } 1044 1045 bool SelectionDAG::shouldOptForSize() const { 1046 return MF->getFunction().hasOptSize() || 1047 llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI); 1048 } 1049 1050 void SelectionDAG::allnodes_clear() { 1051 assert(&*AllNodes.begin() == &EntryNode); 1052 AllNodes.remove(AllNodes.begin()); 1053 while (!AllNodes.empty()) 1054 DeallocateNode(&AllNodes.front()); 1055 #ifndef NDEBUG 1056 NextPersistentId = 0; 1057 #endif 1058 } 1059 1060 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID, 1061 void *&InsertPos) { 1062 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos); 1063 if (N) { 1064 switch (N->getOpcode()) { 1065 default: break; 1066 case ISD::Constant: 1067 case ISD::ConstantFP: 1068 llvm_unreachable("Querying for Constant and ConstantFP nodes requires " 1069 "debug location. Use another overload."); 1070 } 1071 } 1072 return N; 1073 } 1074 1075 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID, 1076 const SDLoc &DL, void *&InsertPos) { 1077 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos); 1078 if (N) { 1079 switch (N->getOpcode()) { 1080 case ISD::Constant: 1081 case ISD::ConstantFP: 1082 // Erase debug location from the node if the node is used at several 1083 // different places. Do not propagate one location to all uses as it 1084 // will cause a worse single stepping debugging experience. 1085 if (N->getDebugLoc() != DL.getDebugLoc()) 1086 N->setDebugLoc(DebugLoc()); 1087 break; 1088 default: 1089 // When the node's point of use is located earlier in the instruction 1090 // sequence than its prior point of use, update its debug info to the 1091 // earlier location. 
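      // Note that an IROrder of zero is skipped by the check below, as it
      // carries no useful ordering information.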
1092 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder()) 1093 N->setDebugLoc(DL.getDebugLoc()); 1094 break; 1095 } 1096 } 1097 return N; 1098 } 1099 1100 void SelectionDAG::clear() { 1101 allnodes_clear(); 1102 OperandRecycler.clear(OperandAllocator); 1103 OperandAllocator.Reset(); 1104 CSEMap.clear(); 1105 1106 ExtendedValueTypeNodes.clear(); 1107 ExternalSymbols.clear(); 1108 TargetExternalSymbols.clear(); 1109 MCSymbols.clear(); 1110 SDCallSiteDbgInfo.clear(); 1111 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(), 1112 static_cast<CondCodeSDNode*>(nullptr)); 1113 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(), 1114 static_cast<SDNode*>(nullptr)); 1115 1116 EntryNode.UseList = nullptr; 1117 InsertNode(&EntryNode); 1118 Root = getEntryNode(); 1119 DbgInfo->clear(); 1120 } 1121 1122 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) { 1123 return VT.bitsGT(Op.getValueType()) 1124 ? getNode(ISD::FP_EXTEND, DL, VT, Op) 1125 : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL)); 1126 } 1127 1128 std::pair<SDValue, SDValue> 1129 SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain, 1130 const SDLoc &DL, EVT VT) { 1131 assert(!VT.bitsEq(Op.getValueType()) && 1132 "Strict no-op FP extend/round not allowed."); 1133 SDValue Res = 1134 VT.bitsGT(Op.getValueType()) 1135 ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op}) 1136 : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other}, 1137 {Chain, Op, getIntPtrConstant(0, DL)}); 1138 1139 return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1)); 1140 } 1141 1142 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1143 return VT.bitsGT(Op.getValueType()) ? 1144 getNode(ISD::ANY_EXTEND, DL, VT, Op) : 1145 getNode(ISD::TRUNCATE, DL, VT, Op); 1146 } 1147 1148 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1149 return VT.bitsGT(Op.getValueType()) ? 1150 getNode(ISD::SIGN_EXTEND, DL, VT, Op) : 1151 getNode(ISD::TRUNCATE, DL, VT, Op); 1152 } 1153 1154 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1155 return VT.bitsGT(Op.getValueType()) ? 1156 getNode(ISD::ZERO_EXTEND, DL, VT, Op) : 1157 getNode(ISD::TRUNCATE, DL, VT, Op); 1158 } 1159 1160 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, 1161 EVT OpVT) { 1162 if (VT.bitsLE(Op.getValueType())) 1163 return getNode(ISD::TRUNCATE, SL, VT, Op); 1164 1165 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT); 1166 return getNode(TLI->getExtendForContent(BType), SL, VT, Op); 1167 } 1168 1169 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) { 1170 assert(!VT.isVector() && 1171 "getZeroExtendInReg should use the vector element type instead of " 1172 "the vector type!"); 1173 if (Op.getValueType().getScalarType() == VT) return Op; 1174 unsigned BitWidth = Op.getScalarValueSizeInBits(); 1175 APInt Imm = APInt::getLowBitsSet(BitWidth, 1176 VT.getSizeInBits()); 1177 return getNode(ISD::AND, DL, Op.getValueType(), Op, 1178 getConstant(Imm, DL, Op.getValueType())); 1179 } 1180 1181 SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1182 // Only unsigned pointer semantics are supported right now. In the future this 1183 // might delegate to TLI to check pointer signedness. 
1184 return getZExtOrTrunc(Op, DL, VT); 1185 } 1186 1187 SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) { 1188 // Only unsigned pointer semantics are supported right now. In the future this 1189 // might delegate to TLI to check pointer signedness. 1190 return getZeroExtendInReg(Op, DL, VT); 1191 } 1192 1193 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1). 1194 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) { 1195 EVT EltVT = VT.getScalarType(); 1196 SDValue NegOne = 1197 getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT); 1198 return getNode(ISD::XOR, DL, VT, Val, NegOne); 1199 } 1200 1201 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) { 1202 SDValue TrueValue = getBoolConstant(true, DL, VT, VT); 1203 return getNode(ISD::XOR, DL, VT, Val, TrueValue); 1204 } 1205 1206 SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT, 1207 EVT OpVT) { 1208 if (!V) 1209 return getConstant(0, DL, VT); 1210 1211 switch (TLI->getBooleanContents(OpVT)) { 1212 case TargetLowering::ZeroOrOneBooleanContent: 1213 case TargetLowering::UndefinedBooleanContent: 1214 return getConstant(1, DL, VT); 1215 case TargetLowering::ZeroOrNegativeOneBooleanContent: 1216 return getAllOnesConstant(DL, VT); 1217 } 1218 llvm_unreachable("Unexpected boolean content enum!"); 1219 } 1220 1221 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT, 1222 bool isT, bool isO) { 1223 EVT EltVT = VT.getScalarType(); 1224 assert((EltVT.getSizeInBits() >= 64 || 1225 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) && 1226 "getConstant with a uint64_t value that doesn't fit in the type!"); 1227 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO); 1228 } 1229 1230 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT, 1231 bool isT, bool isO) { 1232 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO); 1233 } 1234 1235 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL, 1236 EVT VT, bool isT, bool isO) { 1237 assert(VT.isInteger() && "Cannot create FP integer constant!"); 1238 1239 EVT EltVT = VT.getScalarType(); 1240 const ConstantInt *Elt = &Val; 1241 1242 // In some cases the vector type is legal but the element type is illegal and 1243 // needs to be promoted, for example v8i8 on ARM. In this case, promote the 1244 // inserted value (the type does not need to match the vector element type). 1245 // Any extra bits introduced will be truncated away. 1246 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) == 1247 TargetLowering::TypePromoteInteger) { 1248 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); 1249 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits()); 1250 Elt = ConstantInt::get(*getContext(), NewVal); 1251 } 1252 // In other cases the element type is illegal and needs to be expanded, for 1253 // example v2i64 on MIPS32. In this case, find the nearest legal type, split 1254 // the value into n parts and use a vector type with n-times the elements. 1255 // Then bitcast to the type requested. 1256 // Legalizing constants too early makes the DAGCombiner's job harder so we 1257 // only legalize if the DAG tells us we must produce legal types. 
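  // Illustrative example (little-endian): splatting the i64 constant
  // 0x0000000100000002 into a v2i64 when i64 must be expanded builds the
  // v4i32 BUILD_VECTOR <0x2, 0x1, 0x2, 0x1> and bitcasts it back to v2i64.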
1258 else if (NewNodesMustHaveLegalTypes && VT.isVector() && 1259 TLI->getTypeAction(*getContext(), EltVT) == 1260 TargetLowering::TypeExpandInteger) { 1261 const APInt &NewVal = Elt->getValue(); 1262 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); 1263 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits(); 1264 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits; 1265 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts); 1266 1267 // Check the temporary vector is the correct size. If this fails then 1268 // getTypeToTransformTo() probably returned a type whose size (in bits) 1269 // isn't a power-of-2 factor of the requested type size. 1270 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits()); 1271 1272 SmallVector<SDValue, 2> EltParts; 1273 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) { 1274 EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits) 1275 .zextOrTrunc(ViaEltSizeInBits), DL, 1276 ViaEltVT, isT, isO)); 1277 } 1278 1279 // EltParts is currently in little endian order. If we actually want 1280 // big-endian order then reverse it now. 1281 if (getDataLayout().isBigEndian()) 1282 std::reverse(EltParts.begin(), EltParts.end()); 1283 1284 // The elements must be reversed when the element order is different 1285 // to the endianness of the elements (because the BITCAST is itself a 1286 // vector shuffle in this situation). However, we do not need any code to 1287 // perform this reversal because getConstant() is producing a vector 1288 // splat. 1289 // This situation occurs in MIPS MSA. 1290 1291 SmallVector<SDValue, 8> Ops; 1292 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 1293 Ops.insert(Ops.end(), EltParts.begin(), EltParts.end()); 1294 1295 SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops)); 1296 return V; 1297 } 1298 1299 assert(Elt->getBitWidth() == EltVT.getSizeInBits() && 1300 "APInt size does not match type size!"); 1301 unsigned Opc = isT ? 
ISD::TargetConstant : ISD::Constant; 1302 FoldingSetNodeID ID; 1303 AddNodeIDNode(ID, Opc, getVTList(EltVT), None); 1304 ID.AddPointer(Elt); 1305 ID.AddBoolean(isO); 1306 void *IP = nullptr; 1307 SDNode *N = nullptr; 1308 if ((N = FindNodeOrInsertPos(ID, DL, IP))) 1309 if (!VT.isVector()) 1310 return SDValue(N, 0); 1311 1312 if (!N) { 1313 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT); 1314 CSEMap.InsertNode(N, IP); 1315 InsertNode(N); 1316 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this); 1317 } 1318 1319 SDValue Result(N, 0); 1320 if (VT.isScalableVector()) 1321 Result = getSplatVector(VT, DL, Result); 1322 else if (VT.isVector()) 1323 Result = getSplatBuildVector(VT, DL, Result); 1324 1325 return Result; 1326 } 1327 1328 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL, 1329 bool isTarget) { 1330 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget); 1331 } 1332 1333 SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT, 1334 const SDLoc &DL, bool LegalTypes) { 1335 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes); 1336 return getConstant(Val, DL, ShiftVT); 1337 } 1338 1339 SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL, 1340 bool isTarget) { 1341 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget); 1342 } 1343 1344 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT, 1345 bool isTarget) { 1346 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget); 1347 } 1348 1349 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL, 1350 EVT VT, bool isTarget) { 1351 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!"); 1352 1353 EVT EltVT = VT.getScalarType(); 1354 1355 // Do the map lookup using the actual bit pattern for the floating point 1356 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and 1357 // we don't have issues with SNANs. 1358 unsigned Opc = isTarget ? 
ISD::TargetConstantFP : ISD::ConstantFP; 1359 FoldingSetNodeID ID; 1360 AddNodeIDNode(ID, Opc, getVTList(EltVT), None); 1361 ID.AddPointer(&V); 1362 void *IP = nullptr; 1363 SDNode *N = nullptr; 1364 if ((N = FindNodeOrInsertPos(ID, DL, IP))) 1365 if (!VT.isVector()) 1366 return SDValue(N, 0); 1367 1368 if (!N) { 1369 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT); 1370 CSEMap.InsertNode(N, IP); 1371 InsertNode(N); 1372 } 1373 1374 SDValue Result(N, 0); 1375 if (VT.isVector()) 1376 Result = getSplatBuildVector(VT, DL, Result); 1377 NewSDValueDbgMsg(Result, "Creating fp constant: ", this); 1378 return Result; 1379 } 1380 1381 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT, 1382 bool isTarget) { 1383 EVT EltVT = VT.getScalarType(); 1384 if (EltVT == MVT::f32) 1385 return getConstantFP(APFloat((float)Val), DL, VT, isTarget); 1386 else if (EltVT == MVT::f64) 1387 return getConstantFP(APFloat(Val), DL, VT, isTarget); 1388 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 || 1389 EltVT == MVT::f16) { 1390 bool Ignored; 1391 APFloat APF = APFloat(Val); 1392 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven, 1393 &Ignored); 1394 return getConstantFP(APF, DL, VT, isTarget); 1395 } else 1396 llvm_unreachable("Unsupported type in getConstantFP"); 1397 } 1398 1399 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, 1400 EVT VT, int64_t Offset, bool isTargetGA, 1401 unsigned TargetFlags) { 1402 assert((TargetFlags == 0 || isTargetGA) && 1403 "Cannot set target flags on target-independent globals"); 1404 1405 // Truncate (with sign-extension) the offset value to the pointer size. 1406 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); 1407 if (BitWidth < 64) 1408 Offset = SignExtend64(Offset, BitWidth); 1409 1410 unsigned Opc; 1411 if (GV->isThreadLocal()) 1412 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress; 1413 else 1414 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress; 1415 1416 FoldingSetNodeID ID; 1417 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1418 ID.AddPointer(GV); 1419 ID.AddInteger(Offset); 1420 ID.AddInteger(TargetFlags); 1421 void *IP = nullptr; 1422 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 1423 return SDValue(E, 0); 1424 1425 auto *N = newSDNode<GlobalAddressSDNode>( 1426 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags); 1427 CSEMap.InsertNode(N, IP); 1428 InsertNode(N); 1429 return SDValue(N, 0); 1430 } 1431 1432 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) { 1433 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex; 1434 FoldingSetNodeID ID; 1435 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1436 ID.AddInteger(FI); 1437 void *IP = nullptr; 1438 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1439 return SDValue(E, 0); 1440 1441 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget); 1442 CSEMap.InsertNode(N, IP); 1443 InsertNode(N); 1444 return SDValue(N, 0); 1445 } 1446 1447 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget, 1448 unsigned TargetFlags) { 1449 assert((TargetFlags == 0 || isTarget) && 1450 "Cannot set target flags on target-independent jump tables"); 1451 unsigned Opc = isTarget ? 
ISD::TargetJumpTable : ISD::JumpTable; 1452 FoldingSetNodeID ID; 1453 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1454 ID.AddInteger(JTI); 1455 ID.AddInteger(TargetFlags); 1456 void *IP = nullptr; 1457 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1458 return SDValue(E, 0); 1459 1460 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags); 1461 CSEMap.InsertNode(N, IP); 1462 InsertNode(N); 1463 return SDValue(N, 0); 1464 } 1465 1466 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT, 1467 unsigned Alignment, int Offset, 1468 bool isTarget, 1469 unsigned TargetFlags) { 1470 assert((TargetFlags == 0 || isTarget) && 1471 "Cannot set target flags on target-independent globals"); 1472 if (Alignment == 0) 1473 Alignment = shouldOptForSize() 1474 ? getDataLayout().getABITypeAlignment(C->getType()) 1475 : getDataLayout().getPrefTypeAlignment(C->getType()); 1476 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; 1477 FoldingSetNodeID ID; 1478 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1479 ID.AddInteger(Alignment); 1480 ID.AddInteger(Offset); 1481 ID.AddPointer(C); 1482 ID.AddInteger(TargetFlags); 1483 void *IP = nullptr; 1484 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1485 return SDValue(E, 0); 1486 1487 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment, 1488 TargetFlags); 1489 CSEMap.InsertNode(N, IP); 1490 InsertNode(N); 1491 return SDValue(N, 0); 1492 } 1493 1494 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT, 1495 unsigned Alignment, int Offset, 1496 bool isTarget, 1497 unsigned TargetFlags) { 1498 assert((TargetFlags == 0 || isTarget) && 1499 "Cannot set target flags on target-independent globals"); 1500 if (Alignment == 0) 1501 Alignment = getDataLayout().getPrefTypeAlignment(C->getType()); 1502 unsigned Opc = isTarget ? 
ISD::TargetConstantPool : ISD::ConstantPool; 1503 FoldingSetNodeID ID; 1504 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1505 ID.AddInteger(Alignment); 1506 ID.AddInteger(Offset); 1507 C->addSelectionDAGCSEId(ID); 1508 ID.AddInteger(TargetFlags); 1509 void *IP = nullptr; 1510 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1511 return SDValue(E, 0); 1512 1513 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment, 1514 TargetFlags); 1515 CSEMap.InsertNode(N, IP); 1516 InsertNode(N); 1517 return SDValue(N, 0); 1518 } 1519 1520 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset, 1521 unsigned TargetFlags) { 1522 FoldingSetNodeID ID; 1523 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None); 1524 ID.AddInteger(Index); 1525 ID.AddInteger(Offset); 1526 ID.AddInteger(TargetFlags); 1527 void *IP = nullptr; 1528 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1529 return SDValue(E, 0); 1530 1531 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags); 1532 CSEMap.InsertNode(N, IP); 1533 InsertNode(N); 1534 return SDValue(N, 0); 1535 } 1536 1537 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { 1538 FoldingSetNodeID ID; 1539 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None); 1540 ID.AddPointer(MBB); 1541 void *IP = nullptr; 1542 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1543 return SDValue(E, 0); 1544 1545 auto *N = newSDNode<BasicBlockSDNode>(MBB); 1546 CSEMap.InsertNode(N, IP); 1547 InsertNode(N); 1548 return SDValue(N, 0); 1549 } 1550 1551 SDValue SelectionDAG::getValueType(EVT VT) { 1552 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >= 1553 ValueTypeNodes.size()) 1554 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1); 1555 1556 SDNode *&N = VT.isExtended() ? 1557 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy]; 1558 1559 if (N) return SDValue(N, 0); 1560 N = newSDNode<VTSDNode>(VT); 1561 InsertNode(N); 1562 return SDValue(N, 0); 1563 } 1564 1565 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) { 1566 SDNode *&N = ExternalSymbols[Sym]; 1567 if (N) return SDValue(N, 0); 1568 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT); 1569 InsertNode(N); 1570 return SDValue(N, 0); 1571 } 1572 1573 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) { 1574 SDNode *&N = MCSymbols[Sym]; 1575 if (N) 1576 return SDValue(N, 0); 1577 N = newSDNode<MCSymbolSDNode>(Sym, VT); 1578 InsertNode(N); 1579 return SDValue(N, 0); 1580 } 1581 1582 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT, 1583 unsigned TargetFlags) { 1584 SDNode *&N = 1585 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)]; 1586 if (N) return SDValue(N, 0); 1587 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT); 1588 InsertNode(N); 1589 return SDValue(N, 0); 1590 } 1591 1592 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) { 1593 if ((unsigned)Cond >= CondCodeNodes.size()) 1594 CondCodeNodes.resize(Cond+1); 1595 1596 if (!CondCodeNodes[Cond]) { 1597 auto *N = newSDNode<CondCodeSDNode>(Cond); 1598 CondCodeNodes[Cond] = N; 1599 InsertNode(N); 1600 } 1601 1602 return SDValue(CondCodeNodes[Cond], 0); 1603 } 1604 1605 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that 1606 /// point at N1 to point at N2 and indices that point at N2 to point at N1. 
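/// For example, with 4-element inputs the mask <0, 5, 2, 7> becomes
/// <4, 1, 6, 3>; undef (-1) entries are left unchanged.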
1607 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) { 1608 std::swap(N1, N2); 1609 ShuffleVectorSDNode::commuteMask(M); 1610 } 1611 1612 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, 1613 SDValue N2, ArrayRef<int> Mask) { 1614 assert(VT.getVectorNumElements() == Mask.size() && 1615 "Must have the same number of vector elements as mask elements!"); 1616 assert(VT == N1.getValueType() && VT == N2.getValueType() && 1617 "Invalid VECTOR_SHUFFLE"); 1618 1619 // Canonicalize shuffle undef, undef -> undef 1620 if (N1.isUndef() && N2.isUndef()) 1621 return getUNDEF(VT); 1622 1623 // Validate that all indices in Mask are within the range of the elements 1624 // input to the shuffle. 1625 int NElts = Mask.size(); 1626 assert(llvm::all_of(Mask, 1627 [&](int M) { return M < (NElts * 2) && M >= -1; }) && 1628 "Index out of range"); 1629 1630 // Copy the mask so we can do any needed cleanup. 1631 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end()); 1632 1633 // Canonicalize shuffle v, v -> v, undef 1634 if (N1 == N2) { 1635 N2 = getUNDEF(VT); 1636 for (int i = 0; i != NElts; ++i) 1637 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts; 1638 } 1639 1640 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. 1641 if (N1.isUndef()) 1642 commuteShuffle(N1, N2, MaskVec); 1643 1644 if (TLI->hasVectorBlend()) { 1645 // If shuffling a splat, try to blend the splat instead. We do this here so 1646 // that even when this arises during lowering we don't have to re-handle it. 1647 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) { 1648 BitVector UndefElements; 1649 SDValue Splat = BV->getSplatValue(&UndefElements); 1650 if (!Splat) 1651 return; 1652 1653 for (int i = 0; i < NElts; ++i) { 1654 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts)) 1655 continue; 1656 1657 // If this input comes from undef, mark it as such. 1658 if (UndefElements[MaskVec[i] - Offset]) { 1659 MaskVec[i] = -1; 1660 continue; 1661 } 1662 1663 // If we can blend a non-undef lane, use that instead. 1664 if (!UndefElements[i]) 1665 MaskVec[i] = i + Offset; 1666 } 1667 }; 1668 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1)) 1669 BlendSplat(N1BV, 0); 1670 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2)) 1671 BlendSplat(N2BV, NElts); 1672 } 1673 1674 // Canonicalize all index into lhs, -> shuffle lhs, undef 1675 // Canonicalize all index into rhs, -> shuffle rhs, undef 1676 bool AllLHS = true, AllRHS = true; 1677 bool N2Undef = N2.isUndef(); 1678 for (int i = 0; i != NElts; ++i) { 1679 if (MaskVec[i] >= NElts) { 1680 if (N2Undef) 1681 MaskVec[i] = -1; 1682 else 1683 AllLHS = false; 1684 } else if (MaskVec[i] >= 0) { 1685 AllRHS = false; 1686 } 1687 } 1688 if (AllLHS && AllRHS) 1689 return getUNDEF(VT); 1690 if (AllLHS && !N2Undef) 1691 N2 = getUNDEF(VT); 1692 if (AllRHS) { 1693 N1 = getUNDEF(VT); 1694 commuteShuffle(N1, N2, MaskVec); 1695 } 1696 // Reset our undef status after accounting for the mask. 1697 N2Undef = N2.isUndef(); 1698 // Re-check whether both sides ended up undef. 1699 if (N1.isUndef() && N2Undef) 1700 return getUNDEF(VT); 1701 1702 // If Identity shuffle return that node. 1703 bool Identity = true, AllSame = true; 1704 for (int i = 0; i != NElts; ++i) { 1705 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false; 1706 if (MaskVec[i] != MaskVec[0]) AllSame = false; 1707 } 1708 if (Identity && NElts) 1709 return N1; 1710 1711 // Shuffling a constant splat doesn't change the result. 
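// For illustration only (the precise conditions are checked below): with a
// four-lane splat the lane order is irrelevant, so
//   vector_shuffle <x,x,x,x>, undef, <3,1,2,0>  -->  <x,x,x,x>
// This only fires when the splat is visible as a BUILD_VECTOR (possibly
// behind bitcasts) and no undef lane would be shuffled into a defined lane.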
1712 if (N2Undef) {
1713 SDValue V = N1;
1714
1715 // Look through any bitcasts. We check that these don't change the number
1716 // (and size) of elements and just change their types.
1717 while (V.getOpcode() == ISD::BITCAST)
1718 V = V->getOperand(0);
1719
1720 // A splat should always show up as a build vector node.
1721 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1722 BitVector UndefElements;
1723 SDValue Splat = BV->getSplatValue(&UndefElements);
1724 // If this is a splat of an undef, shuffling it is also undef.
1725 if (Splat && Splat.isUndef())
1726 return getUNDEF(VT);
1727
1728 bool SameNumElts =
1729 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1730
1731 // We only have a splat which can skip shuffles if there is a splatted
1732 // value and no undef lanes rearranged by the shuffle.
1733 if (Splat && UndefElements.none()) {
1734 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1735 // number of elements matches or the value splatted is a zero constant.
1736 if (SameNumElts)
1737 return N1;
1738 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1739 if (C->isNullValue())
1740 return N1;
1741 }
1742
1743 // If the shuffle itself creates a splat, build the vector directly.
1744 if (AllSame && SameNumElts) {
1745 EVT BuildVT = BV->getValueType(0);
1746 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1747 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1748
1749 // We may have jumped through bitcasts, so the type of the
1750 // BUILD_VECTOR may not match the type of the shuffle.
1751 if (BuildVT != VT)
1752 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1753 return NewBV;
1754 }
1755 }
1756 }
1757
1758 FoldingSetNodeID ID;
1759 SDValue Ops[2] = { N1, N2 };
1760 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1761 for (int i = 0; i != NElts; ++i)
1762 ID.AddInteger(MaskVec[i]);
1763
1764 void* IP = nullptr;
1765 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1766 return SDValue(E, 0);
1767
1768 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1769 // SDNode doesn't have access to it. This memory will be "leaked" when
1770 // the node is deallocated, but recovered when the NodeAllocator is released.
1771 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1772 llvm::copy(MaskVec, MaskAlloc); 1773 1774 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1775 dl.getDebugLoc(), MaskAlloc); 1776 createOperands(N, Ops); 1777 1778 CSEMap.InsertNode(N, IP); 1779 InsertNode(N); 1780 SDValue V = SDValue(N, 0); 1781 NewSDValueDbgMsg(V, "Creating new node: ", this); 1782 return V; 1783 } 1784 1785 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1786 EVT VT = SV.getValueType(0); 1787 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1788 ShuffleVectorSDNode::commuteMask(MaskVec); 1789 1790 SDValue Op0 = SV.getOperand(0); 1791 SDValue Op1 = SV.getOperand(1); 1792 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1793 } 1794 1795 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1796 FoldingSetNodeID ID; 1797 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1798 ID.AddInteger(RegNo); 1799 void *IP = nullptr; 1800 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1801 return SDValue(E, 0); 1802 1803 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1804 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 1805 CSEMap.InsertNode(N, IP); 1806 InsertNode(N); 1807 return SDValue(N, 0); 1808 } 1809 1810 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1811 FoldingSetNodeID ID; 1812 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1813 ID.AddPointer(RegMask); 1814 void *IP = nullptr; 1815 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1816 return SDValue(E, 0); 1817 1818 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1819 CSEMap.InsertNode(N, IP); 1820 InsertNode(N); 1821 return SDValue(N, 0); 1822 } 1823 1824 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1825 MCSymbol *Label) { 1826 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1827 } 1828 1829 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1830 SDValue Root, MCSymbol *Label) { 1831 FoldingSetNodeID ID; 1832 SDValue Ops[] = { Root }; 1833 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1834 ID.AddPointer(Label); 1835 void *IP = nullptr; 1836 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1837 return SDValue(E, 0); 1838 1839 auto *N = 1840 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label); 1841 createOperands(N, Ops); 1842 1843 CSEMap.InsertNode(N, IP); 1844 InsertNode(N); 1845 return SDValue(N, 0); 1846 } 1847 1848 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1849 int64_t Offset, bool isTarget, 1850 unsigned TargetFlags) { 1851 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1852 1853 FoldingSetNodeID ID; 1854 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1855 ID.AddPointer(BA); 1856 ID.AddInteger(Offset); 1857 ID.AddInteger(TargetFlags); 1858 void *IP = nullptr; 1859 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1860 return SDValue(E, 0); 1861 1862 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1863 CSEMap.InsertNode(N, IP); 1864 InsertNode(N); 1865 return SDValue(N, 0); 1866 } 1867 1868 SDValue SelectionDAG::getSrcValue(const Value *V) { 1869 assert((!V || V->getType()->isPointerTy()) && 1870 "SrcValue is not a pointer?"); 1871 1872 FoldingSetNodeID ID; 1873 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1874 ID.AddPointer(V); 1875 1876 void *IP = nullptr; 1877 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1878 return SDValue(E, 0); 1879 1880 auto *N = newSDNode<SrcValueSDNode>(V); 1881 CSEMap.InsertNode(N, IP); 1882 InsertNode(N); 1883 return SDValue(N, 0); 1884 } 1885 1886 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1887 FoldingSetNodeID ID; 1888 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1889 ID.AddPointer(MD); 1890 1891 void *IP = nullptr; 1892 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1893 return SDValue(E, 0); 1894 1895 auto *N = newSDNode<MDNodeSDNode>(MD); 1896 CSEMap.InsertNode(N, IP); 1897 InsertNode(N); 1898 return SDValue(N, 0); 1899 } 1900 1901 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1902 if (VT == V.getValueType()) 1903 return V; 1904 1905 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1906 } 1907 1908 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1909 unsigned SrcAS, unsigned DestAS) { 1910 SDValue Ops[] = {Ptr}; 1911 FoldingSetNodeID ID; 1912 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1913 ID.AddInteger(SrcAS); 1914 ID.AddInteger(DestAS); 1915 1916 void *IP = nullptr; 1917 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1918 return SDValue(E, 0); 1919 1920 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1921 VT, SrcAS, DestAS); 1922 createOperands(N, Ops); 1923 1924 CSEMap.InsertNode(N, IP); 1925 InsertNode(N); 1926 return SDValue(N, 0); 1927 } 1928 1929 /// getShiftAmountOperand - Return the specified value casted to 1930 /// the target's desired shift amount type. 
1931 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1932 EVT OpTy = Op.getValueType(); 1933 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1934 if (OpTy == ShTy || OpTy.isVector()) return Op; 1935 1936 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1937 } 1938 1939 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1940 SDLoc dl(Node); 1941 const TargetLowering &TLI = getTargetLoweringInfo(); 1942 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1943 EVT VT = Node->getValueType(0); 1944 SDValue Tmp1 = Node->getOperand(0); 1945 SDValue Tmp2 = Node->getOperand(1); 1946 const MaybeAlign MA(Node->getConstantOperandVal(3)); 1947 1948 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1949 Tmp2, MachinePointerInfo(V)); 1950 SDValue VAList = VAListLoad; 1951 1952 if (MA && *MA > TLI.getMinStackArgumentAlignment()) { 1953 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1954 getConstant(MA->value() - 1, dl, VAList.getValueType())); 1955 1956 VAList = 1957 getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1958 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType())); 1959 } 1960 1961 // Increment the pointer, VAList, to the next vaarg 1962 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1963 getConstant(getDataLayout().getTypeAllocSize( 1964 VT.getTypeForEVT(*getContext())), 1965 dl, VAList.getValueType())); 1966 // Store the incremented VAList to the legalized pointer 1967 Tmp1 = 1968 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1969 // Load the actual argument out of the pointer VAList 1970 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1971 } 1972 1973 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1974 SDLoc dl(Node); 1975 const TargetLowering &TLI = getTargetLoweringInfo(); 1976 // This defaults to loading a pointer from the input and storing it to the 1977 // output, returning the chain. 
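// Rough sketch of the expansion, assuming the target's va_list is a single
// pointer (the generic case handled here):
//   *(void **)DestList = *(void **)SrcList;
// i.e. one pointer-sized load from the source list followed by a store to
// the destination list, with the store chained after the load.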
1978 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1979 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1980 SDValue Tmp1 = 1981 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1982 Node->getOperand(2), MachinePointerInfo(VS)); 1983 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1984 MachinePointerInfo(VD)); 1985 } 1986 1987 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1988 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1989 unsigned ByteSize = VT.getStoreSize(); 1990 Type *Ty = VT.getTypeForEVT(*getContext()); 1991 unsigned StackAlign = 1992 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1993 1994 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1995 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1996 } 1997 1998 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1999 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 2000 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 2001 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 2002 const DataLayout &DL = getDataLayout(); 2003 unsigned Align = 2004 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 2005 2006 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 2007 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 2008 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 2009 } 2010 2011 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 2012 ISD::CondCode Cond, const SDLoc &dl) { 2013 EVT OpVT = N1.getValueType(); 2014 2015 // These setcc operations always fold. 2016 switch (Cond) { 2017 default: break; 2018 case ISD::SETFALSE: 2019 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); 2020 case ISD::SETTRUE: 2021 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); 2022 2023 case ISD::SETOEQ: 2024 case ISD::SETOGT: 2025 case ISD::SETOGE: 2026 case ISD::SETOLT: 2027 case ISD::SETOLE: 2028 case ISD::SETONE: 2029 case ISD::SETO: 2030 case ISD::SETUO: 2031 case ISD::SETUEQ: 2032 case ISD::SETUNE: 2033 assert(!OpVT.isInteger() && "Illegal setcc for integer!"); 2034 break; 2035 } 2036 2037 if (OpVT.isInteger()) { 2038 // For EQ and NE, we can always pick a value for the undef to make the 2039 // predicate pass or fail, so we can return undef. 2040 // Matches behavior in llvm::ConstantFoldCompareInstruction. 2041 // icmp eq/ne X, undef -> undef. 2042 if ((N1.isUndef() || N2.isUndef()) && 2043 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) 2044 return getUNDEF(VT); 2045 2046 // If both operands are undef, we can return undef for int comparison. 2047 // icmp undef, undef -> undef. 2048 if (N1.isUndef() && N2.isUndef()) 2049 return getUNDEF(VT); 2050 2051 // icmp X, X -> true/false 2052 // icmp X, undef -> true/false because undef could be X. 
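// For example, with identical integer operands:
//   setcc X, X, setge --> true    (equality satisfies >=)
//   setcc X, X, setne --> false   (equal values are never unequal)
// ISD::isTrueWhenEqual(Cond) below answers exactly this "does equality
// satisfy the predicate" question.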
2053 if (N1 == N2) 2054 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT); 2055 } 2056 2057 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 2058 const APInt &C2 = N2C->getAPIntValue(); 2059 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 2060 const APInt &C1 = N1C->getAPIntValue(); 2061 2062 switch (Cond) { 2063 default: llvm_unreachable("Unknown integer setcc!"); 2064 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT); 2065 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT); 2066 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT); 2067 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT); 2068 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT); 2069 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT); 2070 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT); 2071 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT); 2072 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT); 2073 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT); 2074 } 2075 } 2076 } 2077 2078 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 2079 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 2080 2081 if (N1CFP && N2CFP) { 2082 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF()); 2083 switch (Cond) { 2084 default: break; 2085 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 2086 return getUNDEF(VT); 2087 LLVM_FALLTHROUGH; 2088 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, 2089 OpVT); 2090 case ISD::SETNE: if (R==APFloat::cmpUnordered) 2091 return getUNDEF(VT); 2092 LLVM_FALLTHROUGH; 2093 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2094 R==APFloat::cmpLessThan, dl, VT, 2095 OpVT); 2096 case ISD::SETLT: if (R==APFloat::cmpUnordered) 2097 return getUNDEF(VT); 2098 LLVM_FALLTHROUGH; 2099 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, 2100 OpVT); 2101 case ISD::SETGT: if (R==APFloat::cmpUnordered) 2102 return getUNDEF(VT); 2103 LLVM_FALLTHROUGH; 2104 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, 2105 VT, OpVT); 2106 case ISD::SETLE: if (R==APFloat::cmpUnordered) 2107 return getUNDEF(VT); 2108 LLVM_FALLTHROUGH; 2109 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || 2110 R==APFloat::cmpEqual, dl, VT, 2111 OpVT); 2112 case ISD::SETGE: if (R==APFloat::cmpUnordered) 2113 return getUNDEF(VT); 2114 LLVM_FALLTHROUGH; 2115 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2116 R==APFloat::cmpEqual, dl, VT, OpVT); 2117 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, 2118 OpVT); 2119 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, 2120 OpVT); 2121 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || 2122 R==APFloat::cmpEqual, dl, VT, 2123 OpVT); 2124 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, 2125 OpVT); 2126 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || 2127 R==APFloat::cmpLessThan, dl, VT, 2128 OpVT); 2129 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || 2130 R==APFloat::cmpUnordered, dl, VT, 2131 OpVT); 2132 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, 2133 VT, OpVT); 2134 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, 2135 OpVT); 2136 } 2137 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) { 2138 // 
Ensure that the constant occurs on the RHS. 2139 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 2140 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT())) 2141 return SDValue(); 2142 return getSetCC(dl, VT, N2, N1, SwappedCond); 2143 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) || 2144 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) { 2145 // If an operand is known to be a nan (or undef that could be a nan), we can 2146 // fold it. 2147 // Choosing NaN for the undef will always make unordered comparison succeed 2148 // and ordered comparison fails. 2149 // Matches behavior in llvm::ConstantFoldCompareInstruction. 2150 switch (ISD::getUnorderedFlavor(Cond)) { 2151 default: 2152 llvm_unreachable("Unknown flavor!"); 2153 case 0: // Known false. 2154 return getBoolConstant(false, dl, VT, OpVT); 2155 case 1: // Known true. 2156 return getBoolConstant(true, dl, VT, OpVT); 2157 case 2: // Undefined. 2158 return getUNDEF(VT); 2159 } 2160 } 2161 2162 // Could not fold it. 2163 return SDValue(); 2164 } 2165 2166 /// See if the specified operand can be simplified with the knowledge that only 2167 /// the bits specified by DemandedBits are used. 2168 /// TODO: really we should be making this into the DAG equivalent of 2169 /// SimplifyMultipleUseDemandedBits and not generate any new nodes. 2170 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) { 2171 EVT VT = V.getValueType(); 2172 APInt DemandedElts = VT.isVector() 2173 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2174 : APInt(1, 1); 2175 return GetDemandedBits(V, DemandedBits, DemandedElts); 2176 } 2177 2178 /// See if the specified operand can be simplified with the knowledge that only 2179 /// the bits specified by DemandedBits are used in the elements specified by 2180 /// DemandedElts. 2181 /// TODO: really we should be making this into the DAG equivalent of 2182 /// SimplifyMultipleUseDemandedBits and not generate any new nodes. 2183 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits, 2184 const APInt &DemandedElts) { 2185 switch (V.getOpcode()) { 2186 default: 2187 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts, 2188 *this, 0); 2189 break; 2190 case ISD::Constant: { 2191 auto *CV = cast<ConstantSDNode>(V.getNode()); 2192 assert(CV && "Const value should be ConstSDNode."); 2193 const APInt &CVal = CV->getAPIntValue(); 2194 APInt NewVal = CVal & DemandedBits; 2195 if (NewVal != CVal) 2196 return getConstant(NewVal, SDLoc(V), V.getValueType()); 2197 break; 2198 } 2199 case ISD::SRL: 2200 // Only look at single-use SRLs. 2201 if (!V.getNode()->hasOneUse()) 2202 break; 2203 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 2204 // See if we can recursively simplify the LHS. 2205 unsigned Amt = RHSC->getZExtValue(); 2206 2207 // Watch out for shift count overflow though. 2208 if (Amt >= DemandedBits.getBitWidth()) 2209 break; 2210 APInt SrcDemandedBits = DemandedBits << Amt; 2211 if (SDValue SimplifyLHS = 2212 GetDemandedBits(V.getOperand(0), SrcDemandedBits)) 2213 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS, 2214 V.getOperand(1)); 2215 } 2216 break; 2217 case ISD::AND: { 2218 // X & -1 -> X (ignoring bits which aren't demanded). 2219 // Also handle the case where masked out bits in X are known to be zero. 
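// Worked example: if only the low byte is demanded (DemandedBits = 0x00FF)
// and the AND mask is 0x0FFF, every demanded bit passes through the AND
// unchanged, so the AND can be bypassed and the LHS returned directly. The
// second check below additionally allows mask bits to be clear where the
// LHS is already known to be zero.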
2220 if (ConstantSDNode *RHSC = isConstOrConstSplat(V.getOperand(1))) { 2221 const APInt &AndVal = RHSC->getAPIntValue(); 2222 if (DemandedBits.isSubsetOf(AndVal) || 2223 DemandedBits.isSubsetOf(computeKnownBits(V.getOperand(0)).Zero | 2224 AndVal)) 2225 return V.getOperand(0); 2226 } 2227 break; 2228 } 2229 } 2230 return SDValue(); 2231 } 2232 2233 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2234 /// use this predicate to simplify operations downstream. 2235 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2236 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2237 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2238 } 2239 2240 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2241 /// this predicate to simplify operations downstream. Mask is known to be zero 2242 /// for bits that V cannot have. 2243 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2244 unsigned Depth) const { 2245 EVT VT = V.getValueType(); 2246 APInt DemandedElts = VT.isVector() 2247 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2248 : APInt(1, 1); 2249 return MaskedValueIsZero(V, Mask, DemandedElts, Depth); 2250 } 2251 2252 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in 2253 /// DemandedElts. We use this predicate to simplify operations downstream. 2254 /// Mask is known to be zero for bits that V cannot have. 2255 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2256 const APInt &DemandedElts, 2257 unsigned Depth) const { 2258 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero); 2259 } 2260 2261 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'. 2262 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask, 2263 unsigned Depth) const { 2264 return Mask.isSubsetOf(computeKnownBits(V, Depth).One); 2265 } 2266 2267 /// isSplatValue - Return true if the vector V has the same value 2268 /// across all DemandedElts. 2269 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, 2270 APInt &UndefElts) { 2271 if (!DemandedElts) 2272 return false; // No demanded elts, better to assume we don't know anything. 2273 2274 EVT VT = V.getValueType(); 2275 assert(VT.isVector() && "Vector type expected"); 2276 2277 unsigned NumElts = VT.getVectorNumElements(); 2278 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch"); 2279 UndefElts = APInt::getNullValue(NumElts); 2280 2281 switch (V.getOpcode()) { 2282 case ISD::BUILD_VECTOR: { 2283 SDValue Scl; 2284 for (unsigned i = 0; i != NumElts; ++i) { 2285 SDValue Op = V.getOperand(i); 2286 if (Op.isUndef()) { 2287 UndefElts.setBit(i); 2288 continue; 2289 } 2290 if (!DemandedElts[i]) 2291 continue; 2292 if (Scl && Scl != Op) 2293 return false; 2294 Scl = Op; 2295 } 2296 return true; 2297 } 2298 case ISD::VECTOR_SHUFFLE: { 2299 // Check if this is a shuffle node doing a splat. 2300 // TODO: Do we need to handle shuffle(splat, undef, mask)? 
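// For example, mask <2,2,-1,2> is treated as a splat of source lane 2, with
// the -1 lane recorded in UndefElts; mask <2,3,2,2> is rejected (if all
// lanes are demanded) because two different source lanes are referenced.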
2301 int SplatIndex = -1; 2302 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); 2303 for (int i = 0; i != (int)NumElts; ++i) { 2304 int M = Mask[i]; 2305 if (M < 0) { 2306 UndefElts.setBit(i); 2307 continue; 2308 } 2309 if (!DemandedElts[i]) 2310 continue; 2311 if (0 <= SplatIndex && SplatIndex != M) 2312 return false; 2313 SplatIndex = M; 2314 } 2315 return true; 2316 } 2317 case ISD::EXTRACT_SUBVECTOR: { 2318 SDValue Src = V.getOperand(0); 2319 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(V.getOperand(1)); 2320 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2321 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2322 // Offset the demanded elts by the subvector index. 2323 uint64_t Idx = SubIdx->getZExtValue(); 2324 APInt UndefSrcElts; 2325 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2326 if (isSplatValue(Src, DemandedSrc, UndefSrcElts)) { 2327 UndefElts = UndefSrcElts.extractBits(NumElts, Idx); 2328 return true; 2329 } 2330 } 2331 break; 2332 } 2333 case ISD::ADD: 2334 case ISD::SUB: 2335 case ISD::AND: { 2336 APInt UndefLHS, UndefRHS; 2337 SDValue LHS = V.getOperand(0); 2338 SDValue RHS = V.getOperand(1); 2339 if (isSplatValue(LHS, DemandedElts, UndefLHS) && 2340 isSplatValue(RHS, DemandedElts, UndefRHS)) { 2341 UndefElts = UndefLHS | UndefRHS; 2342 return true; 2343 } 2344 break; 2345 } 2346 } 2347 2348 return false; 2349 } 2350 2351 /// Helper wrapper to main isSplatValue function. 2352 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) { 2353 EVT VT = V.getValueType(); 2354 assert(VT.isVector() && "Vector type expected"); 2355 unsigned NumElts = VT.getVectorNumElements(); 2356 2357 APInt UndefElts; 2358 APInt DemandedElts = APInt::getAllOnesValue(NumElts); 2359 return isSplatValue(V, DemandedElts, UndefElts) && 2360 (AllowUndefs || !UndefElts); 2361 } 2362 2363 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) { 2364 V = peekThroughExtractSubvectors(V); 2365 2366 EVT VT = V.getValueType(); 2367 unsigned Opcode = V.getOpcode(); 2368 switch (Opcode) { 2369 default: { 2370 APInt UndefElts; 2371 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); 2372 if (isSplatValue(V, DemandedElts, UndefElts)) { 2373 // Handle case where all demanded elements are UNDEF. 2374 if (DemandedElts.isSubsetOf(UndefElts)) { 2375 SplatIdx = 0; 2376 return getUNDEF(VT); 2377 } 2378 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes(); 2379 return V; 2380 } 2381 break; 2382 } 2383 case ISD::VECTOR_SHUFFLE: { 2384 // Check if this is a shuffle node doing a splat. 2385 // TODO - remove this and rely purely on SelectionDAG::isSplatValue, 2386 // getTargetVShiftNode currently struggles without the splat source. 
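// Sketch of the index arithmetic below for 4-element vectors: a splat
// index of 5 names lane 1 of the second shuffle operand, so the source is
// getOperand(5 / 4) == getOperand(1) and SplatIdx is reported as 5 % 4 == 1.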
2387 auto *SVN = cast<ShuffleVectorSDNode>(V); 2388 if (!SVN->isSplat()) 2389 break; 2390 int Idx = SVN->getSplatIndex(); 2391 int NumElts = V.getValueType().getVectorNumElements(); 2392 SplatIdx = Idx % NumElts; 2393 return V.getOperand(Idx / NumElts); 2394 } 2395 } 2396 2397 return SDValue(); 2398 } 2399 2400 SDValue SelectionDAG::getSplatValue(SDValue V) { 2401 int SplatIdx; 2402 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) 2403 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), 2404 SrcVector.getValueType().getScalarType(), SrcVector, 2405 getVectorIdxConstant(SplatIdx, SDLoc(V))); 2406 return SDValue(); 2407 } 2408 2409 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 2410 /// is less than the element bit-width of the shift node, return it. 2411 static const APInt *getValidShiftAmountConstant(SDValue V, 2412 const APInt &DemandedElts) { 2413 unsigned BitWidth = V.getScalarValueSizeInBits(); 2414 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) { 2415 // Shifting more than the bitwidth is not valid. 2416 const APInt &ShAmt = SA->getAPIntValue(); 2417 if (ShAmt.ult(BitWidth)) 2418 return &ShAmt; 2419 } 2420 return nullptr; 2421 } 2422 2423 /// If a SHL/SRA/SRL node has constant vector shift amounts that are all less 2424 /// than the element bit-width of the shift node, return the minimum value. 2425 static const APInt * 2426 getValidMinimumShiftAmountConstant(SDValue V, const APInt &DemandedElts) { 2427 unsigned BitWidth = V.getScalarValueSizeInBits(); 2428 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); 2429 if (!BV) 2430 return nullptr; 2431 const APInt *MinShAmt = nullptr; 2432 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2433 if (!DemandedElts[i]) 2434 continue; 2435 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); 2436 if (!SA) 2437 return nullptr; 2438 // Shifting more than the bitwidth is not valid. 2439 const APInt &ShAmt = SA->getAPIntValue(); 2440 if (ShAmt.uge(BitWidth)) 2441 return nullptr; 2442 if (MinShAmt && MinShAmt->ule(ShAmt)) 2443 continue; 2444 MinShAmt = &ShAmt; 2445 } 2446 return MinShAmt; 2447 } 2448 2449 /// If a SHL/SRA/SRL node has constant vector shift amounts that are all less 2450 /// than the element bit-width of the shift node, return the maximum value. 2451 static const APInt * 2452 getValidMaximumShiftAmountConstant(SDValue V, const APInt &DemandedElts) { 2453 unsigned BitWidth = V.getScalarValueSizeInBits(); 2454 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); 2455 if (!BV) 2456 return nullptr; 2457 const APInt *MaxShAmt = nullptr; 2458 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2459 if (!DemandedElts[i]) 2460 continue; 2461 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); 2462 if (!SA) 2463 return nullptr; 2464 // Shifting more than the bitwidth is not valid. 2465 const APInt &ShAmt = SA->getAPIntValue(); 2466 if (ShAmt.uge(BitWidth)) 2467 return nullptr; 2468 if (MaxShAmt && MaxShAmt->uge(ShAmt)) 2469 continue; 2470 MaxShAmt = &ShAmt; 2471 } 2472 return MaxShAmt; 2473 } 2474 2475 /// Determine which bits of Op are known to be either zero or one and return 2476 /// them in Known. For vectors, the known bits are those that are shared by 2477 /// every vector element. 2478 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const { 2479 EVT VT = Op.getValueType(); 2480 APInt DemandedElts = VT.isVector() 2481 ? 
APInt::getAllOnesValue(VT.getVectorNumElements()) 2482 : APInt(1, 1); 2483 return computeKnownBits(Op, DemandedElts, Depth); 2484 } 2485 2486 /// Determine which bits of Op are known to be either zero or one and return 2487 /// them in Known. The DemandedElts argument allows us to only collect the known 2488 /// bits that are shared by the requested vector elements. 2489 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts, 2490 unsigned Depth) const { 2491 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2492 2493 KnownBits Known(BitWidth); // Don't know anything. 2494 2495 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2496 // We know all of the bits for a constant! 2497 Known.One = C->getAPIntValue(); 2498 Known.Zero = ~Known.One; 2499 return Known; 2500 } 2501 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { 2502 // We know all of the bits for a constant fp! 2503 Known.One = C->getValueAPF().bitcastToAPInt(); 2504 Known.Zero = ~Known.One; 2505 return Known; 2506 } 2507 2508 if (Depth >= MaxRecursionDepth) 2509 return Known; // Limit search depth. 2510 2511 KnownBits Known2; 2512 unsigned NumElts = DemandedElts.getBitWidth(); 2513 assert((!Op.getValueType().isVector() || 2514 NumElts == Op.getValueType().getVectorNumElements()) && 2515 "Unexpected vector size"); 2516 2517 if (!DemandedElts) 2518 return Known; // No demanded elts, better to assume we don't know anything. 2519 2520 unsigned Opcode = Op.getOpcode(); 2521 switch (Opcode) { 2522 case ISD::BUILD_VECTOR: 2523 // Collect the known bits that are shared by every demanded vector element. 2524 Known.Zero.setAllBits(); Known.One.setAllBits(); 2525 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2526 if (!DemandedElts[i]) 2527 continue; 2528 2529 SDValue SrcOp = Op.getOperand(i); 2530 Known2 = computeKnownBits(SrcOp, Depth + 1); 2531 2532 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2533 if (SrcOp.getValueSizeInBits() != BitWidth) { 2534 assert(SrcOp.getValueSizeInBits() > BitWidth && 2535 "Expected BUILD_VECTOR implicit truncation"); 2536 Known2 = Known2.trunc(BitWidth); 2537 } 2538 2539 // Known bits are the values that are shared by every demanded element. 2540 Known.One &= Known2.One; 2541 Known.Zero &= Known2.Zero; 2542 2543 // If we don't know any bits, early out. 2544 if (Known.isUnknown()) 2545 break; 2546 } 2547 break; 2548 case ISD::VECTOR_SHUFFLE: { 2549 // Collect the known bits that are shared by every vector element referenced 2550 // by the shuffle. 2551 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2552 Known.Zero.setAllBits(); Known.One.setAllBits(); 2553 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2554 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2555 for (unsigned i = 0; i != NumElts; ++i) { 2556 if (!DemandedElts[i]) 2557 continue; 2558 2559 int M = SVN->getMaskElt(i); 2560 if (M < 0) { 2561 // For UNDEF elements, we don't know anything about the common state of 2562 // the shuffle result. 2563 Known.resetAll(); 2564 DemandedLHS.clearAllBits(); 2565 DemandedRHS.clearAllBits(); 2566 break; 2567 } 2568 2569 if ((unsigned)M < NumElts) 2570 DemandedLHS.setBit((unsigned)M % NumElts); 2571 else 2572 DemandedRHS.setBit((unsigned)M % NumElts); 2573 } 2574 // Known bits are the values that are shared by every demanded element. 
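// Worked example of the intersection performed below: if the lanes pulled
// from the LHS have {Zero = 0xFF00, One = 0x000F} and the lanes pulled from
// the RHS have {Zero = 0xF000, One = 0x0007}, the shuffle result keeps only
// what both agree on: {Zero = 0xF000, One = 0x0007}.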
2575 if (!!DemandedLHS) {
2576 SDValue LHS = Op.getOperand(0);
2577 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2578 Known.One &= Known2.One;
2579 Known.Zero &= Known2.Zero;
2580 }
2581 // If we don't know any bits, early out.
2582 if (Known.isUnknown())
2583 break;
2584 if (!!DemandedRHS) {
2585 SDValue RHS = Op.getOperand(1);
2586 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2587 Known.One &= Known2.One;
2588 Known.Zero &= Known2.Zero;
2589 }
2590 break;
2591 }
2592 case ISD::CONCAT_VECTORS: {
2593 // Split DemandedElts and test each of the demanded subvectors.
2594 Known.Zero.setAllBits(); Known.One.setAllBits();
2595 EVT SubVectorVT = Op.getOperand(0).getValueType();
2596 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2597 unsigned NumSubVectors = Op.getNumOperands();
2598 for (unsigned i = 0; i != NumSubVectors; ++i) {
2599 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2600 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2601 if (!!DemandedSub) {
2602 SDValue Sub = Op.getOperand(i);
2603 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2604 Known.One &= Known2.One;
2605 Known.Zero &= Known2.Zero;
2606 }
2607 // If we don't know any bits, early out.
2608 if (Known.isUnknown())
2609 break;
2610 }
2611 break;
2612 }
2613 case ISD::INSERT_SUBVECTOR: {
2614 // If we know the element index, demand any elements from the subvector and
2615 // the remainder from the src it's inserted into, otherwise demand them all.
2616 SDValue Src = Op.getOperand(0);
2617 SDValue Sub = Op.getOperand(1);
2618 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2619 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2620 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
2621 Known.One.setAllBits();
2622 Known.Zero.setAllBits();
2623 uint64_t Idx = SubIdx->getZExtValue();
2624 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2625 if (!!DemandedSubElts) {
2626 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2627 if (Known.isUnknown())
2628 break; // early-out.
2629 }
2630 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
2631 APInt DemandedSrcElts = DemandedElts & ~SubMask;
2632 if (!!DemandedSrcElts) {
2633 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2634 Known.One &= Known2.One;
2635 Known.Zero &= Known2.Zero;
2636 }
2637 } else {
2638 Known = computeKnownBits(Sub, Depth + 1);
2639 if (Known.isUnknown())
2640 break; // early-out.
2641 Known2 = computeKnownBits(Src, Depth + 1);
2642 Known.One &= Known2.One;
2643 Known.Zero &= Known2.Zero;
2644 }
2645 break;
2646 }
2647 case ISD::EXTRACT_SUBVECTOR: {
2648 // If we know the element index, just demand those subvector elements,
2649 // otherwise demand them all.
2650 SDValue Src = Op.getOperand(0);
2651 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2652 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2653 APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
2654 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2655 // Offset the demanded elts by the subvector index.
2656 uint64_t Idx = SubIdx->getZExtValue();
2657 DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2658 }
2659 Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
2660 break;
2661 }
2662 case ISD::SCALAR_TO_VECTOR: {
2663 // We know as much about scalar_to_vector as we know about its source,
2664 // which becomes the first element of an otherwise unknown vector.
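// Only lane 0 of the result carries the scalar operand; every other lane is
// undefined. Hence the early-out below: unless bit 0 is the only demanded
// element, nothing useful can be reported.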
2665 if (DemandedElts != 1) 2666 break; 2667 2668 SDValue N0 = Op.getOperand(0); 2669 Known = computeKnownBits(N0, Depth + 1); 2670 if (N0.getValueSizeInBits() != BitWidth) 2671 Known = Known.trunc(BitWidth); 2672 2673 break; 2674 } 2675 case ISD::BITCAST: { 2676 SDValue N0 = Op.getOperand(0); 2677 EVT SubVT = N0.getValueType(); 2678 unsigned SubBitWidth = SubVT.getScalarSizeInBits(); 2679 2680 // Ignore bitcasts from unsupported types. 2681 if (!(SubVT.isInteger() || SubVT.isFloatingPoint())) 2682 break; 2683 2684 // Fast handling of 'identity' bitcasts. 2685 if (BitWidth == SubBitWidth) { 2686 Known = computeKnownBits(N0, DemandedElts, Depth + 1); 2687 break; 2688 } 2689 2690 bool IsLE = getDataLayout().isLittleEndian(); 2691 2692 // Bitcast 'small element' vector to 'large element' scalar/vector. 2693 if ((BitWidth % SubBitWidth) == 0) { 2694 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2695 2696 // Collect known bits for the (larger) output by collecting the known 2697 // bits from each set of sub elements and shift these into place. 2698 // We need to separately call computeKnownBits for each set of 2699 // sub elements as the knownbits for each is likely to be different. 2700 unsigned SubScale = BitWidth / SubBitWidth; 2701 APInt SubDemandedElts(NumElts * SubScale, 0); 2702 for (unsigned i = 0; i != NumElts; ++i) 2703 if (DemandedElts[i]) 2704 SubDemandedElts.setBit(i * SubScale); 2705 2706 for (unsigned i = 0; i != SubScale; ++i) { 2707 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i), 2708 Depth + 1); 2709 unsigned Shifts = IsLE ? i : SubScale - 1 - i; 2710 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts); 2711 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts); 2712 } 2713 } 2714 2715 // Bitcast 'large element' scalar/vector to 'small element' vector. 2716 if ((SubBitWidth % BitWidth) == 0) { 2717 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 2718 2719 // Collect known bits for the (smaller) output by collecting the known 2720 // bits from the overlapping larger input elements and extracting the 2721 // sub sections we actually care about. 2722 unsigned SubScale = SubBitWidth / BitWidth; 2723 APInt SubDemandedElts(NumElts / SubScale, 0); 2724 for (unsigned i = 0; i != NumElts; ++i) 2725 if (DemandedElts[i]) 2726 SubDemandedElts.setBit(i / SubScale); 2727 2728 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1); 2729 2730 Known.Zero.setAllBits(); Known.One.setAllBits(); 2731 for (unsigned i = 0; i != NumElts; ++i) 2732 if (DemandedElts[i]) { 2733 unsigned Shifts = IsLE ? i : NumElts - 1 - i; 2734 unsigned Offset = (Shifts % SubScale) * BitWidth; 2735 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth); 2736 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth); 2737 // If we don't know any bits, early out. 2738 if (Known.isUnknown()) 2739 break; 2740 } 2741 } 2742 break; 2743 } 2744 case ISD::AND: 2745 // If either the LHS or the RHS are Zero, the result is zero. 2746 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2747 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2748 2749 // Output known-1 bits are only known if set in both the LHS & RHS. 2750 Known.One &= Known2.One; 2751 // Output known-0 are known to be clear if zero in either the LHS | RHS. 
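// Worked example of the merge (x = unknown bit):
//   LHS known 0bxx01, RHS known 0bx011  ==>  AND known 0bx001
// A result bit is known one only where both inputs are known one (bit 0),
// and known zero wherever either input is known zero (bits 1 and 2).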
2752 Known.Zero |= Known2.Zero; 2753 break; 2754 case ISD::OR: 2755 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2756 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2757 2758 // Output known-0 bits are only known if clear in both the LHS & RHS. 2759 Known.Zero &= Known2.Zero; 2760 // Output known-1 are known to be set if set in either the LHS | RHS. 2761 Known.One |= Known2.One; 2762 break; 2763 case ISD::XOR: { 2764 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2765 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2766 2767 // Output known-0 bits are known if clear or set in both the LHS & RHS. 2768 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One); 2769 // Output known-1 are known to be set if set in only one of the LHS, RHS. 2770 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero); 2771 Known.Zero = KnownZeroOut; 2772 break; 2773 } 2774 case ISD::MUL: { 2775 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2776 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2777 2778 // If low bits are zero in either operand, output low known-0 bits. 2779 // Also compute a conservative estimate for high known-0 bits. 2780 // More trickiness is possible, but this is sufficient for the 2781 // interesting case of alignment computation. 2782 unsigned TrailZ = Known.countMinTrailingZeros() + 2783 Known2.countMinTrailingZeros(); 2784 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 2785 Known2.countMinLeadingZeros(), 2786 BitWidth) - BitWidth; 2787 2788 Known.resetAll(); 2789 Known.Zero.setLowBits(std::min(TrailZ, BitWidth)); 2790 Known.Zero.setHighBits(std::min(LeadZ, BitWidth)); 2791 break; 2792 } 2793 case ISD::UDIV: { 2794 // For the purposes of computing leading zeros we can conservatively 2795 // treat a udiv as a logical right shift by the power of 2 known to 2796 // be less than the denominator. 2797 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2798 unsigned LeadZ = Known2.countMinLeadingZeros(); 2799 2800 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2801 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2802 if (RHSMaxLeadingZeros != BitWidth) 2803 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2804 2805 Known.Zero.setHighBits(LeadZ); 2806 break; 2807 } 2808 case ISD::SELECT: 2809 case ISD::VSELECT: 2810 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2811 // If we don't know any bits, early out. 2812 if (Known.isUnknown()) 2813 break; 2814 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1); 2815 2816 // Only known if known in both the LHS and RHS. 2817 Known.One &= Known2.One; 2818 Known.Zero &= Known2.Zero; 2819 break; 2820 case ISD::SELECT_CC: 2821 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1); 2822 // If we don't know any bits, early out. 2823 if (Known.isUnknown()) 2824 break; 2825 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2826 2827 // Only known if known in both the LHS and RHS. 2828 Known.One &= Known2.One; 2829 Known.Zero &= Known2.Zero; 2830 break; 2831 case ISD::SMULO: 2832 case ISD::UMULO: 2833 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 2834 if (Op.getResNo() != 1) 2835 break; 2836 // The boolean result conforms to getBooleanContents. 2837 // If we know the result of a setcc has the top bits zero, use this info. 
2838 // We know that we have an integer-based boolean since these operations 2839 // are only available for integer. 2840 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2841 TargetLowering::ZeroOrOneBooleanContent && 2842 BitWidth > 1) 2843 Known.Zero.setBitsFrom(1); 2844 break; 2845 case ISD::SETCC: 2846 case ISD::STRICT_FSETCC: 2847 case ISD::STRICT_FSETCCS: { 2848 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 2849 // If we know the result of a setcc has the top bits zero, use this info. 2850 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 2851 TargetLowering::ZeroOrOneBooleanContent && 2852 BitWidth > 1) 2853 Known.Zero.setBitsFrom(1); 2854 break; 2855 } 2856 case ISD::SHL: 2857 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2858 2859 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 2860 unsigned Shift = ShAmt->getZExtValue(); 2861 Known.Zero <<= Shift; 2862 Known.One <<= Shift; 2863 // Low bits are known zero. 2864 Known.Zero.setLowBits(Shift); 2865 break; 2866 } 2867 2868 // No matter the shift amount, the trailing zeros will stay zero. 2869 Known.Zero = APInt::getLowBitsSet(BitWidth, Known.countMinTrailingZeros()); 2870 Known.One.clearAllBits(); 2871 2872 // Minimum shift low bits are known zero. 2873 if (const APInt *ShMinAmt = 2874 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 2875 Known.Zero.setLowBits(ShMinAmt->getZExtValue()); 2876 break; 2877 case ISD::SRL: 2878 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2879 2880 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 2881 unsigned Shift = ShAmt->getZExtValue(); 2882 Known.Zero.lshrInPlace(Shift); 2883 Known.One.lshrInPlace(Shift); 2884 // High bits are known zero. 2885 Known.Zero.setHighBits(Shift); 2886 break; 2887 } 2888 2889 // No matter the shift amount, the leading zeros will stay zero. 2890 Known.Zero = APInt::getHighBitsSet(BitWidth, Known.countMinLeadingZeros()); 2891 Known.One.clearAllBits(); 2892 2893 // Minimum shift high bits are known zero. 2894 if (const APInt *ShMinAmt = 2895 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 2896 Known.Zero.setHighBits(ShMinAmt->getZExtValue()); 2897 break; 2898 case ISD::SRA: 2899 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 2900 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2901 unsigned Shift = ShAmt->getZExtValue(); 2902 // Sign extend known zero/one bit (else is unknown). 2903 Known.Zero.ashrInPlace(Shift); 2904 Known.One.ashrInPlace(Shift); 2905 } 2906 break; 2907 case ISD::FSHL: 2908 case ISD::FSHR: 2909 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) { 2910 unsigned Amt = C->getAPIntValue().urem(BitWidth); 2911 2912 // For fshl, 0-shift returns the 1st arg. 2913 // For fshr, 0-shift returns the 2nd arg. 2914 if (Amt == 0) { 2915 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 
0 : 1), 2916 DemandedElts, Depth + 1); 2917 break; 2918 } 2919 2920 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 2921 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 2922 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2923 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2924 if (Opcode == ISD::FSHL) { 2925 Known.One <<= Amt; 2926 Known.Zero <<= Amt; 2927 Known2.One.lshrInPlace(BitWidth - Amt); 2928 Known2.Zero.lshrInPlace(BitWidth - Amt); 2929 } else { 2930 Known.One <<= BitWidth - Amt; 2931 Known.Zero <<= BitWidth - Amt; 2932 Known2.One.lshrInPlace(Amt); 2933 Known2.Zero.lshrInPlace(Amt); 2934 } 2935 Known.One |= Known2.One; 2936 Known.Zero |= Known2.Zero; 2937 } 2938 break; 2939 case ISD::SIGN_EXTEND_INREG: { 2940 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2941 unsigned EBits = EVT.getScalarSizeInBits(); 2942 2943 // Sign extension. Compute the demanded bits in the result that are not 2944 // present in the input. 2945 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2946 2947 APInt InSignMask = APInt::getSignMask(EBits); 2948 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2949 2950 // If the sign extended bits are demanded, we know that the sign 2951 // bit is demanded. 2952 InSignMask = InSignMask.zext(BitWidth); 2953 if (NewBits.getBoolValue()) 2954 InputDemandedBits |= InSignMask; 2955 2956 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2957 Known.One &= InputDemandedBits; 2958 Known.Zero &= InputDemandedBits; 2959 2960 // If the sign bit of the input is known set or clear, then we know the 2961 // top bits of the result. 2962 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear 2963 Known.Zero |= NewBits; 2964 Known.One &= ~NewBits; 2965 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set 2966 Known.One |= NewBits; 2967 Known.Zero &= ~NewBits; 2968 } else { // Input sign bit unknown 2969 Known.Zero &= ~NewBits; 2970 Known.One &= ~NewBits; 2971 } 2972 break; 2973 } 2974 case ISD::CTTZ: 2975 case ISD::CTTZ_ZERO_UNDEF: { 2976 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2977 // If we have a known 1, its position is our upper bound. 2978 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 2979 unsigned LowBits = Log2_32(PossibleTZ) + 1; 2980 Known.Zero.setBitsFrom(LowBits); 2981 break; 2982 } 2983 case ISD::CTLZ: 2984 case ISD::CTLZ_ZERO_UNDEF: { 2985 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2986 // If we have a known 1, its position is our upper bound. 2987 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 2988 unsigned LowBits = Log2_32(PossibleLZ) + 1; 2989 Known.Zero.setBitsFrom(LowBits); 2990 break; 2991 } 2992 case ISD::CTPOP: { 2993 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2994 // If we know some of the bits are zero, they can't be one. 2995 unsigned PossibleOnes = Known2.countMaxPopulation(); 2996 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 2997 break; 2998 } 2999 case ISD::LOAD: { 3000 LoadSDNode *LD = cast<LoadSDNode>(Op); 3001 const Constant *Cst = TLI->getTargetConstantFromLoad(LD); 3002 if (ISD::isNON_EXTLoad(LD) && Cst) { 3003 // Determine any common known bits from the loaded constant pool value. 3004 Type *CstTy = Cst->getType(); 3005 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) { 3006 // If its a vector splat, then we can (quickly) reuse the scalar path. 
3007 // NOTE: We assume all elements match and none are UNDEF. 3008 if (CstTy->isVectorTy()) { 3009 if (const Constant *Splat = Cst->getSplatValue()) { 3010 Cst = Splat; 3011 CstTy = Cst->getType(); 3012 } 3013 } 3014 // TODO - do we need to handle different bitwidths? 3015 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) { 3016 // Iterate across all vector elements finding common known bits. 3017 Known.One.setAllBits(); 3018 Known.Zero.setAllBits(); 3019 for (unsigned i = 0; i != NumElts; ++i) { 3020 if (!DemandedElts[i]) 3021 continue; 3022 if (Constant *Elt = Cst->getAggregateElement(i)) { 3023 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 3024 const APInt &Value = CInt->getValue(); 3025 Known.One &= Value; 3026 Known.Zero &= ~Value; 3027 continue; 3028 } 3029 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 3030 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 3031 Known.One &= Value; 3032 Known.Zero &= ~Value; 3033 continue; 3034 } 3035 } 3036 Known.One.clearAllBits(); 3037 Known.Zero.clearAllBits(); 3038 break; 3039 } 3040 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) { 3041 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) { 3042 const APInt &Value = CInt->getValue(); 3043 Known.One = Value; 3044 Known.Zero = ~Value; 3045 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) { 3046 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 3047 Known.One = Value; 3048 Known.Zero = ~Value; 3049 } 3050 } 3051 } 3052 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 3053 // If this is a ZEXTLoad and we are looking at the loaded value. 3054 EVT VT = LD->getMemoryVT(); 3055 unsigned MemBits = VT.getScalarSizeInBits(); 3056 Known.Zero.setBitsFrom(MemBits); 3057 } else if (const MDNode *Ranges = LD->getRanges()) { 3058 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 3059 computeKnownBitsFromRangeMetadata(*Ranges, Known); 3060 } 3061 break; 3062 } 3063 case ISD::ZERO_EXTEND_VECTOR_INREG: { 3064 EVT InVT = Op.getOperand(0).getValueType(); 3065 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3066 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3067 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */); 3068 break; 3069 } 3070 case ISD::ZERO_EXTEND: { 3071 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3072 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */); 3073 break; 3074 } 3075 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3076 EVT InVT = Op.getOperand(0).getValueType(); 3077 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3078 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3079 // If the sign bit is known to be zero or one, then sext will extend 3080 // it to the top bits, else it will just zext. 3081 Known = Known.sext(BitWidth); 3082 break; 3083 } 3084 case ISD::SIGN_EXTEND: { 3085 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3086 // If the sign bit is known to be zero or one, then sext will extend 3087 // it to the top bits, else it will just zext. 
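// Worked example for an i8 -> i16 sign extension:
//   sign bit known zero: 0b0xxxxxxx -> 0b00000000_0xxxxxxx
//   sign bit known one:  0b1xxxxxxx -> 0b11111111_1xxxxxxx
// If the sign bit is unknown, all eight new high bits remain unknown.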
3088 Known = Known.sext(BitWidth); 3089 break; 3090 } 3091 case ISD::ANY_EXTEND_VECTOR_INREG: { 3092 EVT InVT = Op.getOperand(0).getValueType(); 3093 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3094 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3095 Known = Known.zext(BitWidth, false /* ExtendedBitsAreKnownZero */); 3096 break; 3097 } 3098 case ISD::ANY_EXTEND: { 3099 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3100 Known = Known.zext(BitWidth, false /* ExtendedBitsAreKnownZero */); 3101 break; 3102 } 3103 case ISD::TRUNCATE: { 3104 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3105 Known = Known.trunc(BitWidth); 3106 break; 3107 } 3108 case ISD::AssertZext: { 3109 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 3110 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 3111 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3112 Known.Zero |= (~InMask); 3113 Known.One &= (~Known.Zero); 3114 break; 3115 } 3116 case ISD::FGETSIGN: 3117 // All bits are zero except the low bit. 3118 Known.Zero.setBitsFrom(1); 3119 break; 3120 case ISD::USUBO: 3121 case ISD::SSUBO: 3122 if (Op.getResNo() == 1) { 3123 // If we know the result of a setcc has the top bits zero, use this info. 3124 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3125 TargetLowering::ZeroOrOneBooleanContent && 3126 BitWidth > 1) 3127 Known.Zero.setBitsFrom(1); 3128 break; 3129 } 3130 LLVM_FALLTHROUGH; 3131 case ISD::SUB: 3132 case ISD::SUBC: { 3133 assert(Op.getResNo() == 0 && 3134 "We only compute knownbits for the difference here."); 3135 3136 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3137 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3138 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false, 3139 Known, Known2); 3140 break; 3141 } 3142 case ISD::UADDO: 3143 case ISD::SADDO: 3144 case ISD::ADDCARRY: 3145 if (Op.getResNo() == 1) { 3146 // If we know the result of a setcc has the top bits zero, use this info. 3147 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3148 TargetLowering::ZeroOrOneBooleanContent && 3149 BitWidth > 1) 3150 Known.Zero.setBitsFrom(1); 3151 break; 3152 } 3153 LLVM_FALLTHROUGH; 3154 case ISD::ADD: 3155 case ISD::ADDC: 3156 case ISD::ADDE: { 3157 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here."); 3158 3159 // With ADDE and ADDCARRY, a carry bit may be added in. 3160 KnownBits Carry(1); 3161 if (Opcode == ISD::ADDE) 3162 // Can't track carry from glue, set carry to unknown. 3163 Carry.resetAll(); 3164 else if (Opcode == ISD::ADDCARRY) 3165 // TODO: Compute known bits for the carry operand. Not sure if it is worth 3166 // the trouble (how often will we find a known carry bit). 
And I haven't 3167 // tested this very much yet, but something like this might work: 3168 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1); 3169 // Carry = Carry.zextOrTrunc(1, false); 3170 Carry.resetAll(); 3171 else 3172 Carry.setAllZero(); 3173 3174 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3175 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3176 Known = KnownBits::computeForAddCarry(Known, Known2, Carry); 3177 break; 3178 } 3179 case ISD::SREM: 3180 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 3181 const APInt &RA = Rem->getAPIntValue().abs(); 3182 if (RA.isPowerOf2()) { 3183 APInt LowBits = RA - 1; 3184 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3185 3186 // The low bits of the first operand are unchanged by the srem. 3187 Known.Zero = Known2.Zero & LowBits; 3188 Known.One = Known2.One & LowBits; 3189 3190 // If the first operand is non-negative or has all low bits zero, then 3191 // the upper bits are all zero. 3192 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero)) 3193 Known.Zero |= ~LowBits; 3194 3195 // If the first operand is negative and not all low bits are zero, then 3196 // the upper bits are all one. 3197 if (Known2.isNegative() && LowBits.intersects(Known2.One)) 3198 Known.One |= ~LowBits; 3199 assert((Known.Zero & Known.One) == 0&&"Bits known to be one AND zero?"); 3200 } 3201 } 3202 break; 3203 case ISD::UREM: { 3204 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 3205 const APInt &RA = Rem->getAPIntValue(); 3206 if (RA.isPowerOf2()) { 3207 APInt LowBits = (RA - 1); 3208 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3209 3210 // The upper bits are all zero, the lower ones are unchanged. 3211 Known.Zero = Known2.Zero | ~LowBits; 3212 Known.One = Known2.One & LowBits; 3213 break; 3214 } 3215 } 3216 3217 // Since the result is less than or equal to either operand, any leading 3218 // zero bits in either operand must also exist in the result. 3219 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3220 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3221 3222 uint32_t Leaders = 3223 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 3224 Known.resetAll(); 3225 Known.Zero.setHighBits(Leaders); 3226 break; 3227 } 3228 case ISD::EXTRACT_ELEMENT: { 3229 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3230 const unsigned Index = Op.getConstantOperandVal(1); 3231 const unsigned EltBitWidth = Op.getValueSizeInBits(); 3232 3233 // Remove low part of known bits mask 3234 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth); 3235 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth); 3236 3237 // Remove high part of known bit mask 3238 Known = Known.trunc(EltBitWidth); 3239 break; 3240 } 3241 case ISD::EXTRACT_VECTOR_ELT: { 3242 SDValue InVec = Op.getOperand(0); 3243 SDValue EltNo = Op.getOperand(1); 3244 EVT VecVT = InVec.getValueType(); 3245 const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); 3246 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3247 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 3248 // anything about the extended bits. 
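// For example, extracting an i8 element into an i32 result leaves bits
// [31:8] unknown: the known bits are truncated to the element width below,
// computed from the demanded source element, and then any-extended back up.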
3249 if (BitWidth > EltBitWidth) 3250 Known = Known.trunc(EltBitWidth); 3251 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3252 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) { 3253 // If we know the element index, just demand that vector element. 3254 unsigned Idx = ConstEltNo->getZExtValue(); 3255 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); 3256 Known = computeKnownBits(InVec, DemandedElt, Depth + 1); 3257 } else { 3258 // Unknown element index, so ignore DemandedElts and demand them all. 3259 Known = computeKnownBits(InVec, Depth + 1); 3260 } 3261 if (BitWidth > EltBitWidth) 3262 Known = Known.zext(BitWidth, false /* => any extend */); 3263 break; 3264 } 3265 case ISD::INSERT_VECTOR_ELT: { 3266 SDValue InVec = Op.getOperand(0); 3267 SDValue InVal = Op.getOperand(1); 3268 SDValue EltNo = Op.getOperand(2); 3269 3270 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3271 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3272 // If we know the element index, split the demand between the 3273 // source vector and the inserted element. 3274 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth); 3275 unsigned EltIdx = CEltNo->getZExtValue(); 3276 3277 // If we demand the inserted element then add its common known bits. 3278 if (DemandedElts[EltIdx]) { 3279 Known2 = computeKnownBits(InVal, Depth + 1); 3280 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 3281 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth()); 3282 } 3283 3284 // If we demand the source vector then add its common known bits, ensuring 3285 // that we don't demand the inserted element. 3286 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx)); 3287 if (!!VectorElts) { 3288 Known2 = computeKnownBits(InVec, VectorElts, Depth + 1); 3289 Known.One &= Known2.One; 3290 Known.Zero &= Known2.Zero; 3291 } 3292 } else { 3293 // Unknown element index, so ignore DemandedElts and demand them all. 3294 Known = computeKnownBits(InVec, Depth + 1); 3295 Known2 = computeKnownBits(InVal, Depth + 1); 3296 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 3297 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth()); 3298 } 3299 break; 3300 } 3301 case ISD::BITREVERSE: { 3302 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3303 Known.Zero = Known2.Zero.reverseBits(); 3304 Known.One = Known2.One.reverseBits(); 3305 break; 3306 } 3307 case ISD::BSWAP: { 3308 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3309 Known.Zero = Known2.Zero.byteSwap(); 3310 Known.One = Known2.One.byteSwap(); 3311 break; 3312 } 3313 case ISD::ABS: { 3314 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3315 3316 // If the source's MSB is zero then we know the rest of the bits already. 3317 if (Known2.isNonNegative()) { 3318 Known.Zero = Known2.Zero; 3319 Known.One = Known2.One; 3320 break; 3321 } 3322 3323 // We only know that the absolute values's MSB will be zero iff there is 3324 // a set bit that isn't the sign bit (otherwise it could be INT_MIN). 
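// Illustrative i8 case: abs(-128) is still -128 (0x80), so the sign bit of
// the result can only be reported as known-zero when some bit other than the
// operand's sign bit is known to be one.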
3325 Known2.One.clearSignBit(); 3326 if (Known2.One.getBoolValue()) { 3327 Known.Zero = APInt::getSignMask(BitWidth); 3328 break; 3329 } 3330 break; 3331 } 3332 case ISD::UMIN: { 3333 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3334 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3335 3336 // UMIN - we know that the result will have the maximum of the 3337 // known zero leading bits of the inputs. 3338 unsigned LeadZero = Known.countMinLeadingZeros(); 3339 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros()); 3340 3341 Known.Zero &= Known2.Zero; 3342 Known.One &= Known2.One; 3343 Known.Zero.setHighBits(LeadZero); 3344 break; 3345 } 3346 case ISD::UMAX: { 3347 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3348 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3349 3350 // UMAX - we know that the result will have the maximum of the 3351 // known one leading bits of the inputs. 3352 unsigned LeadOne = Known.countMinLeadingOnes(); 3353 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes()); 3354 3355 Known.Zero &= Known2.Zero; 3356 Known.One &= Known2.One; 3357 Known.One.setHighBits(LeadOne); 3358 break; 3359 } 3360 case ISD::SMIN: 3361 case ISD::SMAX: { 3362 // If we have a clamp pattern, we know that the number of sign bits will be 3363 // the minimum of the clamp min/max range. 3364 bool IsMax = (Opcode == ISD::SMAX); 3365 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3366 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3367 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3368 CstHigh = 3369 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3370 if (CstLow && CstHigh) { 3371 if (!IsMax) 3372 std::swap(CstLow, CstHigh); 3373 3374 const APInt &ValueLow = CstLow->getAPIntValue(); 3375 const APInt &ValueHigh = CstHigh->getAPIntValue(); 3376 if (ValueLow.sle(ValueHigh)) { 3377 unsigned LowSignBits = ValueLow.getNumSignBits(); 3378 unsigned HighSignBits = ValueHigh.getNumSignBits(); 3379 unsigned MinSignBits = std::min(LowSignBits, HighSignBits); 3380 if (ValueLow.isNegative() && ValueHigh.isNegative()) { 3381 Known.One.setHighBits(MinSignBits); 3382 break; 3383 } 3384 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) { 3385 Known.Zero.setHighBits(MinSignBits); 3386 break; 3387 } 3388 } 3389 } 3390 3391 // Fallback - just get the shared known bits of the operands. 3392 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3393 if (Known.isUnknown()) break; // Early-out 3394 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3395 Known.Zero &= Known2.Zero; 3396 Known.One &= Known2.One; 3397 break; 3398 } 3399 case ISD::FrameIndex: 3400 case ISD::TargetFrameIndex: 3401 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth); 3402 break; 3403 3404 default: 3405 if (Opcode < ISD::BUILTIN_OP_END) 3406 break; 3407 LLVM_FALLTHROUGH; 3408 case ISD::INTRINSIC_WO_CHAIN: 3409 case ISD::INTRINSIC_W_CHAIN: 3410 case ISD::INTRINSIC_VOID: 3411 // Allow the target to implement this method for its nodes. 
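// Targets may override TargetLowering::computeKnownBitsForTargetNode to
// refine Known for their own opcodes and intrinsics; if no extra information
// is provided, Known remains conservatively unknown.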
3412 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); 3413 break; 3414 } 3415 3416 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 3417 return Known; 3418 } 3419 3420 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0, 3421 SDValue N1) const { 3422 // X + 0 never overflow 3423 if (isNullConstant(N1)) 3424 return OFK_Never; 3425 3426 KnownBits N1Known = computeKnownBits(N1); 3427 if (N1Known.Zero.getBoolValue()) { 3428 KnownBits N0Known = computeKnownBits(N0); 3429 3430 bool overflow; 3431 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow); 3432 if (!overflow) 3433 return OFK_Never; 3434 } 3435 3436 // mulhi + 1 never overflow 3437 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 && 3438 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue()) 3439 return OFK_Never; 3440 3441 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) { 3442 KnownBits N0Known = computeKnownBits(N0); 3443 3444 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue()) 3445 return OFK_Never; 3446 } 3447 3448 return OFK_Sometime; 3449 } 3450 3451 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const { 3452 EVT OpVT = Val.getValueType(); 3453 unsigned BitWidth = OpVT.getScalarSizeInBits(); 3454 3455 // Is the constant a known power of 2? 3456 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val)) 3457 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3458 3459 // A left-shift of a constant one will have exactly one bit set because 3460 // shifting the bit off the end is undefined. 3461 if (Val.getOpcode() == ISD::SHL) { 3462 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3463 if (C && C->getAPIntValue() == 1) 3464 return true; 3465 } 3466 3467 // Similarly, a logical right-shift of a constant sign-bit will have exactly 3468 // one bit set. 3469 if (Val.getOpcode() == ISD::SRL) { 3470 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3471 if (C && C->getAPIntValue().isSignMask()) 3472 return true; 3473 } 3474 3475 // Are all operands of a build vector constant powers of two? 3476 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3477 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3478 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3479 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3480 return false; 3481 })) 3482 return true; 3483 3484 // More could be done here, though the above checks are enough 3485 // to handle some common cases. 3486 3487 // Fall back to computeKnownBits to catch other known cases. 3488 KnownBits Known = computeKnownBits(Val); 3489 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3490 } 3491 3492 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3493 EVT VT = Op.getValueType(); 3494 APInt DemandedElts = VT.isVector() 3495 ? 
APInt::getAllOnesValue(VT.getVectorNumElements()) 3496 : APInt(1, 1); 3497 return ComputeNumSignBits(Op, DemandedElts, Depth); 3498 } 3499 3500 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3501 unsigned Depth) const { 3502 EVT VT = Op.getValueType(); 3503 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3504 unsigned VTBits = VT.getScalarSizeInBits(); 3505 unsigned NumElts = DemandedElts.getBitWidth(); 3506 unsigned Tmp, Tmp2; 3507 unsigned FirstAnswer = 1; 3508 3509 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3510 const APInt &Val = C->getAPIntValue(); 3511 return Val.getNumSignBits(); 3512 } 3513 3514 if (Depth >= MaxRecursionDepth) 3515 return 1; // Limit search depth. 3516 3517 if (!DemandedElts) 3518 return 1; // No demanded elts, better to assume we don't know anything. 3519 3520 unsigned Opcode = Op.getOpcode(); 3521 switch (Opcode) { 3522 default: break; 3523 case ISD::AssertSext: 3524 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3525 return VTBits-Tmp+1; 3526 case ISD::AssertZext: 3527 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3528 return VTBits-Tmp; 3529 3530 case ISD::BUILD_VECTOR: 3531 Tmp = VTBits; 3532 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3533 if (!DemandedElts[i]) 3534 continue; 3535 3536 SDValue SrcOp = Op.getOperand(i); 3537 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 3538 3539 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3540 if (SrcOp.getValueSizeInBits() != VTBits) { 3541 assert(SrcOp.getValueSizeInBits() > VTBits && 3542 "Expected BUILD_VECTOR implicit truncation"); 3543 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3544 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3545 } 3546 Tmp = std::min(Tmp, Tmp2); 3547 } 3548 return Tmp; 3549 3550 case ISD::VECTOR_SHUFFLE: { 3551 // Collect the minimum number of sign bits that are shared by every vector 3552 // element referenced by the shuffle. 3553 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3554 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3555 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3556 for (unsigned i = 0; i != NumElts; ++i) { 3557 int M = SVN->getMaskElt(i); 3558 if (!DemandedElts[i]) 3559 continue; 3560 // For UNDEF elements, we don't know anything about the common state of 3561 // the shuffle result. 3562 if (M < 0) 3563 return 1; 3564 if ((unsigned)M < NumElts) 3565 DemandedLHS.setBit((unsigned)M % NumElts); 3566 else 3567 DemandedRHS.setBit((unsigned)M % NumElts); 3568 } 3569 Tmp = std::numeric_limits<unsigned>::max(); 3570 if (!!DemandedLHS) 3571 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3572 if (!!DemandedRHS) { 3573 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3574 Tmp = std::min(Tmp, Tmp2); 3575 } 3576 // If we don't know anything, early out and try computeKnownBits fall-back. 3577 if (Tmp == 1) 3578 break; 3579 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3580 return Tmp; 3581 } 3582 3583 case ISD::BITCAST: { 3584 SDValue N0 = Op.getOperand(0); 3585 EVT SrcVT = N0.getValueType(); 3586 unsigned SrcBits = SrcVT.getScalarSizeInBits(); 3587 3588 // Ignore bitcasts from unsupported types.. 3589 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) 3590 break; 3591 3592 // Fast handling of 'identity' bitcasts. 
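// e.g. a bitcast between types of the same scalar width, such as
// v4i32 <-> v4f32, preserves each element's bit pattern, so the sign-bit
// count of the source carries over directly.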
3593 if (VTBits == SrcBits) 3594 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3595 3596 bool IsLE = getDataLayout().isLittleEndian(); 3597 3598 // Bitcast 'large element' scalar/vector to 'small element' vector. 3599 if ((SrcBits % VTBits) == 0) { 3600 assert(VT.isVector() && "Expected bitcast to vector"); 3601 3602 unsigned Scale = SrcBits / VTBits; 3603 APInt SrcDemandedElts(NumElts / Scale, 0); 3604 for (unsigned i = 0; i != NumElts; ++i) 3605 if (DemandedElts[i]) 3606 SrcDemandedElts.setBit(i / Scale); 3607 3608 // Fast case - sign splat can be simply split across the small elements. 3609 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1); 3610 if (Tmp == SrcBits) 3611 return VTBits; 3612 3613 // Slow case - determine how far the sign extends into each sub-element. 3614 Tmp2 = VTBits; 3615 for (unsigned i = 0; i != NumElts; ++i) 3616 if (DemandedElts[i]) { 3617 unsigned SubOffset = i % Scale; 3618 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); 3619 SubOffset = SubOffset * VTBits; 3620 if (Tmp <= SubOffset) 3621 return 1; 3622 Tmp2 = std::min(Tmp2, Tmp - SubOffset); 3623 } 3624 return Tmp2; 3625 } 3626 break; 3627 } 3628 3629 case ISD::SIGN_EXTEND: 3630 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3631 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3632 case ISD::SIGN_EXTEND_INREG: 3633 // Max of the input and what this extends. 3634 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3635 Tmp = VTBits-Tmp+1; 3636 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3637 return std::max(Tmp, Tmp2); 3638 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3639 SDValue Src = Op.getOperand(0); 3640 EVT SrcVT = Src.getValueType(); 3641 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements()); 3642 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3643 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3644 } 3645 case ISD::SRA: 3646 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3647 // SRA X, C -> adds C sign bits. 3648 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) 3649 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits); 3650 else if (const APInt *ShAmt = 3651 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 3652 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits); 3653 return Tmp; 3654 case ISD::SHL: 3655 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 3656 // shl destroys sign bits, ensure it doesn't shift out all sign bits. 3657 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3658 if (ShAmt->ult(Tmp)) 3659 return Tmp - ShAmt->getZExtValue(); 3660 } else if (const APInt *ShAmt = 3661 getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 3662 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3663 if (ShAmt->ult(Tmp)) 3664 return Tmp - ShAmt->getZExtValue(); 3665 } 3666 break; 3667 case ISD::AND: 3668 case ISD::OR: 3669 case ISD::XOR: // NOT is handled here. 3670 // Logical binary ops preserve the number of sign bits at the worst. 3671 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3672 if (Tmp != 1) { 3673 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3674 FirstAnswer = std::min(Tmp, Tmp2); 3675 // We computed what we know about the sign bits as our first 3676 // answer. Now proceed to the generic code that uses 3677 // computeKnownBits, and pick whichever answer is better. 
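// For example, (and x, 0x00ff) on i16 has its top 8 bits known zero even if
// x alone has only one known sign bit, so the computeKnownBits-based answer
// at the bottom of this function can improve on FirstAnswer.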
3678 } 3679 break; 3680 3681 case ISD::SELECT: 3682 case ISD::VSELECT: 3683 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3684 if (Tmp == 1) return 1; // Early out. 3685 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3686 return std::min(Tmp, Tmp2); 3687 case ISD::SELECT_CC: 3688 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3689 if (Tmp == 1) return 1; // Early out. 3690 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3691 return std::min(Tmp, Tmp2); 3692 3693 case ISD::SMIN: 3694 case ISD::SMAX: { 3695 // If we have a clamp pattern, we know that the number of sign bits will be 3696 // the minimum of the clamp min/max range. 3697 bool IsMax = (Opcode == ISD::SMAX); 3698 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3699 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3700 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3701 CstHigh = 3702 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3703 if (CstLow && CstHigh) { 3704 if (!IsMax) 3705 std::swap(CstLow, CstHigh); 3706 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { 3707 Tmp = CstLow->getAPIntValue().getNumSignBits(); 3708 Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); 3709 return std::min(Tmp, Tmp2); 3710 } 3711 } 3712 3713 // Fallback - just get the minimum number of sign bits of the operands. 3714 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3715 if (Tmp == 1) 3716 return 1; // Early out. 3717 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3718 return std::min(Tmp, Tmp2); 3719 } 3720 case ISD::UMIN: 3721 case ISD::UMAX: 3722 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3723 if (Tmp == 1) 3724 return 1; // Early out. 3725 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3726 return std::min(Tmp, Tmp2); 3727 case ISD::SADDO: 3728 case ISD::UADDO: 3729 case ISD::SSUBO: 3730 case ISD::USUBO: 3731 case ISD::SMULO: 3732 case ISD::UMULO: 3733 if (Op.getResNo() != 1) 3734 break; 3735 // The boolean result conforms to getBooleanContents. Fall through. 3736 // If setcc returns 0/-1, all bits are sign bits. 3737 // We know that we have an integer-based boolean since these operations 3738 // are only available for integer. 3739 if (TLI->getBooleanContents(VT.isVector(), false) == 3740 TargetLowering::ZeroOrNegativeOneBooleanContent) 3741 return VTBits; 3742 break; 3743 case ISD::SETCC: 3744 case ISD::STRICT_FSETCC: 3745 case ISD::STRICT_FSETCCS: { 3746 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 3747 // If setcc returns 0/-1, all bits are sign bits. 3748 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 3749 TargetLowering::ZeroOrNegativeOneBooleanContent) 3750 return VTBits; 3751 break; 3752 } 3753 case ISD::ROTL: 3754 case ISD::ROTR: 3755 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 3756 unsigned RotAmt = C->getAPIntValue().urem(VTBits); 3757 3758 // Handle rotate right by N like a rotate left by 32-N. 3759 if (Opcode == ISD::ROTR) 3760 RotAmt = (VTBits - RotAmt) % VTBits; 3761 3762 // If we aren't rotating out all of the known-in sign bits, return the 3763 // number that are left. This handles rotl(sext(x), 1) for example. 3764 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3765 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt); 3766 } 3767 break; 3768 case ISD::ADD: 3769 case ISD::ADDC: 3770 // Add can have at most one carry bit. 
Thus we know that the output 3771 // is, at worst, one more bit than the inputs. 3772 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3773 if (Tmp == 1) return 1; // Early out. 3774 3775 // Special case decrementing a value (ADD X, -1): 3776 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 3777 if (CRHS->isAllOnesValue()) { 3778 KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1); 3779 3780 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3781 // sign bits set. 3782 if ((Known.Zero | 1).isAllOnesValue()) 3783 return VTBits; 3784 3785 // If we are subtracting one from a positive number, there is no carry 3786 // out of the result. 3787 if (Known.isNonNegative()) 3788 return Tmp; 3789 } 3790 3791 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3792 if (Tmp2 == 1) return 1; 3793 return std::min(Tmp, Tmp2)-1; 3794 3795 case ISD::SUB: 3796 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); 3797 if (Tmp2 == 1) return 1; 3798 3799 // Handle NEG. 3800 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) 3801 if (CLHS->isNullValue()) { 3802 KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1); 3803 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3804 // sign bits set. 3805 if ((Known.Zero | 1).isAllOnesValue()) 3806 return VTBits; 3807 3808 // If the input is known to be positive (the sign bit is known clear), 3809 // the output of the NEG has the same number of sign bits as the input. 3810 if (Known.isNonNegative()) 3811 return Tmp2; 3812 3813 // Otherwise, we treat this like a SUB. 3814 } 3815 3816 // Sub can have at most one carry bit. Thus we know that the output 3817 // is, at worst, one more bit than the inputs. 3818 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3819 if (Tmp == 1) return 1; // Early out. 3820 return std::min(Tmp, Tmp2)-1; 3821 case ISD::MUL: { 3822 // The output of the Mul can be at most twice the valid bits in the inputs. 3823 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3824 if (SignBitsOp0 == 1) 3825 break; 3826 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3827 if (SignBitsOp1 == 1) 3828 break; 3829 unsigned OutValidBits = 3830 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1); 3831 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1; 3832 } 3833 case ISD::TRUNCATE: { 3834 // Check if the sign bits of source go down as far as the truncated value. 3835 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); 3836 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3837 if (NumSrcSignBits > (NumSrcBits - VTBits)) 3838 return NumSrcSignBits - (NumSrcBits - VTBits); 3839 break; 3840 } 3841 case ISD::EXTRACT_ELEMENT: { 3842 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3843 const int BitWidth = Op.getValueSizeInBits(); 3844 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; 3845 3846 // Get reverse index (starting from 1), Op1 value indexes elements from 3847 // little end. Sign starts at big end. 3848 const int rIndex = Items - 1 - Op.getConstantOperandVal(1); 3849 3850 // If the sign portion ends in our element the subtraction gives correct 3851 // result. 
Otherwise it gives either negative or > bitwidth result 3852 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); 3853 } 3854 case ISD::INSERT_VECTOR_ELT: { 3855 SDValue InVec = Op.getOperand(0); 3856 SDValue InVal = Op.getOperand(1); 3857 SDValue EltNo = Op.getOperand(2); 3858 3859 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3860 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3861 // If we know the element index, split the demand between the 3862 // source vector and the inserted element. 3863 unsigned EltIdx = CEltNo->getZExtValue(); 3864 3865 // If we demand the inserted element then get its sign bits. 3866 Tmp = std::numeric_limits<unsigned>::max(); 3867 if (DemandedElts[EltIdx]) { 3868 // TODO - handle implicit truncation of inserted elements. 3869 if (InVal.getScalarValueSizeInBits() != VTBits) 3870 break; 3871 Tmp = ComputeNumSignBits(InVal, Depth + 1); 3872 } 3873 3874 // If we demand the source vector then get its sign bits, and determine 3875 // the minimum. 3876 APInt VectorElts = DemandedElts; 3877 VectorElts.clearBit(EltIdx); 3878 if (!!VectorElts) { 3879 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1); 3880 Tmp = std::min(Tmp, Tmp2); 3881 } 3882 } else { 3883 // Unknown element index, so ignore DemandedElts and demand them all. 3884 Tmp = ComputeNumSignBits(InVec, Depth + 1); 3885 Tmp2 = ComputeNumSignBits(InVal, Depth + 1); 3886 Tmp = std::min(Tmp, Tmp2); 3887 } 3888 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3889 return Tmp; 3890 } 3891 case ISD::EXTRACT_VECTOR_ELT: { 3892 SDValue InVec = Op.getOperand(0); 3893 SDValue EltNo = Op.getOperand(1); 3894 EVT VecVT = InVec.getValueType(); 3895 const unsigned BitWidth = Op.getValueSizeInBits(); 3896 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); 3897 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3898 3899 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know 3900 // anything about sign bits. But if the sizes match we can derive knowledge 3901 // about sign bits from the vector operand. 3902 if (BitWidth != EltBitWidth) 3903 break; 3904 3905 // If we know the element index, just demand that vector element, else for 3906 // an unknown element index, ignore DemandedElts and demand them all. 3907 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 3908 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3909 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) 3910 DemandedSrcElts = 3911 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); 3912 3913 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1); 3914 } 3915 case ISD::EXTRACT_SUBVECTOR: { 3916 // If we know the element index, just demand that subvector elements, 3917 // otherwise demand them all. 3918 SDValue Src = Op.getOperand(0); 3919 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 3920 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3921 APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts); 3922 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 3923 // Offset the demanded elts by the subvector index. 3924 uint64_t Idx = SubIdx->getZExtValue(); 3925 DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 3926 } 3927 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1); 3928 } 3929 case ISD::CONCAT_VECTORS: { 3930 // Determine the minimum number of sign bits across all demanded 3931 // elts of the input vectors. 
Early out if the result is already 1. 3932 Tmp = std::numeric_limits<unsigned>::max(); 3933 EVT SubVectorVT = Op.getOperand(0).getValueType(); 3934 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 3935 unsigned NumSubVectors = Op.getNumOperands(); 3936 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) { 3937 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 3938 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 3939 if (!DemandedSub) 3940 continue; 3941 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1); 3942 Tmp = std::min(Tmp, Tmp2); 3943 } 3944 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3945 return Tmp; 3946 } 3947 case ISD::INSERT_SUBVECTOR: { 3948 // If we know the element index, demand any elements from the subvector and 3949 // the remainder from the src its inserted into, otherwise demand them all. 3950 SDValue Src = Op.getOperand(0); 3951 SDValue Sub = Op.getOperand(1); 3952 auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 3953 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 3954 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) { 3955 Tmp = std::numeric_limits<unsigned>::max(); 3956 uint64_t Idx = SubIdx->getZExtValue(); 3957 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 3958 if (!!DemandedSubElts) { 3959 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1); 3960 if (Tmp == 1) return 1; // early-out 3961 } 3962 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts); 3963 APInt DemandedSrcElts = DemandedElts & ~SubMask; 3964 if (!!DemandedSrcElts) { 3965 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1); 3966 Tmp = std::min(Tmp, Tmp2); 3967 } 3968 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3969 return Tmp; 3970 } 3971 3972 // Not able to determine the index so just assume worst case. 3973 Tmp = ComputeNumSignBits(Sub, Depth + 1); 3974 if (Tmp == 1) return 1; // early-out 3975 Tmp2 = ComputeNumSignBits(Src, Depth + 1); 3976 Tmp = std::min(Tmp, Tmp2); 3977 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3978 return Tmp; 3979 } 3980 } 3981 3982 // If we are looking at the loaded value of the SDNode. 3983 if (Op.getResNo() == 0) { 3984 // Handle LOADX separately here. EXTLOAD case will fallthrough. 3985 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 3986 unsigned ExtType = LD->getExtensionType(); 3987 switch (ExtType) { 3988 default: break; 3989 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known. 3990 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3991 return VTBits - Tmp + 1; 3992 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known. 3993 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3994 return VTBits - Tmp; 3995 case ISD::NON_EXTLOAD: 3996 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) { 3997 // We only need to handle vectors - computeKnownBits should handle 3998 // scalar cases. 
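// Illustrative example: a non-extending load of <4 x i32> <-1, -1, -2, -4>
// from the constant pool gives per-element sign-bit counts of 32, 32, 31 and
// 30; with all elements demanded the minimum, 30, is returned.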
3999 Type *CstTy = Cst->getType(); 4000 if (CstTy->isVectorTy() && 4001 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) { 4002 Tmp = VTBits; 4003 for (unsigned i = 0; i != NumElts; ++i) { 4004 if (!DemandedElts[i]) 4005 continue; 4006 if (Constant *Elt = Cst->getAggregateElement(i)) { 4007 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 4008 const APInt &Value = CInt->getValue(); 4009 Tmp = std::min(Tmp, Value.getNumSignBits()); 4010 continue; 4011 } 4012 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 4013 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 4014 Tmp = std::min(Tmp, Value.getNumSignBits()); 4015 continue; 4016 } 4017 } 4018 // Unknown type. Conservatively assume no bits match sign bit. 4019 return 1; 4020 } 4021 return Tmp; 4022 } 4023 } 4024 break; 4025 } 4026 } 4027 } 4028 4029 // Allow the target to implement this method for its nodes. 4030 if (Opcode >= ISD::BUILTIN_OP_END || 4031 Opcode == ISD::INTRINSIC_WO_CHAIN || 4032 Opcode == ISD::INTRINSIC_W_CHAIN || 4033 Opcode == ISD::INTRINSIC_VOID) { 4034 unsigned NumBits = 4035 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 4036 if (NumBits > 1) 4037 FirstAnswer = std::max(FirstAnswer, NumBits); 4038 } 4039 4040 // Finally, if we can prove that the top bits of the result are 0's or 1's, 4041 // use this information. 4042 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); 4043 4044 APInt Mask; 4045 if (Known.isNonNegative()) { // sign bit is 0 4046 Mask = Known.Zero; 4047 } else if (Known.isNegative()) { // sign bit is 1; 4048 Mask = Known.One; 4049 } else { 4050 // Nothing known. 4051 return FirstAnswer; 4052 } 4053 4054 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 4055 // the number of identical bits in the top of the input value. 4056 Mask = ~Mask; 4057 Mask <<= Mask.getBitWidth()-VTBits; 4058 // Return # leading zeros. We use 'min' here in case Val was zero before 4059 // shifting. We don't want to return '64' as for an i32 "0". 4060 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros())); 4061 } 4062 4063 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 4064 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 4065 !isa<ConstantSDNode>(Op.getOperand(1))) 4066 return false; 4067 4068 if (Op.getOpcode() == ISD::OR && 4069 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) 4070 return false; 4071 4072 return true; 4073 } 4074 4075 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { 4076 // If we're told that NaNs won't happen, assume they won't. 4077 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) 4078 return true; 4079 4080 if (Depth >= MaxRecursionDepth) 4081 return false; // Limit search depth. 4082 4083 // TODO: Handle vectors. 4084 // If the value is a constant, we can obviously see if it is a NaN or not. 
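// When SNaN is true only signaling NaNs need to be ruled out, so a constant
// quiet NaN still reports true here.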
4085 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) { 4086 return !C->getValueAPF().isNaN() || 4087 (SNaN && !C->getValueAPF().isSignaling()); 4088 } 4089 4090 unsigned Opcode = Op.getOpcode(); 4091 switch (Opcode) { 4092 case ISD::FADD: 4093 case ISD::FSUB: 4094 case ISD::FMUL: 4095 case ISD::FDIV: 4096 case ISD::FREM: 4097 case ISD::FSIN: 4098 case ISD::FCOS: { 4099 if (SNaN) 4100 return true; 4101 // TODO: Need isKnownNeverInfinity 4102 return false; 4103 } 4104 case ISD::FCANONICALIZE: 4105 case ISD::FEXP: 4106 case ISD::FEXP2: 4107 case ISD::FTRUNC: 4108 case ISD::FFLOOR: 4109 case ISD::FCEIL: 4110 case ISD::FROUND: 4111 case ISD::FRINT: 4112 case ISD::FNEARBYINT: { 4113 if (SNaN) 4114 return true; 4115 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 4116 } 4117 case ISD::FABS: 4118 case ISD::FNEG: 4119 case ISD::FCOPYSIGN: { 4120 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 4121 } 4122 case ISD::SELECT: 4123 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && 4124 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); 4125 case ISD::FP_EXTEND: 4126 case ISD::FP_ROUND: { 4127 if (SNaN) 4128 return true; 4129 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 4130 } 4131 case ISD::SINT_TO_FP: 4132 case ISD::UINT_TO_FP: 4133 return true; 4134 case ISD::FMA: 4135 case ISD::FMAD: { 4136 if (SNaN) 4137 return true; 4138 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && 4139 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && 4140 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); 4141 } 4142 case ISD::FSQRT: // Need is known positive 4143 case ISD::FLOG: 4144 case ISD::FLOG2: 4145 case ISD::FLOG10: 4146 case ISD::FPOWI: 4147 case ISD::FPOW: { 4148 if (SNaN) 4149 return true; 4150 // TODO: Refine on operand 4151 return false; 4152 } 4153 case ISD::FMINNUM: 4154 case ISD::FMAXNUM: { 4155 // Only one needs to be known not-nan, since it will be returned if the 4156 // other ends up being one. 4157 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) || 4158 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); 4159 } 4160 case ISD::FMINNUM_IEEE: 4161 case ISD::FMAXNUM_IEEE: { 4162 if (SNaN) 4163 return true; 4164 // This can return a NaN if either operand is an sNaN, or if both operands 4165 // are NaN. 4166 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) && 4167 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) || 4168 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) && 4169 isKnownNeverSNaN(Op.getOperand(0), Depth + 1)); 4170 } 4171 case ISD::FMINIMUM: 4172 case ISD::FMAXIMUM: { 4173 // TODO: Does this quiet or return the origina NaN as-is? 4174 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && 4175 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); 4176 } 4177 case ISD::EXTRACT_VECTOR_ELT: { 4178 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 4179 } 4180 default: 4181 if (Opcode >= ISD::BUILTIN_OP_END || 4182 Opcode == ISD::INTRINSIC_WO_CHAIN || 4183 Opcode == ISD::INTRINSIC_W_CHAIN || 4184 Opcode == ISD::INTRINSIC_VOID) { 4185 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth); 4186 } 4187 4188 return false; 4189 } 4190 } 4191 4192 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const { 4193 assert(Op.getValueType().isFloatingPoint() && 4194 "Floating point type expected"); 4195 4196 // If the value is a constant, we can obviously see if it is a zero or not. 4197 // TODO: Add BuildVector support. 
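// Note that +0.0 and -0.0 are both treated as zero: isZero() is true for
// either sign, so neither is reported as known never-zero.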
4198 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 4199 return !C->isZero(); 4200 return false; 4201 } 4202 4203 bool SelectionDAG::isKnownNeverZero(SDValue Op) const { 4204 assert(!Op.getValueType().isFloatingPoint() && 4205 "Floating point types unsupported - use isKnownNeverZeroFloat"); 4206 4207 // If the value is a constant, we can obviously see if it is a zero or not. 4208 if (ISD::matchUnaryPredicate( 4209 Op, [](ConstantSDNode *C) { return !C->isNullValue(); })) 4210 return true; 4211 4212 // TODO: Recognize more cases here. 4213 switch (Op.getOpcode()) { 4214 default: break; 4215 case ISD::OR: 4216 if (isKnownNeverZero(Op.getOperand(1)) || 4217 isKnownNeverZero(Op.getOperand(0))) 4218 return true; 4219 break; 4220 } 4221 4222 return false; 4223 } 4224 4225 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const { 4226 // Check the obvious case. 4227 if (A == B) return true; 4228 4229 // For for negative and positive zero. 4230 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) 4231 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) 4232 if (CA->isZero() && CB->isZero()) return true; 4233 4234 // Otherwise they may not be equal. 4235 return false; 4236 } 4237 4238 // FIXME: unify with llvm::haveNoCommonBitsSet. 4239 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M) 4240 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const { 4241 assert(A.getValueType() == B.getValueType() && 4242 "Values must have the same type"); 4243 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue(); 4244 } 4245 4246 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, 4247 ArrayRef<SDValue> Ops, 4248 SelectionDAG &DAG) { 4249 int NumOps = Ops.size(); 4250 assert(NumOps != 0 && "Can't build an empty vector!"); 4251 assert(VT.getVectorNumElements() == (unsigned)NumOps && 4252 "Incorrect element count in BUILD_VECTOR!"); 4253 4254 // BUILD_VECTOR of UNDEFs is UNDEF. 4255 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 4256 return DAG.getUNDEF(VT); 4257 4258 // BUILD_VECTOR of seq extract/insert from the same vector + type is Identity. 4259 SDValue IdentitySrc; 4260 bool IsIdentity = true; 4261 for (int i = 0; i != NumOps; ++i) { 4262 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT || 4263 Ops[i].getOperand(0).getValueType() != VT || 4264 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) || 4265 !isa<ConstantSDNode>(Ops[i].getOperand(1)) || 4266 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) { 4267 IsIdentity = false; 4268 break; 4269 } 4270 IdentitySrc = Ops[i].getOperand(0); 4271 } 4272 if (IsIdentity) 4273 return IdentitySrc; 4274 4275 return SDValue(); 4276 } 4277 4278 /// Try to simplify vector concatenation to an input value, undef, or build 4279 /// vector. 4280 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, 4281 ArrayRef<SDValue> Ops, 4282 SelectionDAG &DAG) { 4283 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!"); 4284 assert(llvm::all_of(Ops, 4285 [Ops](SDValue Op) { 4286 return Ops[0].getValueType() == Op.getValueType(); 4287 }) && 4288 "Concatenation of vectors with inconsistent value types!"); 4289 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) == 4290 VT.getVectorNumElements() && 4291 "Incorrect element count in vector concatenation!"); 4292 4293 if (Ops.size() == 1) 4294 return Ops[0]; 4295 4296 // Concat of UNDEFs is UNDEF. 
4297 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 4298 return DAG.getUNDEF(VT); 4299 4300 // Scan the operands and look for extract operations from a single source 4301 // that correspond to insertion at the same location via this concatenation: 4302 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ... 4303 SDValue IdentitySrc; 4304 bool IsIdentity = true; 4305 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 4306 SDValue Op = Ops[i]; 4307 unsigned IdentityIndex = i * Op.getValueType().getVectorNumElements(); 4308 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR || 4309 Op.getOperand(0).getValueType() != VT || 4310 (IdentitySrc && Op.getOperand(0) != IdentitySrc) || 4311 !isa<ConstantSDNode>(Op.getOperand(1)) || 4312 Op.getConstantOperandVal(1) != IdentityIndex) { 4313 IsIdentity = false; 4314 break; 4315 } 4316 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) && 4317 "Unexpected identity source vector for concat of extracts"); 4318 IdentitySrc = Op.getOperand(0); 4319 } 4320 if (IsIdentity) { 4321 assert(IdentitySrc && "Failed to set source vector of extracts"); 4322 return IdentitySrc; 4323 } 4324 4325 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be 4326 // simplified to one big BUILD_VECTOR. 4327 // FIXME: Add support for SCALAR_TO_VECTOR as well. 4328 EVT SVT = VT.getScalarType(); 4329 SmallVector<SDValue, 16> Elts; 4330 for (SDValue Op : Ops) { 4331 EVT OpVT = Op.getValueType(); 4332 if (Op.isUndef()) 4333 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); 4334 else if (Op.getOpcode() == ISD::BUILD_VECTOR) 4335 Elts.append(Op->op_begin(), Op->op_end()); 4336 else 4337 return SDValue(); 4338 } 4339 4340 // BUILD_VECTOR requires all inputs to be of the same type, find the 4341 // maximum type and extend them all. 4342 for (SDValue Op : Elts) 4343 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 4344 4345 if (SVT.bitsGT(VT.getScalarType())) 4346 for (SDValue &Op : Elts) 4347 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) 4348 ? DAG.getZExtOrTrunc(Op, DL, SVT) 4349 : DAG.getSExtOrTrunc(Op, DL, SVT); 4350 4351 SDValue V = DAG.getBuildVector(VT, DL, Elts); 4352 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG); 4353 return V; 4354 } 4355 4356 /// Gets or creates the specified node. 4357 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { 4358 FoldingSetNodeID ID; 4359 AddNodeIDNode(ID, Opcode, getVTList(VT), None); 4360 void *IP = nullptr; 4361 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4362 return SDValue(E, 0); 4363 4364 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4365 getVTList(VT)); 4366 CSEMap.InsertNode(N, IP); 4367 4368 InsertNode(N); 4369 SDValue V = SDValue(N, 0); 4370 NewSDValueDbgMsg(V, "Creating new node: ", this); 4371 return V; 4372 } 4373 4374 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4375 SDValue Operand, const SDNodeFlags Flags) { 4376 // Constant fold unary operations with an integer constant operand. Even 4377 // opaque constant will be folded, because the folding of unary operations 4378 // doesn't create new constants with different values. Nevertheless, the 4379 // opaque flag is preserved during folding to prevent future folding with 4380 // other constants. 
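// Illustrative example: requesting SIGN_EXTEND of an i8 constant -1 to i32
// here simply produces an i32 constant -1 rather than an extension node.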
4381 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 4382 const APInt &Val = C->getAPIntValue(); 4383 switch (Opcode) { 4384 default: break; 4385 case ISD::SIGN_EXTEND: 4386 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 4387 C->isTargetOpcode(), C->isOpaque()); 4388 case ISD::TRUNCATE: 4389 if (C->isOpaque()) 4390 break; 4391 LLVM_FALLTHROUGH; 4392 case ISD::ANY_EXTEND: 4393 case ISD::ZERO_EXTEND: 4394 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 4395 C->isTargetOpcode(), C->isOpaque()); 4396 case ISD::UINT_TO_FP: 4397 case ISD::SINT_TO_FP: { 4398 APFloat apf(EVTToAPFloatSemantics(VT), 4399 APInt::getNullValue(VT.getSizeInBits())); 4400 (void)apf.convertFromAPInt(Val, 4401 Opcode==ISD::SINT_TO_FP, 4402 APFloat::rmNearestTiesToEven); 4403 return getConstantFP(apf, DL, VT); 4404 } 4405 case ISD::BITCAST: 4406 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 4407 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 4408 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 4409 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 4410 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 4411 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 4412 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 4413 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 4414 break; 4415 case ISD::ABS: 4416 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 4417 C->isOpaque()); 4418 case ISD::BITREVERSE: 4419 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 4420 C->isOpaque()); 4421 case ISD::BSWAP: 4422 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 4423 C->isOpaque()); 4424 case ISD::CTPOP: 4425 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 4426 C->isOpaque()); 4427 case ISD::CTLZ: 4428 case ISD::CTLZ_ZERO_UNDEF: 4429 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 4430 C->isOpaque()); 4431 case ISD::CTTZ: 4432 case ISD::CTTZ_ZERO_UNDEF: 4433 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 4434 C->isOpaque()); 4435 case ISD::FP16_TO_FP: { 4436 bool Ignored; 4437 APFloat FPV(APFloat::IEEEhalf(), 4438 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 4439 4440 // This can return overflow, underflow, or inexact; we don't care. 4441 // FIXME need to be more flexible about rounding mode. 4442 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4443 APFloat::rmNearestTiesToEven, &Ignored); 4444 return getConstantFP(FPV, DL, VT); 4445 } 4446 } 4447 } 4448 4449 // Constant fold unary operations with a floating point constant operand. 
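// e.g. FNEG folds by flipping the sign of the APFloat, and FP_TO_SINT /
// FP_TO_UINT fold via convertToInteger unless the conversion is invalid
// (such as a NaN input), in which case a node is built as usual.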
4450 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4451 APFloat V = C->getValueAPF(); // make copy 4452 switch (Opcode) { 4453 case ISD::FNEG: 4454 V.changeSign(); 4455 return getConstantFP(V, DL, VT); 4456 case ISD::FABS: 4457 V.clearSign(); 4458 return getConstantFP(V, DL, VT); 4459 case ISD::FCEIL: { 4460 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4461 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4462 return getConstantFP(V, DL, VT); 4463 break; 4464 } 4465 case ISD::FTRUNC: { 4466 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4467 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4468 return getConstantFP(V, DL, VT); 4469 break; 4470 } 4471 case ISD::FFLOOR: { 4472 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4473 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4474 return getConstantFP(V, DL, VT); 4475 break; 4476 } 4477 case ISD::FP_EXTEND: { 4478 bool ignored; 4479 // This can return overflow, underflow, or inexact; we don't care. 4480 // FIXME need to be more flexible about rounding mode. 4481 (void)V.convert(EVTToAPFloatSemantics(VT), 4482 APFloat::rmNearestTiesToEven, &ignored); 4483 return getConstantFP(V, DL, VT); 4484 } 4485 case ISD::FP_TO_SINT: 4486 case ISD::FP_TO_UINT: { 4487 bool ignored; 4488 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4489 // FIXME need to be more flexible about rounding mode. 4490 APFloat::opStatus s = 4491 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4492 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4493 break; 4494 return getConstant(IntVal, DL, VT); 4495 } 4496 case ISD::BITCAST: 4497 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4498 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4499 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4500 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4501 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4502 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4503 break; 4504 case ISD::FP_TO_FP16: { 4505 bool Ignored; 4506 // This can return overflow, underflow, or inexact; we don't care. 4507 // FIXME need to be more flexible about rounding mode. 4508 (void)V.convert(APFloat::IEEEhalf(), 4509 APFloat::rmNearestTiesToEven, &Ignored); 4510 return getConstant(V.bitcastToAPInt(), DL, VT); 4511 } 4512 } 4513 } 4514 4515 // Constant fold unary operations with a vector integer or float operand. 4516 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4517 if (BV->isConstant()) { 4518 switch (Opcode) { 4519 default: 4520 // FIXME: Entirely reasonable to perform folding of other unary 4521 // operations here as the need arises. 
4522 break;
4523 case ISD::FNEG:
4524 case ISD::FABS:
4525 case ISD::FCEIL:
4526 case ISD::FTRUNC:
4527 case ISD::FFLOOR:
4528 case ISD::FP_EXTEND:
4529 case ISD::FP_TO_SINT:
4530 case ISD::FP_TO_UINT:
4531 case ISD::TRUNCATE:
4532 case ISD::ANY_EXTEND:
4533 case ISD::ZERO_EXTEND:
4534 case ISD::SIGN_EXTEND:
4535 case ISD::UINT_TO_FP:
4536 case ISD::SINT_TO_FP:
4537 case ISD::ABS:
4538 case ISD::BITREVERSE:
4539 case ISD::BSWAP:
4540 case ISD::CTLZ:
4541 case ISD::CTLZ_ZERO_UNDEF:
4542 case ISD::CTTZ:
4543 case ISD::CTTZ_ZERO_UNDEF:
4544 case ISD::CTPOP: {
4545 SDValue Ops = { Operand };
4546 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4547 return Fold;
4548 }
4549 }
4550 }
4551 }
4552
4553 unsigned OpOpcode = Operand.getNode()->getOpcode();
4554 switch (Opcode) {
4555 case ISD::TokenFactor:
4556 case ISD::MERGE_VALUES:
4557 case ISD::CONCAT_VECTORS:
4558 return Operand; // Factor, merge or concat of one node? No need.
4559 case ISD::BUILD_VECTOR: {
4560 // Attempt to simplify BUILD_VECTOR.
4561 SDValue Ops[] = {Operand};
4562 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4563 return V;
4564 break;
4565 }
4566 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4567 case ISD::FP_EXTEND:
4568 assert(VT.isFloatingPoint() &&
4569 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4570 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4571 assert((!VT.isVector() ||
4572 VT.getVectorNumElements() ==
4573 Operand.getValueType().getVectorNumElements()) &&
4574 "Vector element count mismatch!");
4575 assert(Operand.getValueType().bitsLT(VT) &&
4576 "Invalid fpext node, dst < src!");
4577 if (Operand.isUndef())
4578 return getUNDEF(VT);
4579 break;
4580 case ISD::FP_TO_SINT:
4581 case ISD::FP_TO_UINT:
4582 if (Operand.isUndef())
4583 return getUNDEF(VT);
4584 break;
4585 case ISD::SINT_TO_FP:
4586 case ISD::UINT_TO_FP:
4587 // [us]itofp(undef) = 0, because the result value is bounded.
4588 if (Operand.isUndef())
4589 return getConstantFP(0.0, DL, VT);
4590 break;
4591 case ISD::SIGN_EXTEND:
4592 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4593 "Invalid SIGN_EXTEND!");
4594 assert(VT.isVector() == Operand.getValueType().isVector() &&
4595 "SIGN_EXTEND result type should be vector iff the operand "
4596 "type is vector!");
4597 if (Operand.getValueType() == VT) return Operand; // noop extension
4598 assert((!VT.isVector() ||
4599 VT.getVectorNumElements() ==
4600 Operand.getValueType().getVectorNumElements()) &&
4601 "Vector element count mismatch!");
4602 assert(Operand.getValueType().bitsLT(VT) &&
4603 "Invalid sext node, dst < src!");
4604 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4605 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4606 else if (OpOpcode == ISD::UNDEF)
4607 // sext(undef) = 0, because the top bits will all be the same.
4608 return getConstant(0, DL, VT);
4609 break;
4610 case ISD::ZERO_EXTEND:
4611 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4612 "Invalid ZERO_EXTEND!");
4613 assert(VT.isVector() == Operand.getValueType().isVector() &&
4614 "ZERO_EXTEND result type should be vector iff the operand "
4615 "type is vector!");
4616 if (Operand.getValueType() == VT) return Operand; // noop extension
4617 assert((!VT.isVector() ||
4618 VT.getVectorNumElements() ==
4619 Operand.getValueType().getVectorNumElements()) &&
4620 "Vector element count mismatch!");
4621 assert(Operand.getValueType().bitsLT(VT) &&
4622 "Invalid zext node, dst < src!");
4623 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4624 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4625 else if (OpOpcode == ISD::UNDEF)
4626 // zext(undef) = 0, because the top bits will be zero.
4627 return getConstant(0, DL, VT);
4628 break;
4629 case ISD::ANY_EXTEND:
4630 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4631 "Invalid ANY_EXTEND!");
4632 assert(VT.isVector() == Operand.getValueType().isVector() &&
4633 "ANY_EXTEND result type should be vector iff the operand "
4634 "type is vector!");
4635 if (Operand.getValueType() == VT) return Operand; // noop extension
4636 assert((!VT.isVector() ||
4637 VT.getVectorNumElements() ==
4638 Operand.getValueType().getVectorNumElements()) &&
4639 "Vector element count mismatch!");
4640 assert(Operand.getValueType().bitsLT(VT) &&
4641 "Invalid anyext node, dst < src!");
4642
4643 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4644 OpOpcode == ISD::ANY_EXTEND)
4645 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4646 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4647 else if (OpOpcode == ISD::UNDEF)
4648 return getUNDEF(VT);
4649
4650 // (ext (trunc x)) -> x
4651 if (OpOpcode == ISD::TRUNCATE) {
4652 SDValue OpOp = Operand.getOperand(0);
4653 if (OpOp.getValueType() == VT) {
4654 transferDbgValues(Operand, OpOp);
4655 return OpOp;
4656 }
4657 }
4658 break;
4659 case ISD::TRUNCATE:
4660 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4661 "Invalid TRUNCATE!");
4662 assert(VT.isVector() == Operand.getValueType().isVector() &&
4663 "TRUNCATE result type should be vector iff the operand "
4664 "type is vector!");
4665 if (Operand.getValueType() == VT) return Operand; // noop truncate
4666 assert((!VT.isVector() ||
4667 VT.getVectorNumElements() ==
4668 Operand.getValueType().getVectorNumElements()) &&
4669 "Vector element count mismatch!");
4670 assert(Operand.getValueType().bitsGT(VT) &&
4671 "Invalid truncate node, src < dst!");
4672 if (OpOpcode == ISD::TRUNCATE)
4673 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4674 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4675 OpOpcode == ISD::ANY_EXTEND) {
4676 // If the source is smaller than the dest, we still need an extend.
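// For example, truncating (zext i16 x to i64) to i32 becomes
// (zext i16 x to i32); if the inner source were already i32 it would be
// returned directly, and if it were wider it would be truncated instead.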
4677 if (Operand.getOperand(0).getValueType().getScalarType() 4678 .bitsLT(VT.getScalarType())) 4679 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4680 if (Operand.getOperand(0).getValueType().bitsGT(VT)) 4681 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4682 return Operand.getOperand(0); 4683 } 4684 if (OpOpcode == ISD::UNDEF) 4685 return getUNDEF(VT); 4686 break; 4687 case ISD::ANY_EXTEND_VECTOR_INREG: 4688 case ISD::ZERO_EXTEND_VECTOR_INREG: 4689 case ISD::SIGN_EXTEND_VECTOR_INREG: 4690 assert(VT.isVector() && "This DAG node is restricted to vector types."); 4691 assert(Operand.getValueType().bitsLE(VT) && 4692 "The input must be the same size or smaller than the result."); 4693 assert(VT.getVectorNumElements() < 4694 Operand.getValueType().getVectorNumElements() && 4695 "The destination vector type must have fewer lanes than the input."); 4696 break; 4697 case ISD::ABS: 4698 assert(VT.isInteger() && VT == Operand.getValueType() && 4699 "Invalid ABS!"); 4700 if (OpOpcode == ISD::UNDEF) 4701 return getUNDEF(VT); 4702 break; 4703 case ISD::BSWAP: 4704 assert(VT.isInteger() && VT == Operand.getValueType() && 4705 "Invalid BSWAP!"); 4706 assert((VT.getScalarSizeInBits() % 16 == 0) && 4707 "BSWAP types must be a multiple of 16 bits!"); 4708 if (OpOpcode == ISD::UNDEF) 4709 return getUNDEF(VT); 4710 break; 4711 case ISD::BITREVERSE: 4712 assert(VT.isInteger() && VT == Operand.getValueType() && 4713 "Invalid BITREVERSE!"); 4714 if (OpOpcode == ISD::UNDEF) 4715 return getUNDEF(VT); 4716 break; 4717 case ISD::BITCAST: 4718 // Basic sanity checking. 4719 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 4720 "Cannot BITCAST between types of different sizes!"); 4721 if (VT == Operand.getValueType()) return Operand; // noop conversion. 4722 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4723 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4724 if (OpOpcode == ISD::UNDEF) 4725 return getUNDEF(VT); 4726 break; 4727 case ISD::SCALAR_TO_VECTOR: 4728 assert(VT.isVector() && !Operand.getValueType().isVector() && 4729 (VT.getVectorElementType() == Operand.getValueType() || 4730 (VT.getVectorElementType().isInteger() && 4731 Operand.getValueType().isInteger() && 4732 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4733 "Illegal SCALAR_TO_VECTOR node!"); 4734 if (OpOpcode == ISD::UNDEF) 4735 return getUNDEF(VT); 4736 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4737 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4738 isa<ConstantSDNode>(Operand.getOperand(1)) && 4739 Operand.getConstantOperandVal(1) == 0 && 4740 Operand.getOperand(0).getValueType() == VT) 4741 return Operand.getOperand(0); 4742 break; 4743 case ISD::FNEG: 4744 // Negation of an unknown bag of bits is still completely undefined. 
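    // (Contrast this with the binary FP folds in foldConstantFPMath below,
    // where an undef operand to fadd/fsub/fmul/fdiv/frem yields NaN rather
    // than undef unless both operands are undef.)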
4745 if (OpOpcode == ISD::UNDEF) 4746 return getUNDEF(VT); 4747 4748 if (OpOpcode == ISD::FNEG) // --X -> X 4749 return Operand.getOperand(0); 4750 break; 4751 case ISD::FABS: 4752 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4753 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4754 break; 4755 } 4756 4757 SDNode *N; 4758 SDVTList VTs = getVTList(VT); 4759 SDValue Ops[] = {Operand}; 4760 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4761 FoldingSetNodeID ID; 4762 AddNodeIDNode(ID, Opcode, VTs, Ops); 4763 void *IP = nullptr; 4764 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4765 E->intersectFlagsWith(Flags); 4766 return SDValue(E, 0); 4767 } 4768 4769 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4770 N->setFlags(Flags); 4771 createOperands(N, Ops); 4772 CSEMap.InsertNode(N, IP); 4773 } else { 4774 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4775 createOperands(N, Ops); 4776 } 4777 4778 InsertNode(N); 4779 SDValue V = SDValue(N, 0); 4780 NewSDValueDbgMsg(V, "Creating new node: ", this); 4781 return V; 4782 } 4783 4784 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, 4785 const APInt &C2) { 4786 switch (Opcode) { 4787 case ISD::ADD: return C1 + C2; 4788 case ISD::SUB: return C1 - C2; 4789 case ISD::MUL: return C1 * C2; 4790 case ISD::AND: return C1 & C2; 4791 case ISD::OR: return C1 | C2; 4792 case ISD::XOR: return C1 ^ C2; 4793 case ISD::SHL: return C1 << C2; 4794 case ISD::SRL: return C1.lshr(C2); 4795 case ISD::SRA: return C1.ashr(C2); 4796 case ISD::ROTL: return C1.rotl(C2); 4797 case ISD::ROTR: return C1.rotr(C2); 4798 case ISD::SMIN: return C1.sle(C2) ? C1 : C2; 4799 case ISD::SMAX: return C1.sge(C2) ? C1 : C2; 4800 case ISD::UMIN: return C1.ule(C2) ? C1 : C2; 4801 case ISD::UMAX: return C1.uge(C2) ? 
C1 : C2; 4802 case ISD::SADDSAT: return C1.sadd_sat(C2); 4803 case ISD::UADDSAT: return C1.uadd_sat(C2); 4804 case ISD::SSUBSAT: return C1.ssub_sat(C2); 4805 case ISD::USUBSAT: return C1.usub_sat(C2); 4806 case ISD::UDIV: 4807 if (!C2.getBoolValue()) 4808 break; 4809 return C1.udiv(C2); 4810 case ISD::UREM: 4811 if (!C2.getBoolValue()) 4812 break; 4813 return C1.urem(C2); 4814 case ISD::SDIV: 4815 if (!C2.getBoolValue()) 4816 break; 4817 return C1.sdiv(C2); 4818 case ISD::SREM: 4819 if (!C2.getBoolValue()) 4820 break; 4821 return C1.srem(C2); 4822 } 4823 return llvm::None; 4824 } 4825 4826 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4827 EVT VT, const ConstantSDNode *C1, 4828 const ConstantSDNode *C2) { 4829 if (C1->isOpaque() || C2->isOpaque()) 4830 return SDValue(); 4831 if (Optional<APInt> Folded = 4832 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue())) 4833 return getConstant(Folded.getValue(), DL, VT); 4834 return SDValue(); 4835 } 4836 4837 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4838 const GlobalAddressSDNode *GA, 4839 const SDNode *N2) { 4840 if (GA->getOpcode() != ISD::GlobalAddress) 4841 return SDValue(); 4842 if (!TLI->isOffsetFoldingLegal(GA)) 4843 return SDValue(); 4844 auto *C2 = dyn_cast<ConstantSDNode>(N2); 4845 if (!C2) 4846 return SDValue(); 4847 int64_t Offset = C2->getSExtValue(); 4848 switch (Opcode) { 4849 case ISD::ADD: break; 4850 case ISD::SUB: Offset = -uint64_t(Offset); break; 4851 default: return SDValue(); 4852 } 4853 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, 4854 GA->getOffset() + uint64_t(Offset)); 4855 } 4856 4857 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4858 switch (Opcode) { 4859 case ISD::SDIV: 4860 case ISD::UDIV: 4861 case ISD::SREM: 4862 case ISD::UREM: { 4863 // If a divisor is zero/undef or any element of a divisor vector is 4864 // zero/undef, the whole op is undef. 4865 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4866 SDValue Divisor = Ops[1]; 4867 if (Divisor.isUndef() || isNullConstant(Divisor)) 4868 return true; 4869 4870 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4871 llvm::any_of(Divisor->op_values(), 4872 [](SDValue V) { return V.isUndef() || 4873 isNullConstant(V); }); 4874 // TODO: Handle signed overflow. 4875 } 4876 // TODO: Handle oversized shifts. 4877 default: 4878 return false; 4879 } 4880 } 4881 4882 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4883 EVT VT, SDNode *N1, SDNode *N2) { 4884 // If the opcode is a target-specific ISD node, there's nothing we can 4885 // do here and the operand rules may not line up with the below, so 4886 // bail early. 4887 if (Opcode >= ISD::BUILTIN_OP_END) 4888 return SDValue(); 4889 4890 if (isUndef(Opcode, {SDValue(N1, 0), SDValue(N2, 0)})) 4891 return getUNDEF(VT); 4892 4893 // Handle the case of two scalars. 
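  // e.g. (add i32 3, 5) becomes the constant 8 via the ConstantSDNode
  // overload above, which defers to FoldValue for the actual arithmetic.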
4894 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) { 4895 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) { 4896 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, C1, C2); 4897 assert((!Folded || !VT.isVector()) && 4898 "Can't fold vectors ops with scalar operands"); 4899 return Folded; 4900 } 4901 } 4902 4903 // fold (add Sym, c) -> Sym+c 4904 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1)) 4905 return FoldSymbolOffset(Opcode, VT, GA, N2); 4906 if (TLI->isCommutativeBinOp(Opcode)) 4907 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2)) 4908 return FoldSymbolOffset(Opcode, VT, GA, N1); 4909 4910 // For vectors, extract each constant element and fold them individually. 4911 // Either input may be an undef value. 4912 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1); 4913 if (!BV1 && !N1->isUndef()) 4914 return SDValue(); 4915 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2); 4916 if (!BV2 && !N2->isUndef()) 4917 return SDValue(); 4918 // If both operands are undef, that's handled the same way as scalars. 4919 if (!BV1 && !BV2) 4920 return SDValue(); 4921 4922 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) && 4923 "Vector binop with different number of elements in operands?"); 4924 4925 EVT SVT = VT.getScalarType(); 4926 EVT LegalSVT = SVT; 4927 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4928 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4929 if (LegalSVT.bitsLT(SVT)) 4930 return SDValue(); 4931 } 4932 SmallVector<SDValue, 4> Outputs; 4933 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands(); 4934 for (unsigned I = 0; I != NumOps; ++I) { 4935 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT); 4936 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT); 4937 if (SVT.isInteger()) { 4938 if (V1->getValueType(0).bitsGT(SVT)) 4939 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1); 4940 if (V2->getValueType(0).bitsGT(SVT)) 4941 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2); 4942 } 4943 4944 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT) 4945 return SDValue(); 4946 4947 // Fold one vector element. 4948 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2); 4949 if (LegalSVT != SVT) 4950 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4951 4952 // Scalar folding only succeeded if the result is a constant or UNDEF. 4953 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4954 ScalarResult.getOpcode() != ISD::ConstantFP) 4955 return SDValue(); 4956 Outputs.push_back(ScalarResult); 4957 } 4958 4959 assert(VT.getVectorNumElements() == Outputs.size() && 4960 "Vector size mismatch!"); 4961 4962 // We may have a vector type but a scalar result. Create a splat. 4963 Outputs.resize(VT.getVectorNumElements(), Outputs.back()); 4964 4965 // Build a big vector out of the scalar elements we generated. 4966 return getBuildVector(VT, SDLoc(), Outputs); 4967 } 4968 4969 // TODO: Merge with FoldConstantArithmetic 4970 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, 4971 const SDLoc &DL, EVT VT, 4972 ArrayRef<SDValue> Ops, 4973 const SDNodeFlags Flags) { 4974 // If the opcode is a target-specific ISD node, there's nothing we can 4975 // do here and the operand rules may not line up with the below, so 4976 // bail early. 4977 if (Opcode >= ISD::BUILTIN_OP_END) 4978 return SDValue(); 4979 4980 if (isUndef(Opcode, Ops)) 4981 return getUNDEF(VT); 4982 4983 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday? 
4984 if (!VT.isVector()) 4985 return SDValue(); 4986 4987 unsigned NumElts = VT.getVectorNumElements(); 4988 4989 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) { 4990 return !Op.getValueType().isVector() || 4991 Op.getValueType().getVectorNumElements() == NumElts; 4992 }; 4993 4994 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { 4995 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); 4996 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || 4997 (BV && BV->isConstant()); 4998 }; 4999 5000 // All operands must be vector types with the same number of elements as 5001 // the result type and must be either UNDEF or a build vector of constant 5002 // or UNDEF scalars. 5003 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) || 5004 !llvm::all_of(Ops, IsScalarOrSameVectorSize)) 5005 return SDValue(); 5006 5007 // If we are comparing vectors, then the result needs to be a i1 boolean 5008 // that is then sign-extended back to the legal result type. 5009 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); 5010 5011 // Find legal integer scalar type for constant promotion and 5012 // ensure that its scalar size is at least as large as source. 5013 EVT LegalSVT = VT.getScalarType(); 5014 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 5015 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 5016 if (LegalSVT.bitsLT(VT.getScalarType())) 5017 return SDValue(); 5018 } 5019 5020 // Constant fold each scalar lane separately. 5021 SmallVector<SDValue, 4> ScalarResults; 5022 for (unsigned i = 0; i != NumElts; i++) { 5023 SmallVector<SDValue, 4> ScalarOps; 5024 for (SDValue Op : Ops) { 5025 EVT InSVT = Op.getValueType().getScalarType(); 5026 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op); 5027 if (!InBV) { 5028 // We've checked that this is UNDEF or a constant of some kind. 5029 if (Op.isUndef()) 5030 ScalarOps.push_back(getUNDEF(InSVT)); 5031 else 5032 ScalarOps.push_back(Op); 5033 continue; 5034 } 5035 5036 SDValue ScalarOp = InBV->getOperand(i); 5037 EVT ScalarVT = ScalarOp.getValueType(); 5038 5039 // Build vector (integer) scalar operands may need implicit 5040 // truncation - do this before constant folding. 5041 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 5042 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 5043 5044 ScalarOps.push_back(ScalarOp); 5045 } 5046 5047 // Constant fold the scalar operands. 5048 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 5049 5050 // Legalize the (integer) scalar constant if necessary. 5051 if (LegalSVT != SVT) 5052 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 5053 5054 // Scalar folding only succeeded if the result is a constant or UNDEF. 5055 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 5056 ScalarResult.getOpcode() != ISD::ConstantFP) 5057 return SDValue(); 5058 ScalarResults.push_back(ScalarResult); 5059 } 5060 5061 SDValue V = getBuildVector(VT, DL, ScalarResults); 5062 NewSDValueDbgMsg(V, "New node fold constant vector: ", this); 5063 return V; 5064 } 5065 5066 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL, 5067 EVT VT, SDValue N1, SDValue N2) { 5068 // TODO: We don't do any constant folding for strict FP opcodes here, but we 5069 // should. That will require dealing with a potentially non-default 5070 // rounding mode, checking the "opStatus" return value from the APFloat 5071 // math calculations, and possibly other variations. 
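  // The binary folds below only fire when both operands are already
  // ConstantFPSDNodes, e.g. (fadd (fpimm 1.0), (fpimm 2.0)) is replaced by
  // (fpimm 3.0) using the default round-to-nearest-even mode.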
5072 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 5073 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 5074 if (N1CFP && N2CFP) { 5075 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF(); 5076 switch (Opcode) { 5077 case ISD::FADD: 5078 C1.add(C2, APFloat::rmNearestTiesToEven); 5079 return getConstantFP(C1, DL, VT); 5080 case ISD::FSUB: 5081 C1.subtract(C2, APFloat::rmNearestTiesToEven); 5082 return getConstantFP(C1, DL, VT); 5083 case ISD::FMUL: 5084 C1.multiply(C2, APFloat::rmNearestTiesToEven); 5085 return getConstantFP(C1, DL, VT); 5086 case ISD::FDIV: 5087 C1.divide(C2, APFloat::rmNearestTiesToEven); 5088 return getConstantFP(C1, DL, VT); 5089 case ISD::FREM: 5090 C1.mod(C2); 5091 return getConstantFP(C1, DL, VT); 5092 case ISD::FCOPYSIGN: 5093 C1.copySign(C2); 5094 return getConstantFP(C1, DL, VT); 5095 default: break; 5096 } 5097 } 5098 if (N1CFP && Opcode == ISD::FP_ROUND) { 5099 APFloat C1 = N1CFP->getValueAPF(); // make copy 5100 bool Unused; 5101 // This can return overflow, underflow, or inexact; we don't care. 5102 // FIXME need to be more flexible about rounding mode. 5103 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, 5104 &Unused); 5105 return getConstantFP(C1, DL, VT); 5106 } 5107 5108 switch (Opcode) { 5109 case ISD::FADD: 5110 case ISD::FSUB: 5111 case ISD::FMUL: 5112 case ISD::FDIV: 5113 case ISD::FREM: 5114 // If both operands are undef, the result is undef. If 1 operand is undef, 5115 // the result is NaN. This should match the behavior of the IR optimizer. 5116 if (N1.isUndef() && N2.isUndef()) 5117 return getUNDEF(VT); 5118 if (N1.isUndef() || N2.isUndef()) 5119 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); 5120 } 5121 return SDValue(); 5122 } 5123 5124 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5125 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 5126 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5127 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 5128 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5129 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5130 5131 // Canonicalize constant to RHS if commutative. 5132 if (TLI->isCommutativeBinOp(Opcode)) { 5133 if (N1C && !N2C) { 5134 std::swap(N1C, N2C); 5135 std::swap(N1, N2); 5136 } else if (N1CFP && !N2CFP) { 5137 std::swap(N1CFP, N2CFP); 5138 std::swap(N1, N2); 5139 } 5140 } 5141 5142 switch (Opcode) { 5143 default: break; 5144 case ISD::TokenFactor: 5145 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 5146 N2.getValueType() == MVT::Other && "Invalid token factor!"); 5147 // Fold trivial token factors. 5148 if (N1.getOpcode() == ISD::EntryToken) return N2; 5149 if (N2.getOpcode() == ISD::EntryToken) return N1; 5150 if (N1 == N2) return N1; 5151 break; 5152 case ISD::BUILD_VECTOR: { 5153 // Attempt to simplify BUILD_VECTOR. 5154 SDValue Ops[] = {N1, N2}; 5155 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5156 return V; 5157 break; 5158 } 5159 case ISD::CONCAT_VECTORS: { 5160 SDValue Ops[] = {N1, N2}; 5161 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5162 return V; 5163 break; 5164 } 5165 case ISD::AND: 5166 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5167 assert(N1.getValueType() == N2.getValueType() && 5168 N1.getValueType() == VT && "Binary operator types must match!"); 5169 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 5170 // worth handling here. 
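    // (Expanding an i64 AND into two i32 halves frequently leaves one half
    // ANDed with 0 or all-ones, which the two checks below clean up.)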
5171 if (N2C && N2C->isNullValue()) 5172 return N2; 5173 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 5174 return N1; 5175 break; 5176 case ISD::OR: 5177 case ISD::XOR: 5178 case ISD::ADD: 5179 case ISD::SUB: 5180 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5181 assert(N1.getValueType() == N2.getValueType() && 5182 N1.getValueType() == VT && "Binary operator types must match!"); 5183 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 5184 // it's worth handling here. 5185 if (N2C && N2C->isNullValue()) 5186 return N1; 5187 break; 5188 case ISD::UDIV: 5189 case ISD::UREM: 5190 case ISD::MULHU: 5191 case ISD::MULHS: 5192 case ISD::MUL: 5193 case ISD::SDIV: 5194 case ISD::SREM: 5195 case ISD::SMIN: 5196 case ISD::SMAX: 5197 case ISD::UMIN: 5198 case ISD::UMAX: 5199 case ISD::SADDSAT: 5200 case ISD::SSUBSAT: 5201 case ISD::UADDSAT: 5202 case ISD::USUBSAT: 5203 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5204 assert(N1.getValueType() == N2.getValueType() && 5205 N1.getValueType() == VT && "Binary operator types must match!"); 5206 break; 5207 case ISD::FADD: 5208 case ISD::FSUB: 5209 case ISD::FMUL: 5210 case ISD::FDIV: 5211 case ISD::FREM: 5212 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5213 assert(N1.getValueType() == N2.getValueType() && 5214 N1.getValueType() == VT && "Binary operator types must match!"); 5215 if (SDValue V = simplifyFPBinop(Opcode, N1, N2)) 5216 return V; 5217 break; 5218 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 5219 assert(N1.getValueType() == VT && 5220 N1.getValueType().isFloatingPoint() && 5221 N2.getValueType().isFloatingPoint() && 5222 "Invalid FCOPYSIGN!"); 5223 break; 5224 case ISD::SHL: 5225 case ISD::SRA: 5226 case ISD::SRL: 5227 if (SDValue V = simplifyShift(N1, N2)) 5228 return V; 5229 LLVM_FALLTHROUGH; 5230 case ISD::ROTL: 5231 case ISD::ROTR: 5232 assert(VT == N1.getValueType() && 5233 "Shift operators return type must be the same as their first arg"); 5234 assert(VT.isInteger() && N2.getValueType().isInteger() && 5235 "Shifts only work on integers"); 5236 assert((!VT.isVector() || VT == N2.getValueType()) && 5237 "Vector shift amounts must be in the same as their first arg"); 5238 // Verify that the shift amount VT is big enough to hold valid shift 5239 // amounts. This catches things like trying to shift an i1024 value by an 5240 // i8, which is easy to fall into in generic code that uses 5241 // TLI.getShiftAmount(). 5242 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) && 5243 "Invalid use of small shift amount with oversized value!"); 5244 5245 // Always fold shifts of i1 values so the code generator doesn't need to 5246 // handle them. Since we know the size of the shift has to be less than the 5247 // size of the value, the shift/rotate count is guaranteed to be zero. 5248 if (VT == MVT::i1) 5249 return N1; 5250 if (N2C && N2C->isNullValue()) 5251 return N1; 5252 break; 5253 case ISD::FP_ROUND: 5254 assert(VT.isFloatingPoint() && 5255 N1.getValueType().isFloatingPoint() && 5256 VT.bitsLE(N1.getValueType()) && 5257 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && 5258 "Invalid FP_ROUND!"); 5259 if (N1.getValueType() == VT) return N1; // noop conversion. 
5260 break; 5261 case ISD::AssertSext: 5262 case ISD::AssertZext: { 5263 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5264 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5265 assert(VT.isInteger() && EVT.isInteger() && 5266 "Cannot *_EXTEND_INREG FP types"); 5267 assert(!EVT.isVector() && 5268 "AssertSExt/AssertZExt type should be the vector element type " 5269 "rather than the vector type!"); 5270 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!"); 5271 if (VT.getScalarType() == EVT) return N1; // noop assertion. 5272 break; 5273 } 5274 case ISD::SIGN_EXTEND_INREG: { 5275 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5276 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5277 assert(VT.isInteger() && EVT.isInteger() && 5278 "Cannot *_EXTEND_INREG FP types"); 5279 assert(EVT.isVector() == VT.isVector() && 5280 "SIGN_EXTEND_INREG type should be vector iff the operand " 5281 "type is vector!"); 5282 assert((!EVT.isVector() || 5283 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 5284 "Vector element counts must match in SIGN_EXTEND_INREG"); 5285 assert(EVT.bitsLE(VT) && "Not extending!"); 5286 if (EVT == VT) return N1; // Not actually extending 5287 5288 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 5289 unsigned FromBits = EVT.getScalarSizeInBits(); 5290 Val <<= Val.getBitWidth() - FromBits; 5291 Val.ashrInPlace(Val.getBitWidth() - FromBits); 5292 return getConstant(Val, DL, ConstantVT); 5293 }; 5294 5295 if (N1C) { 5296 const APInt &Val = N1C->getAPIntValue(); 5297 return SignExtendInReg(Val, VT); 5298 } 5299 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 5300 SmallVector<SDValue, 8> Ops; 5301 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 5302 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5303 SDValue Op = N1.getOperand(i); 5304 if (Op.isUndef()) { 5305 Ops.push_back(getUNDEF(OpVT)); 5306 continue; 5307 } 5308 ConstantSDNode *C = cast<ConstantSDNode>(Op); 5309 APInt Val = C->getAPIntValue(); 5310 Ops.push_back(SignExtendInReg(Val, OpVT)); 5311 } 5312 return getBuildVector(VT, DL, Ops); 5313 } 5314 break; 5315 } 5316 case ISD::EXTRACT_VECTOR_ELT: 5317 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() && 5318 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \ 5319 element type of the vector."); 5320 5321 // Extract from an undefined value or using an undefined index is undefined. 5322 if (N1.isUndef() || N2.isUndef()) 5323 return getUNDEF(VT); 5324 5325 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 5326 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) 5327 return getUNDEF(VT); 5328 5329 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 5330 // expanding copies of large vectors from registers. 5331 if (N2C && 5332 N1.getOpcode() == ISD::CONCAT_VECTORS && 5333 N1.getNumOperands() > 0) { 5334 unsigned Factor = 5335 N1.getOperand(0).getValueType().getVectorNumElements(); 5336 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 5337 N1.getOperand(N2C->getZExtValue() / Factor), 5338 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL)); 5339 } 5340 5341 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 5342 // expanding large vector constants. 
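    // e.g. extracting lane 2 of (build_vector 10, 11, 12, 13) yields the
    // scalar constant 12, any-extended or truncated below if the element type
    // was promoted during legalization.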
5343 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { 5344 SDValue Elt = N1.getOperand(N2C->getZExtValue()); 5345 5346 if (VT != Elt.getValueType()) 5347 // If the vector element type is not legal, the BUILD_VECTOR operands 5348 // are promoted and implicitly truncated, and the result implicitly 5349 // extended. Make that explicit here. 5350 Elt = getAnyExtOrTrunc(Elt, DL, VT); 5351 5352 return Elt; 5353 } 5354 5355 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 5356 // operations are lowered to scalars. 5357 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 5358 // If the indices are the same, return the inserted element else 5359 // if the indices are known different, extract the element from 5360 // the original vector. 5361 SDValue N1Op2 = N1.getOperand(2); 5362 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); 5363 5364 if (N1Op2C && N2C) { 5365 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 5366 if (VT == N1.getOperand(1).getValueType()) 5367 return N1.getOperand(1); 5368 else 5369 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 5370 } 5371 5372 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 5373 } 5374 } 5375 5376 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed 5377 // when vector types are scalarized and v1iX is legal. 5378 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx) 5379 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5380 N1.getValueType().getVectorNumElements() == 1) { 5381 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), 5382 N1.getOperand(1)); 5383 } 5384 break; 5385 case ISD::EXTRACT_ELEMENT: 5386 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 5387 assert(!N1.getValueType().isVector() && !VT.isVector() && 5388 (N1.getValueType().isInteger() == VT.isInteger()) && 5389 N1.getValueType() != VT && 5390 "Wrong types for EXTRACT_ELEMENT!"); 5391 5392 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 5393 // 64-bit integers into 32-bit parts. Instead of building the extract of 5394 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 5395 if (N1.getOpcode() == ISD::BUILD_PAIR) 5396 return N1.getOperand(N2C->getZExtValue()); 5397 5398 // EXTRACT_ELEMENT of a constant int is also very common. 5399 if (N1C) { 5400 unsigned ElementSize = VT.getSizeInBits(); 5401 unsigned Shift = ElementSize * N2C->getZExtValue(); 5402 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift); 5403 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT); 5404 } 5405 break; 5406 case ISD::EXTRACT_SUBVECTOR: 5407 if (VT.isSimple() && N1.getValueType().isSimple()) { 5408 assert(VT.isVector() && N1.getValueType().isVector() && 5409 "Extract subvector VTs must be a vectors!"); 5410 assert(VT.getVectorElementType() == 5411 N1.getValueType().getVectorElementType() && 5412 "Extract subvector VTs must have the same element type!"); 5413 assert(VT.getSimpleVT() <= N1.getSimpleValueType() && 5414 "Extract subvector must be from larger vector to smaller vector!"); 5415 5416 if (N2C) { 5417 assert((VT.getVectorNumElements() + N2C->getZExtValue() 5418 <= N1.getValueType().getVectorNumElements()) 5419 && "Extract subvector overflow!"); 5420 } 5421 5422 // Trivial extraction. 5423 if (VT.getSimpleVT() == N1.getSimpleValueType()) 5424 return N1; 5425 5426 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. 
5427 if (N1.isUndef()) 5428 return getUNDEF(VT); 5429 5430 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of 5431 // the concat have the same type as the extract. 5432 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && 5433 N1.getNumOperands() > 0 && 5434 VT == N1.getOperand(0).getValueType()) { 5435 unsigned Factor = VT.getVectorNumElements(); 5436 return N1.getOperand(N2C->getZExtValue() / Factor); 5437 } 5438 5439 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created 5440 // during shuffle legalization. 5441 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 5442 VT == N1.getOperand(1).getValueType()) 5443 return N1.getOperand(1); 5444 } 5445 break; 5446 } 5447 5448 // Perform trivial constant folding. 5449 if (SDValue SV = 5450 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode())) 5451 return SV; 5452 5453 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2)) 5454 return V; 5455 5456 // Canonicalize an UNDEF to the RHS, even over a constant. 5457 if (N1.isUndef()) { 5458 if (TLI->isCommutativeBinOp(Opcode)) { 5459 std::swap(N1, N2); 5460 } else { 5461 switch (Opcode) { 5462 case ISD::SIGN_EXTEND_INREG: 5463 case ISD::SUB: 5464 return getUNDEF(VT); // fold op(undef, arg2) -> undef 5465 case ISD::UDIV: 5466 case ISD::SDIV: 5467 case ISD::UREM: 5468 case ISD::SREM: 5469 case ISD::SSUBSAT: 5470 case ISD::USUBSAT: 5471 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 5472 } 5473 } 5474 } 5475 5476 // Fold a bunch of operators when the RHS is undef. 5477 if (N2.isUndef()) { 5478 switch (Opcode) { 5479 case ISD::XOR: 5480 if (N1.isUndef()) 5481 // Handle undef ^ undef -> 0 special case. This is a common 5482 // idiom (misuse). 5483 return getConstant(0, DL, VT); 5484 LLVM_FALLTHROUGH; 5485 case ISD::ADD: 5486 case ISD::SUB: 5487 case ISD::UDIV: 5488 case ISD::SDIV: 5489 case ISD::UREM: 5490 case ISD::SREM: 5491 return getUNDEF(VT); // fold op(arg1, undef) -> undef 5492 case ISD::MUL: 5493 case ISD::AND: 5494 case ISD::SSUBSAT: 5495 case ISD::USUBSAT: 5496 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 5497 case ISD::OR: 5498 case ISD::SADDSAT: 5499 case ISD::UADDSAT: 5500 return getAllOnesConstant(DL, VT); 5501 } 5502 } 5503 5504 // Memoize this node if possible. 5505 SDNode *N; 5506 SDVTList VTs = getVTList(VT); 5507 SDValue Ops[] = {N1, N2}; 5508 if (VT != MVT::Glue) { 5509 FoldingSetNodeID ID; 5510 AddNodeIDNode(ID, Opcode, VTs, Ops); 5511 void *IP = nullptr; 5512 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5513 E->intersectFlagsWith(Flags); 5514 return SDValue(E, 0); 5515 } 5516 5517 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5518 N->setFlags(Flags); 5519 createOperands(N, Ops); 5520 CSEMap.InsertNode(N, IP); 5521 } else { 5522 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5523 createOperands(N, Ops); 5524 } 5525 5526 InsertNode(N); 5527 SDValue V = SDValue(N, 0); 5528 NewSDValueDbgMsg(V, "Creating new node: ", this); 5529 return V; 5530 } 5531 5532 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5533 SDValue N1, SDValue N2, SDValue N3, 5534 const SDNodeFlags Flags) { 5535 // Perform various simplifications. 
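  // e.g. an FMA of three constants folds to a constant, a SETCC of constant
  // vectors folds lane by lane, and degenerate INSERT_VECTOR_ELT /
  // INSERT_SUBVECTOR forms collapse to one of their inputs.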
5536 switch (Opcode) { 5537 case ISD::FMA: { 5538 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5539 assert(N1.getValueType() == VT && N2.getValueType() == VT && 5540 N3.getValueType() == VT && "FMA types must match!"); 5541 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5542 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5543 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 5544 if (N1CFP && N2CFP && N3CFP) { 5545 APFloat V1 = N1CFP->getValueAPF(); 5546 const APFloat &V2 = N2CFP->getValueAPF(); 5547 const APFloat &V3 = N3CFP->getValueAPF(); 5548 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 5549 return getConstantFP(V1, DL, VT); 5550 } 5551 break; 5552 } 5553 case ISD::BUILD_VECTOR: { 5554 // Attempt to simplify BUILD_VECTOR. 5555 SDValue Ops[] = {N1, N2, N3}; 5556 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5557 return V; 5558 break; 5559 } 5560 case ISD::CONCAT_VECTORS: { 5561 SDValue Ops[] = {N1, N2, N3}; 5562 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5563 return V; 5564 break; 5565 } 5566 case ISD::SETCC: { 5567 assert(VT.isInteger() && "SETCC result type must be an integer!"); 5568 assert(N1.getValueType() == N2.getValueType() && 5569 "SETCC operands must have the same type!"); 5570 assert(VT.isVector() == N1.getValueType().isVector() && 5571 "SETCC type should be vector iff the operand type is vector!"); 5572 assert((!VT.isVector() || 5573 VT.getVectorNumElements() == N1.getValueType().getVectorNumElements()) && 5574 "SETCC vector element counts must match!"); 5575 // Use FoldSetCC to simplify SETCC's. 5576 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 5577 return V; 5578 // Vector constant folding. 5579 SDValue Ops[] = {N1, N2, N3}; 5580 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 5581 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 5582 return V; 5583 } 5584 break; 5585 } 5586 case ISD::SELECT: 5587 case ISD::VSELECT: 5588 if (SDValue V = simplifySelect(N1, N2, N3)) 5589 return V; 5590 break; 5591 case ISD::VECTOR_SHUFFLE: 5592 llvm_unreachable("should use getVectorShuffle constructor!"); 5593 case ISD::INSERT_VECTOR_ELT: { 5594 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 5595 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF 5596 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 5597 return getUNDEF(VT); 5598 5599 // Undefined index can be assumed out-of-bounds, so that's UNDEF too. 5600 if (N3.isUndef()) 5601 return getUNDEF(VT); 5602 5603 // If the inserted element is an UNDEF, just use the input vector. 5604 if (N2.isUndef()) 5605 return N1; 5606 5607 break; 5608 } 5609 case ISD::INSERT_SUBVECTOR: { 5610 // Inserting undef into undef is still undef. 
5611 if (N1.isUndef() && N2.isUndef()) 5612 return getUNDEF(VT); 5613 SDValue Index = N3; 5614 if (VT.isSimple() && N1.getValueType().isSimple() 5615 && N2.getValueType().isSimple()) { 5616 assert(VT.isVector() && N1.getValueType().isVector() && 5617 N2.getValueType().isVector() && 5618 "Insert subvector VTs must be a vectors"); 5619 assert(VT == N1.getValueType() && 5620 "Dest and insert subvector source types must match!"); 5621 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() && 5622 "Insert subvector must be from smaller vector to larger vector!"); 5623 if (isa<ConstantSDNode>(Index)) { 5624 assert((N2.getValueType().getVectorNumElements() + 5625 cast<ConstantSDNode>(Index)->getZExtValue() 5626 <= VT.getVectorNumElements()) 5627 && "Insert subvector overflow!"); 5628 } 5629 5630 // Trivial insertion. 5631 if (VT.getSimpleVT() == N2.getSimpleValueType()) 5632 return N2; 5633 5634 // If this is an insert of an extracted vector into an undef vector, we 5635 // can just use the input to the extract. 5636 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5637 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT) 5638 return N2.getOperand(0); 5639 } 5640 break; 5641 } 5642 case ISD::BITCAST: 5643 // Fold bit_convert nodes from a type to themselves. 5644 if (N1.getValueType() == VT) 5645 return N1; 5646 break; 5647 } 5648 5649 // Memoize node if it doesn't produce a flag. 5650 SDNode *N; 5651 SDVTList VTs = getVTList(VT); 5652 SDValue Ops[] = {N1, N2, N3}; 5653 if (VT != MVT::Glue) { 5654 FoldingSetNodeID ID; 5655 AddNodeIDNode(ID, Opcode, VTs, Ops); 5656 void *IP = nullptr; 5657 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5658 E->intersectFlagsWith(Flags); 5659 return SDValue(E, 0); 5660 } 5661 5662 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5663 N->setFlags(Flags); 5664 createOperands(N, Ops); 5665 CSEMap.InsertNode(N, IP); 5666 } else { 5667 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5668 createOperands(N, Ops); 5669 } 5670 5671 InsertNode(N); 5672 SDValue V = SDValue(N, 0); 5673 NewSDValueDbgMsg(V, "Creating new node: ", this); 5674 return V; 5675 } 5676 5677 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5678 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 5679 SDValue Ops[] = { N1, N2, N3, N4 }; 5680 return getNode(Opcode, DL, VT, Ops); 5681 } 5682 5683 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5684 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 5685 SDValue N5) { 5686 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5687 return getNode(Opcode, DL, VT, Ops); 5688 } 5689 5690 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 5691 /// the incoming stack arguments to be loaded from the stack. 5692 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 5693 SmallVector<SDValue, 8> ArgChains; 5694 5695 // Include the original chain at the beginning of the list. When this is 5696 // used by target LowerCall hooks, this helps legalize find the 5697 // CALLSEQ_BEGIN node. 5698 ArgChains.push_back(Chain); 5699 5700 // Add a chain value for each stack argument. 
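  // That is, the chain result of every load hanging off the entry node whose
  // address is a fixed stack object (incoming arguments use negative frame
  // indices).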
5701 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 5702 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 5703 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 5704 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 5705 if (FI->getIndex() < 0) 5706 ArgChains.push_back(SDValue(L, 1)); 5707 5708 // Build a tokenfactor for all the chains. 5709 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 5710 } 5711 5712 /// getMemsetValue - Vectorized representation of the memset value 5713 /// operand. 5714 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 5715 const SDLoc &dl) { 5716 assert(!Value.isUndef()); 5717 5718 unsigned NumBits = VT.getScalarSizeInBits(); 5719 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 5720 assert(C->getAPIntValue().getBitWidth() == 8); 5721 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 5722 if (VT.isInteger()) { 5723 bool IsOpaque = VT.getSizeInBits() > 64 || 5724 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); 5725 return DAG.getConstant(Val, dl, VT, false, IsOpaque); 5726 } 5727 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 5728 VT); 5729 } 5730 5731 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 5732 EVT IntVT = VT.getScalarType(); 5733 if (!IntVT.isInteger()) 5734 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 5735 5736 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 5737 if (NumBits > 8) { 5738 // Use a multiplication with 0x010101... to extend the input to the 5739 // required length. 5740 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 5741 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 5742 DAG.getConstant(Magic, dl, IntVT)); 5743 } 5744 5745 if (VT != Value.getValueType() && !VT.isInteger()) 5746 Value = DAG.getBitcast(VT.getScalarType(), Value); 5747 if (VT != Value.getValueType()) 5748 Value = DAG.getSplatBuildVector(VT, dl, Value); 5749 5750 return Value; 5751 } 5752 5753 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 5754 /// used when a memcpy is turned into a memset when the source is a constant 5755 /// string ptr. 5756 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 5757 const TargetLowering &TLI, 5758 const ConstantDataArraySlice &Slice) { 5759 // Handle vector with all elements zero. 5760 if (Slice.Array == nullptr) { 5761 if (VT.isInteger()) 5762 return DAG.getConstant(0, dl, VT); 5763 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 5764 return DAG.getConstantFP(0.0, dl, VT); 5765 else if (VT.isVector()) { 5766 unsigned NumElts = VT.getVectorNumElements(); 5767 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? 
MVT::i32 : MVT::i64; 5768 return DAG.getNode(ISD::BITCAST, dl, VT, 5769 DAG.getConstant(0, dl, 5770 EVT::getVectorVT(*DAG.getContext(), 5771 EltVT, NumElts))); 5772 } else 5773 llvm_unreachable("Expected type!"); 5774 } 5775 5776 assert(!VT.isVector() && "Can't handle vector type here!"); 5777 unsigned NumVTBits = VT.getSizeInBits(); 5778 unsigned NumVTBytes = NumVTBits / 8; 5779 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 5780 5781 APInt Val(NumVTBits, 0); 5782 if (DAG.getDataLayout().isLittleEndian()) { 5783 for (unsigned i = 0; i != NumBytes; ++i) 5784 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 5785 } else { 5786 for (unsigned i = 0; i != NumBytes; ++i) 5787 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 5788 } 5789 5790 // If the "cost" of materializing the integer immediate is less than the cost 5791 // of a load, then it is cost effective to turn the load into the immediate. 5792 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 5793 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 5794 return DAG.getConstant(Val, dl, VT); 5795 return SDValue(nullptr, 0); 5796 } 5797 5798 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, int64_t Offset, 5799 const SDLoc &DL, 5800 const SDNodeFlags Flags) { 5801 EVT VT = Base.getValueType(); 5802 return getMemBasePlusOffset(Base, getConstant(Offset, DL, VT), DL, Flags); 5803 } 5804 5805 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset, 5806 const SDLoc &DL, 5807 const SDNodeFlags Flags) { 5808 assert(Offset.getValueType().isInteger()); 5809 EVT BasePtrVT = Ptr.getValueType(); 5810 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags); 5811 } 5812 5813 /// Returns true if memcpy source is constant data. 5814 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 5815 uint64_t SrcDelta = 0; 5816 GlobalAddressSDNode *G = nullptr; 5817 if (Src.getOpcode() == ISD::GlobalAddress) 5818 G = cast<GlobalAddressSDNode>(Src); 5819 else if (Src.getOpcode() == ISD::ADD && 5820 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 5821 Src.getOperand(1).getOpcode() == ISD::Constant) { 5822 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 5823 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 5824 } 5825 if (!G) 5826 return false; 5827 5828 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 5829 SrcDelta + G->getOffset()); 5830 } 5831 5832 static bool shouldLowerMemFuncForSize(const MachineFunction &MF, 5833 SelectionDAG &DAG) { 5834 // On Darwin, -Os means optimize for size without hurting performance, so 5835 // only really optimize for size when -Oz (MinSize) is used. 5836 if (MF.getTarget().getTargetTriple().isOSDarwin()) 5837 return MF.getFunction().hasMinSize(); 5838 return DAG.shouldOptForSize(); 5839 } 5840 5841 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, 5842 SmallVector<SDValue, 32> &OutChains, unsigned From, 5843 unsigned To, SmallVector<SDValue, 16> &OutLoadChains, 5844 SmallVector<SDValue, 16> &OutStoreChains) { 5845 assert(OutLoadChains.size() && "Missing loads in memcpy inlining"); 5846 assert(OutStoreChains.size() && "Missing stores in memcpy inlining"); 5847 SmallVector<SDValue, 16> GluedLoadChains; 5848 for (unsigned i = From; i < To; ++i) { 5849 OutChains.push_back(OutLoadChains[i]); 5850 GluedLoadChains.push_back(OutLoadChains[i]); 5851 } 5852 5853 // Chain for all loads. 
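  // A single TokenFactor over the glued load chains lets each rewritten store
  // below depend on every load in this group at once.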
5854 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 5855 GluedLoadChains); 5856 5857 for (unsigned i = From; i < To; ++i) { 5858 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]); 5859 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(), 5860 ST->getBasePtr(), ST->getMemoryVT(), 5861 ST->getMemOperand()); 5862 OutChains.push_back(NewStore); 5863 } 5864 } 5865 5866 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5867 SDValue Chain, SDValue Dst, SDValue Src, 5868 uint64_t Size, unsigned Alignment, 5869 bool isVol, bool AlwaysInline, 5870 MachinePointerInfo DstPtrInfo, 5871 MachinePointerInfo SrcPtrInfo) { 5872 // Turn a memcpy of undef to nop. 5873 // FIXME: We need to honor volatile even is Src is undef. 5874 if (Src.isUndef()) 5875 return Chain; 5876 5877 // Expand memcpy to a series of load and store ops if the size operand falls 5878 // below a certain threshold. 5879 // TODO: In the AlwaysInline case, if the size is big then generate a loop 5880 // rather than maybe a humongous number of loads and stores. 5881 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5882 const DataLayout &DL = DAG.getDataLayout(); 5883 LLVMContext &C = *DAG.getContext(); 5884 std::vector<EVT> MemOps; 5885 bool DstAlignCanChange = false; 5886 MachineFunction &MF = DAG.getMachineFunction(); 5887 MachineFrameInfo &MFI = MF.getFrameInfo(); 5888 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 5889 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5890 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5891 DstAlignCanChange = true; 5892 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5893 if (Alignment > SrcAlign) 5894 SrcAlign = Alignment; 5895 ConstantDataArraySlice Slice; 5896 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); 5897 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 5898 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 5899 5900 if (!TLI.findOptimalMemOpLowering( 5901 MemOps, Limit, Size, (DstAlignCanChange ? 0 : Alignment), 5902 (isZeroConstant ? 0 : SrcAlign), /*IsMemset=*/false, 5903 /*ZeroMemset=*/false, /*MemcpyStrSrc=*/CopyFromConstant, 5904 /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(), 5905 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes())) 5906 return SDValue(); 5907 5908 if (DstAlignCanChange) { 5909 Type *Ty = MemOps[0].getTypeForEVT(C); 5910 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5911 5912 // Don't promote to an alignment that would require dynamic stack 5913 // realignment. 5914 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 5915 if (!TRI->needsStackRealignment(MF)) 5916 while (NewAlign > Alignment && 5917 DL.exceedsNaturalStackAlignment(Align(NewAlign))) 5918 NewAlign /= 2; 5919 5920 if (NewAlign > Alignment) { 5921 // Give the stack frame object a larger alignment if needed. 5922 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5923 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5924 Alignment = NewAlign; 5925 } 5926 } 5927 5928 MachineMemOperand::Flags MMOFlags = 5929 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5930 SmallVector<SDValue, 16> OutLoadChains; 5931 SmallVector<SDValue, 16> OutStoreChains; 5932 SmallVector<SDValue, 32> OutChains; 5933 unsigned NumMemOps = MemOps.size(); 5934 uint64_t SrcOff = 0, DstOff = 0; 5935 for (unsigned i = 0; i != NumMemOps; ++i) { 5936 EVT VT = MemOps[i]; 5937 unsigned VTSize = VT.getSizeInBits() / 8; 5938 SDValue Value, Store; 5939 5940 if (VTSize > Size) { 5941 // Issuing an unaligned load / store pair that overlaps with the previous 5942 // pair. Adjust the offset accordingly. 5943 assert(i == NumMemOps-1 && i != 0); 5944 SrcOff -= VTSize - Size; 5945 DstOff -= VTSize - Size; 5946 } 5947 5948 if (CopyFromConstant && 5949 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 5950 // It's unlikely a store of a vector immediate can be done in a single 5951 // instruction. It would require a load from a constantpool first. 5952 // We only handle zero vectors here. 5953 // FIXME: Handle other cases where store of vector immediate is done in 5954 // a single instruction. 5955 ConstantDataArraySlice SubSlice; 5956 if (SrcOff < Slice.Length) { 5957 SubSlice = Slice; 5958 SubSlice.move(SrcOff); 5959 } else { 5960 // This is an out-of-bounds access and hence UB. Pretend we read zero. 5961 SubSlice.Array = nullptr; 5962 SubSlice.Offset = 0; 5963 SubSlice.Length = VTSize; 5964 } 5965 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); 5966 if (Value.getNode()) { 5967 Store = DAG.getStore( 5968 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5969 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags); 5970 OutChains.push_back(Store); 5971 } 5972 } 5973 5974 if (!Store.getNode()) { 5975 // The type might not be legal for the target. This should only happen 5976 // if the type is smaller than a legal type, as on PPC, so the right 5977 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 5978 // to Load/Store if NVT==VT. 5979 // FIXME does the case above also need this? 5980 EVT NVT = TLI.getTypeToTransformTo(C, VT); 5981 assert(NVT.bitsGE(VT)); 5982 5983 bool isDereferenceable = 5984 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 5985 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 5986 if (isDereferenceable) 5987 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 5988 5989 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 5990 DAG.getMemBasePlusOffset(Src, SrcOff, dl), 5991 SrcPtrInfo.getWithOffset(SrcOff), VT, 5992 MinAlign(SrcAlign, SrcOff), SrcMMOFlags); 5993 OutLoadChains.push_back(Value.getValue(1)); 5994 5995 Store = DAG.getTruncStore( 5996 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 5997 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags); 5998 OutStoreChains.push_back(Store); 5999 } 6000 SrcOff += VTSize; 6001 DstOff += VTSize; 6002 Size -= VTSize; 6003 } 6004 6005 unsigned GluedLdStLimit = MaxLdStGlue == 0 ? 6006 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue; 6007 unsigned NumLdStInMemcpy = OutStoreChains.size(); 6008 6009 if (NumLdStInMemcpy) { 6010 // It may be that memcpy might be converted to memset if it's memcpy 6011 // of constants. In such a case, we won't have loads and stores, but 6012 // just stores. In the absence of loads, there is nothing to gang up. 6013 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) { 6014 // If target does not care, just leave as it. 
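      // (That is, emit the load and store chains in their original,
      // interleaved order without forming any glue groups.)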
6015 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) { 6016 OutChains.push_back(OutLoadChains[i]); 6017 OutChains.push_back(OutStoreChains[i]); 6018 } 6019 } else { 6020 // Ld/St less than/equal limit set by target. 6021 if (NumLdStInMemcpy <= GluedLdStLimit) { 6022 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 6023 NumLdStInMemcpy, OutLoadChains, 6024 OutStoreChains); 6025 } else { 6026 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit; 6027 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit; 6028 unsigned GlueIter = 0; 6029 6030 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) { 6031 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit; 6032 unsigned IndexTo = NumLdStInMemcpy - GlueIter; 6033 6034 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo, 6035 OutLoadChains, OutStoreChains); 6036 GlueIter += GluedLdStLimit; 6037 } 6038 6039 // Residual ld/st. 6040 if (RemainingLdStInMemcpy) { 6041 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 6042 RemainingLdStInMemcpy, OutLoadChains, 6043 OutStoreChains); 6044 } 6045 } 6046 } 6047 } 6048 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6049 } 6050 6051 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 6052 SDValue Chain, SDValue Dst, SDValue Src, 6053 uint64_t Size, unsigned Align, 6054 bool isVol, bool AlwaysInline, 6055 MachinePointerInfo DstPtrInfo, 6056 MachinePointerInfo SrcPtrInfo) { 6057 // Turn a memmove of undef to nop. 6058 // FIXME: We need to honor volatile even is Src is undef. 6059 if (Src.isUndef()) 6060 return Chain; 6061 6062 // Expand memmove to a series of load and store ops if the size operand falls 6063 // below a certain threshold. 6064 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6065 const DataLayout &DL = DAG.getDataLayout(); 6066 LLVMContext &C = *DAG.getContext(); 6067 std::vector<EVT> MemOps; 6068 bool DstAlignCanChange = false; 6069 MachineFunction &MF = DAG.getMachineFunction(); 6070 MachineFrameInfo &MFI = MF.getFrameInfo(); 6071 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6072 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6073 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6074 DstAlignCanChange = true; 6075 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 6076 if (Align > SrcAlign) 6077 SrcAlign = Align; 6078 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 6079 // FIXME: `AllowOverlap` should really be `!isVol` but there is a bug in 6080 // findOptimalMemOpLowering. Meanwhile, setting it to `false` produces the 6081 // correct code. 6082 bool AllowOverlap = false; 6083 if (!TLI.findOptimalMemOpLowering( 6084 MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align), SrcAlign, 6085 /*IsMemset=*/false, /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false, 6086 AllowOverlap, DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), 6087 MF.getFunction().getAttributes())) 6088 return SDValue(); 6089 6090 if (DstAlignCanChange) { 6091 Type *Ty = MemOps[0].getTypeForEVT(C); 6092 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 6093 if (NewAlign > Align) { 6094 // Give the stack frame object a larger alignment if needed. 6095 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 6096 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6097 Align = NewAlign; 6098 } 6099 } 6100 6101 MachineMemOperand::Flags MMOFlags = 6102 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 6103 uint64_t SrcOff = 0, DstOff = 0; 6104 SmallVector<SDValue, 8> LoadValues; 6105 SmallVector<SDValue, 8> LoadChains; 6106 SmallVector<SDValue, 8> OutChains; 6107 unsigned NumMemOps = MemOps.size(); 6108 for (unsigned i = 0; i < NumMemOps; i++) { 6109 EVT VT = MemOps[i]; 6110 unsigned VTSize = VT.getSizeInBits() / 8; 6111 SDValue Value; 6112 6113 bool isDereferenceable = 6114 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 6115 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 6116 if (isDereferenceable) 6117 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 6118 6119 Value = 6120 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), 6121 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags); 6122 LoadValues.push_back(Value); 6123 LoadChains.push_back(Value.getValue(1)); 6124 SrcOff += VTSize; 6125 } 6126 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 6127 OutChains.clear(); 6128 for (unsigned i = 0; i < NumMemOps; i++) { 6129 EVT VT = MemOps[i]; 6130 unsigned VTSize = VT.getSizeInBits() / 8; 6131 SDValue Store; 6132 6133 Store = DAG.getStore(Chain, dl, LoadValues[i], 6134 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6135 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags); 6136 OutChains.push_back(Store); 6137 DstOff += VTSize; 6138 } 6139 6140 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6141 } 6142 6143 /// Lower the call to 'memset' intrinsic function into a series of store 6144 /// operations. 6145 /// 6146 /// \param DAG Selection DAG where lowered code is placed. 6147 /// \param dl Link to corresponding IR location. 6148 /// \param Chain Control flow dependency. 6149 /// \param Dst Pointer to destination memory location. 6150 /// \param Src Value of byte to write into the memory. 6151 /// \param Size Number of bytes to write. 6152 /// \param Align Alignment of the destination in bytes. 6153 /// \param isVol True if destination is volatile. 6154 /// \param DstPtrInfo IR information on the memory pointer. 6155 /// \returns New head in the control flow, if lowering was successful, empty 6156 /// SDValue otherwise. 6157 /// 6158 /// The function tries to replace 'llvm.memset' intrinsic with several store 6159 /// operations and value calculation code. This is usually profitable for small 6160 /// memory size. 6161 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, 6162 SDValue Chain, SDValue Dst, SDValue Src, 6163 uint64_t Size, unsigned Align, bool isVol, 6164 MachinePointerInfo DstPtrInfo) { 6165 // Turn a memset of undef to nop. 6166 // FIXME: We need to honor volatile even is Src is undef. 6167 if (Src.isUndef()) 6168 return Chain; 6169 6170 // Expand memset to a series of load/store ops if the size operand 6171 // falls below a certain threshold. 6172 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6173 std::vector<EVT> MemOps; 6174 bool DstAlignCanChange = false; 6175 MachineFunction &MF = DAG.getMachineFunction(); 6176 MachineFrameInfo &MFI = MF.getFrameInfo(); 6177 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6178 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6179 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6180 DstAlignCanChange = true; 6181 bool IsZeroVal = 6182 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 6183 if (!TLI.findOptimalMemOpLowering( 6184 MemOps, TLI.getMaxStoresPerMemset(OptSize), Size, 6185 (DstAlignCanChange ? 
0 : Align), 0, /*IsMemset=*/true, 6186 /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false, 6187 /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(), ~0u, 6188 MF.getFunction().getAttributes())) 6189 return SDValue(); 6190 6191 if (DstAlignCanChange) { 6192 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 6193 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 6194 if (NewAlign > Align) { 6195 // Give the stack frame object a larger alignment if needed. 6196 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 6197 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6198 Align = NewAlign; 6199 } 6200 } 6201 6202 SmallVector<SDValue, 8> OutChains; 6203 uint64_t DstOff = 0; 6204 unsigned NumMemOps = MemOps.size(); 6205 6206 // Find the largest store and generate the bit pattern for it. 6207 EVT LargestVT = MemOps[0]; 6208 for (unsigned i = 1; i < NumMemOps; i++) 6209 if (MemOps[i].bitsGT(LargestVT)) 6210 LargestVT = MemOps[i]; 6211 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 6212 6213 for (unsigned i = 0; i < NumMemOps; i++) { 6214 EVT VT = MemOps[i]; 6215 unsigned VTSize = VT.getSizeInBits() / 8; 6216 if (VTSize > Size) { 6217 // Issuing an unaligned load / store pair that overlaps with the previous 6218 // pair. Adjust the offset accordingly. 6219 assert(i == NumMemOps-1 && i != 0); 6220 DstOff -= VTSize - Size; 6221 } 6222 6223 // If this store is smaller than the largest store see whether we can get 6224 // the smaller value for free with a truncate. 6225 SDValue Value = MemSetValue; 6226 if (VT.bitsLT(LargestVT)) { 6227 if (!LargestVT.isVector() && !VT.isVector() && 6228 TLI.isTruncateFree(LargestVT, VT)) 6229 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 6230 else 6231 Value = getMemsetValue(Src, VT, DAG, dl); 6232 } 6233 assert(Value.getValueType() == VT && "Value with wrong type."); 6234 SDValue Store = DAG.getStore( 6235 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6236 DstPtrInfo.getWithOffset(DstOff), Align, 6237 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 6238 OutChains.push_back(Store); 6239 DstOff += VT.getSizeInBits() / 8; 6240 Size -= VTSize; 6241 } 6242 6243 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6244 } 6245 6246 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 6247 unsigned AS) { 6248 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 6249 // pointer operands can be losslessly bitcasted to pointers of address space 0 6250 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 6251 report_fatal_error("cannot lower memory intrinsic in address space " + 6252 Twine(AS)); 6253 } 6254 } 6255 6256 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 6257 SDValue Src, SDValue Size, unsigned Align, 6258 bool isVol, bool AlwaysInline, bool isTailCall, 6259 MachinePointerInfo DstPtrInfo, 6260 MachinePointerInfo SrcPtrInfo) { 6261 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6262 6263 // Check to see if we should lower the memcpy to loads and stores first. 6264 // For cases within the target-specified limits, this is the best choice. 6265 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6266 if (ConstantSize) { 6267 // Memcpy with size zero? Just return the original chain. 
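// (Returning the unmodified chain is enough here: a zero-length copy performs
// no memory operation, so there is nothing new to order.)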
6268 if (ConstantSize->isNullValue()) 6269 return Chain; 6270 6271 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6272 ConstantSize->getZExtValue(),Align, 6273 isVol, false, DstPtrInfo, SrcPtrInfo); 6274 if (Result.getNode()) 6275 return Result; 6276 } 6277 6278 // Then check to see if we should lower the memcpy with target-specific 6279 // code. If the target chooses to do this, this is the next best. 6280 if (TSI) { 6281 SDValue Result = TSI->EmitTargetCodeForMemcpy( 6282 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 6283 DstPtrInfo, SrcPtrInfo); 6284 if (Result.getNode()) 6285 return Result; 6286 } 6287 6288 // If we really need inline code and the target declined to provide it, 6289 // use a (potentially long) sequence of loads and stores. 6290 if (AlwaysInline) { 6291 assert(ConstantSize && "AlwaysInline requires a constant size!"); 6292 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6293 ConstantSize->getZExtValue(), Align, isVol, 6294 true, DstPtrInfo, SrcPtrInfo); 6295 } 6296 6297 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6298 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6299 6300 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 6301 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 6302 // respect volatile, so they may do things like read or write memory 6303 // beyond the given memory regions. But fixing this isn't easy, and most 6304 // people don't care. 6305 6306 // Emit a library call. 6307 TargetLowering::ArgListTy Args; 6308 TargetLowering::ArgListEntry Entry; 6309 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6310 Entry.Node = Dst; Args.push_back(Entry); 6311 Entry.Node = Src; Args.push_back(Entry); 6312 6313 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6314 Entry.Node = Size; Args.push_back(Entry); 6315 // FIXME: pass in SDLoc 6316 TargetLowering::CallLoweringInfo CLI(*this); 6317 CLI.setDebugLoc(dl) 6318 .setChain(Chain) 6319 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 6320 Dst.getValueType().getTypeForEVT(*getContext()), 6321 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 6322 TLI->getPointerTy(getDataLayout())), 6323 std::move(Args)) 6324 .setDiscardResult() 6325 .setTailCall(isTailCall); 6326 6327 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6328 return CallResult.second; 6329 } 6330 6331 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, 6332 SDValue Dst, unsigned DstAlign, 6333 SDValue Src, unsigned SrcAlign, 6334 SDValue Size, Type *SizeTy, 6335 unsigned ElemSz, bool isTailCall, 6336 MachinePointerInfo DstPtrInfo, 6337 MachinePointerInfo SrcPtrInfo) { 6338 // Emit a library call. 
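// This lowers to the element-wise unordered-atomic memcpy runtime routine
// selected by element size; element sizes without a corresponding libcall hit
// the fatal error below. The routine returns void, so the call result is
// discarded.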
6339 TargetLowering::ArgListTy Args; 6340 TargetLowering::ArgListEntry Entry; 6341 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6342 Entry.Node = Dst; 6343 Args.push_back(Entry); 6344 6345 Entry.Node = Src; 6346 Args.push_back(Entry); 6347 6348 Entry.Ty = SizeTy; 6349 Entry.Node = Size; 6350 Args.push_back(Entry); 6351 6352 RTLIB::Libcall LibraryCall = 6353 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6354 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6355 report_fatal_error("Unsupported element size"); 6356 6357 TargetLowering::CallLoweringInfo CLI(*this); 6358 CLI.setDebugLoc(dl) 6359 .setChain(Chain) 6360 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6361 Type::getVoidTy(*getContext()), 6362 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6363 TLI->getPointerTy(getDataLayout())), 6364 std::move(Args)) 6365 .setDiscardResult() 6366 .setTailCall(isTailCall); 6367 6368 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6369 return CallResult.second; 6370 } 6371 6372 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 6373 SDValue Src, SDValue Size, unsigned Align, 6374 bool isVol, bool isTailCall, 6375 MachinePointerInfo DstPtrInfo, 6376 MachinePointerInfo SrcPtrInfo) { 6377 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6378 6379 // Check to see if we should lower the memmove to loads and stores first. 6380 // For cases within the target-specified limits, this is the best choice. 6381 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6382 if (ConstantSize) { 6383 // Memmove with size zero? Just return the original chain. 6384 if (ConstantSize->isNullValue()) 6385 return Chain; 6386 6387 SDValue Result = 6388 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 6389 ConstantSize->getZExtValue(), Align, isVol, 6390 false, DstPtrInfo, SrcPtrInfo); 6391 if (Result.getNode()) 6392 return Result; 6393 } 6394 6395 // Then check to see if we should lower the memmove with target-specific 6396 // code. If the target chooses to do this, this is the next best. 6397 if (TSI) { 6398 SDValue Result = TSI->EmitTargetCodeForMemmove( 6399 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 6400 if (Result.getNode()) 6401 return Result; 6402 } 6403 6404 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6405 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6406 6407 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 6408 // not be safe. See memcpy above for more details. 6409 6410 // Emit a library call. 
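// The call is built against the standard C prototype
// void *memmove(void *dst, const void *src, size_t n): both pointer arguments
// are passed as i8*, the length as the target's intptr type, and the returned
// pointer is discarded.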
6411 TargetLowering::ArgListTy Args; 6412 TargetLowering::ArgListEntry Entry; 6413 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6414 Entry.Node = Dst; Args.push_back(Entry); 6415 Entry.Node = Src; Args.push_back(Entry); 6416 6417 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6418 Entry.Node = Size; Args.push_back(Entry); 6419 // FIXME: pass in SDLoc 6420 TargetLowering::CallLoweringInfo CLI(*this); 6421 CLI.setDebugLoc(dl) 6422 .setChain(Chain) 6423 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 6424 Dst.getValueType().getTypeForEVT(*getContext()), 6425 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 6426 TLI->getPointerTy(getDataLayout())), 6427 std::move(Args)) 6428 .setDiscardResult() 6429 .setTailCall(isTailCall); 6430 6431 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6432 return CallResult.second; 6433 } 6434 6435 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl, 6436 SDValue Dst, unsigned DstAlign, 6437 SDValue Src, unsigned SrcAlign, 6438 SDValue Size, Type *SizeTy, 6439 unsigned ElemSz, bool isTailCall, 6440 MachinePointerInfo DstPtrInfo, 6441 MachinePointerInfo SrcPtrInfo) { 6442 // Emit a library call. 6443 TargetLowering::ArgListTy Args; 6444 TargetLowering::ArgListEntry Entry; 6445 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6446 Entry.Node = Dst; 6447 Args.push_back(Entry); 6448 6449 Entry.Node = Src; 6450 Args.push_back(Entry); 6451 6452 Entry.Ty = SizeTy; 6453 Entry.Node = Size; 6454 Args.push_back(Entry); 6455 6456 RTLIB::Libcall LibraryCall = 6457 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6458 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6459 report_fatal_error("Unsupported element size"); 6460 6461 TargetLowering::CallLoweringInfo CLI(*this); 6462 CLI.setDebugLoc(dl) 6463 .setChain(Chain) 6464 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6465 Type::getVoidTy(*getContext()), 6466 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6467 TLI->getPointerTy(getDataLayout())), 6468 std::move(Args)) 6469 .setDiscardResult() 6470 .setTailCall(isTailCall); 6471 6472 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6473 return CallResult.second; 6474 } 6475 6476 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 6477 SDValue Src, SDValue Size, unsigned Align, 6478 bool isVol, bool isTailCall, 6479 MachinePointerInfo DstPtrInfo) { 6480 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6481 6482 // Check to see if we should lower the memset to stores first. 6483 // For cases within the target-specified limits, this is the best choice. 6484 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6485 if (ConstantSize) { 6486 // Memset with size zero? Just return the original chain. 6487 if (ConstantSize->isNullValue()) 6488 return Chain; 6489 6490 SDValue Result = 6491 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 6492 Align, isVol, DstPtrInfo); 6493 6494 if (Result.getNode()) 6495 return Result; 6496 } 6497 6498 // Then check to see if we should lower the memset with target-specific 6499 // code. If the target chooses to do this, this is the next best. 6500 if (TSI) { 6501 SDValue Result = TSI->EmitTargetCodeForMemset( 6502 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); 6503 if (Result.getNode()) 6504 return Result; 6505 } 6506 6507 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6508 6509 // Emit a library call. 
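// This matches the C prototype void *memset(void *dst, int value, size_t n),
// except that the fill value is passed with its existing SDValue type rather
// than being forced to 'int'; the length uses the target's intptr type and the
// returned pointer is discarded.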
6510 TargetLowering::ArgListTy Args; 6511 TargetLowering::ArgListEntry Entry; 6512 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext()); 6513 Args.push_back(Entry); 6514 Entry.Node = Src; 6515 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 6516 Args.push_back(Entry); 6517 Entry.Node = Size; 6518 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6519 Args.push_back(Entry); 6520 6521 // FIXME: pass in SDLoc 6522 TargetLowering::CallLoweringInfo CLI(*this); 6523 CLI.setDebugLoc(dl) 6524 .setChain(Chain) 6525 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 6526 Dst.getValueType().getTypeForEVT(*getContext()), 6527 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 6528 TLI->getPointerTy(getDataLayout())), 6529 std::move(Args)) 6530 .setDiscardResult() 6531 .setTailCall(isTailCall); 6532 6533 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6534 return CallResult.second; 6535 } 6536 6537 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl, 6538 SDValue Dst, unsigned DstAlign, 6539 SDValue Value, SDValue Size, Type *SizeTy, 6540 unsigned ElemSz, bool isTailCall, 6541 MachinePointerInfo DstPtrInfo) { 6542 // Emit a library call. 6543 TargetLowering::ArgListTy Args; 6544 TargetLowering::ArgListEntry Entry; 6545 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6546 Entry.Node = Dst; 6547 Args.push_back(Entry); 6548 6549 Entry.Ty = Type::getInt8Ty(*getContext()); 6550 Entry.Node = Value; 6551 Args.push_back(Entry); 6552 6553 Entry.Ty = SizeTy; 6554 Entry.Node = Size; 6555 Args.push_back(Entry); 6556 6557 RTLIB::Libcall LibraryCall = 6558 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6559 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6560 report_fatal_error("Unsupported element size"); 6561 6562 TargetLowering::CallLoweringInfo CLI(*this); 6563 CLI.setDebugLoc(dl) 6564 .setChain(Chain) 6565 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6566 Type::getVoidTy(*getContext()), 6567 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6568 TLI->getPointerTy(getDataLayout())), 6569 std::move(Args)) 6570 .setDiscardResult() 6571 .setTailCall(isTailCall); 6572 6573 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6574 return CallResult.second; 6575 } 6576 6577 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6578 SDVTList VTList, ArrayRef<SDValue> Ops, 6579 MachineMemOperand *MMO) { 6580 FoldingSetNodeID ID; 6581 ID.AddInteger(MemVT.getRawBits()); 6582 AddNodeIDNode(ID, Opcode, VTList, Ops); 6583 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6584 void* IP = nullptr; 6585 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6586 cast<AtomicSDNode>(E)->refineAlignment(MMO); 6587 return SDValue(E, 0); 6588 } 6589 6590 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6591 VTList, MemVT, MMO); 6592 createOperands(N, Ops); 6593 6594 CSEMap.InsertNode(N, IP); 6595 InsertNode(N); 6596 return SDValue(N, 0); 6597 } 6598 6599 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 6600 EVT MemVT, SDVTList VTs, SDValue Chain, 6601 SDValue Ptr, SDValue Cmp, SDValue Swp, 6602 MachineMemOperand *MMO) { 6603 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 6604 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 6605 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 6606 6607 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 6608 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6609 } 6610 6611 SDValue SelectionDAG::getAtomic(unsigned 
Opcode, const SDLoc &dl, EVT MemVT, 6612 SDValue Chain, SDValue Ptr, SDValue Val, 6613 MachineMemOperand *MMO) { 6614 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 6615 Opcode == ISD::ATOMIC_LOAD_SUB || 6616 Opcode == ISD::ATOMIC_LOAD_AND || 6617 Opcode == ISD::ATOMIC_LOAD_CLR || 6618 Opcode == ISD::ATOMIC_LOAD_OR || 6619 Opcode == ISD::ATOMIC_LOAD_XOR || 6620 Opcode == ISD::ATOMIC_LOAD_NAND || 6621 Opcode == ISD::ATOMIC_LOAD_MIN || 6622 Opcode == ISD::ATOMIC_LOAD_MAX || 6623 Opcode == ISD::ATOMIC_LOAD_UMIN || 6624 Opcode == ISD::ATOMIC_LOAD_UMAX || 6625 Opcode == ISD::ATOMIC_LOAD_FADD || 6626 Opcode == ISD::ATOMIC_LOAD_FSUB || 6627 Opcode == ISD::ATOMIC_SWAP || 6628 Opcode == ISD::ATOMIC_STORE) && 6629 "Invalid Atomic Op"); 6630 6631 EVT VT = Val.getValueType(); 6632 6633 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : 6634 getVTList(VT, MVT::Other); 6635 SDValue Ops[] = {Chain, Ptr, Val}; 6636 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6637 } 6638 6639 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6640 EVT VT, SDValue Chain, SDValue Ptr, 6641 MachineMemOperand *MMO) { 6642 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 6643 6644 SDVTList VTs = getVTList(VT, MVT::Other); 6645 SDValue Ops[] = {Chain, Ptr}; 6646 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6647 } 6648 6649 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 6650 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 6651 if (Ops.size() == 1) 6652 return Ops[0]; 6653 6654 SmallVector<EVT, 4> VTs; 6655 VTs.reserve(Ops.size()); 6656 for (unsigned i = 0; i < Ops.size(); ++i) 6657 VTs.push_back(Ops[i].getValueType()); 6658 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 6659 } 6660 6661 SDValue SelectionDAG::getMemIntrinsicNode( 6662 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 6663 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, 6664 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) { 6665 if (Align == 0) // Ensure that codegen never sees alignment 0 6666 Align = getEVTAlignment(MemVT); 6667 6668 if (!Size && MemVT.isScalableVector()) 6669 Size = MemoryLocation::UnknownSize; 6670 else if (!Size) 6671 Size = MemVT.getStoreSize(); 6672 6673 MachineFunction &MF = getMachineFunction(); 6674 MachineMemOperand *MMO = 6675 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align, AAInfo); 6676 6677 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 6678 } 6679 6680 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 6681 SDVTList VTList, 6682 ArrayRef<SDValue> Ops, EVT MemVT, 6683 MachineMemOperand *MMO) { 6684 assert((Opcode == ISD::INTRINSIC_VOID || 6685 Opcode == ISD::INTRINSIC_W_CHAIN || 6686 Opcode == ISD::PREFETCH || 6687 Opcode == ISD::LIFETIME_START || 6688 Opcode == ISD::LIFETIME_END || 6689 ((int)Opcode <= std::numeric_limits<int>::max() && 6690 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 6691 "Opcode is not a memory-accessing opcode!"); 6692 6693 // Memoize the node unless it returns a flag. 
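// Nodes whose last result is MVT::Glue are never CSE'd (glue must stay tied to
// the exact node that produces it); all other memory intrinsic nodes are
// uniqued through the FoldingSet so structurally identical ones are shared.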
6694 MemIntrinsicSDNode *N; 6695 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6696 FoldingSetNodeID ID; 6697 AddNodeIDNode(ID, Opcode, VTList, Ops); 6698 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 6699 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 6700 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6701 void *IP = nullptr; 6702 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6703 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 6704 return SDValue(E, 0); 6705 } 6706 6707 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6708 VTList, MemVT, MMO); 6709 createOperands(N, Ops); 6710 6711 CSEMap.InsertNode(N, IP); 6712 } else { 6713 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6714 VTList, MemVT, MMO); 6715 createOperands(N, Ops); 6716 } 6717 InsertNode(N); 6718 SDValue V(N, 0); 6719 NewSDValueDbgMsg(V, "Creating new node: ", this); 6720 return V; 6721 } 6722 6723 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, 6724 SDValue Chain, int FrameIndex, 6725 int64_t Size, int64_t Offset) { 6726 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END; 6727 const auto VTs = getVTList(MVT::Other); 6728 SDValue Ops[2] = { 6729 Chain, 6730 getFrameIndex(FrameIndex, 6731 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()), 6732 true)}; 6733 6734 FoldingSetNodeID ID; 6735 AddNodeIDNode(ID, Opcode, VTs, Ops); 6736 ID.AddInteger(FrameIndex); 6737 ID.AddInteger(Size); 6738 ID.AddInteger(Offset); 6739 void *IP = nullptr; 6740 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6741 return SDValue(E, 0); 6742 6743 LifetimeSDNode *N = newSDNode<LifetimeSDNode>( 6744 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset); 6745 createOperands(N, Ops); 6746 CSEMap.InsertNode(N, IP); 6747 InsertNode(N); 6748 SDValue V(N, 0); 6749 NewSDValueDbgMsg(V, "Creating new node: ", this); 6750 return V; 6751 } 6752 6753 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6754 /// MachinePointerInfo record from it. This is particularly useful because the 6755 /// code generator has many cases where it doesn't bother passing in a 6756 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6757 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6758 SelectionDAG &DAG, SDValue Ptr, 6759 int64_t Offset = 0) { 6760 // If this is FI+Offset, we can model it. 6761 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 6762 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 6763 FI->getIndex(), Offset); 6764 6765 // If this is (FI+Offset1)+Offset2, we can model it. 6766 if (Ptr.getOpcode() != ISD::ADD || 6767 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 6768 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 6769 return Info; 6770 6771 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 6772 return MachinePointerInfo::getFixedStack( 6773 DAG.getMachineFunction(), FI, 6774 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 6775 } 6776 6777 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6778 /// MachinePointerInfo record from it. This is particularly useful because the 6779 /// code generator has many cases where it doesn't bother passing in a 6780 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 
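/// If the offset operand is neither a constant nor undef, nothing useful can
/// be inferred and the given MachinePointerInfo is returned unchanged.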
6781 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6782 SelectionDAG &DAG, SDValue Ptr, 6783 SDValue OffsetOp) { 6784 // If the 'Offset' value isn't a constant, we can't handle this. 6785 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 6786 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); 6787 if (OffsetOp.isUndef()) 6788 return InferPointerInfo(Info, DAG, Ptr); 6789 return Info; 6790 } 6791 6792 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6793 EVT VT, const SDLoc &dl, SDValue Chain, 6794 SDValue Ptr, SDValue Offset, 6795 MachinePointerInfo PtrInfo, EVT MemVT, 6796 unsigned Alignment, 6797 MachineMemOperand::Flags MMOFlags, 6798 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6799 assert(Chain.getValueType() == MVT::Other && 6800 "Invalid chain type"); 6801 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6802 Alignment = getEVTAlignment(MemVT); 6803 6804 MMOFlags |= MachineMemOperand::MOLoad; 6805 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 6806 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 6807 // clients. 6808 if (PtrInfo.V.isNull()) 6809 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); 6810 6811 MachineFunction &MF = getMachineFunction(); 6812 MachineMemOperand *MMO = MF.getMachineMemOperand( 6813 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges); 6814 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 6815 } 6816 6817 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6818 EVT VT, const SDLoc &dl, SDValue Chain, 6819 SDValue Ptr, SDValue Offset, EVT MemVT, 6820 MachineMemOperand *MMO) { 6821 if (VT == MemVT) { 6822 ExtType = ISD::NON_EXTLOAD; 6823 } else if (ExtType == ISD::NON_EXTLOAD) { 6824 assert(VT == MemVT && "Non-extending load from different memory type!"); 6825 } else { 6826 // Extending load. 6827 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 6828 "Should only be an extending load, not truncating!"); 6829 assert(VT.isInteger() == MemVT.isInteger() && 6830 "Cannot convert from FP to Int or Int -> FP!"); 6831 assert(VT.isVector() == MemVT.isVector() && 6832 "Cannot use an ext load to convert to or from a vector!"); 6833 assert((!VT.isVector() || 6834 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 6835 "Cannot use an ext load to change the number of vector elements!"); 6836 } 6837 6838 bool Indexed = AM != ISD::UNINDEXED; 6839 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 6840 6841 SDVTList VTs = Indexed ? 
6842 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); 6843 SDValue Ops[] = { Chain, Ptr, Offset }; 6844 FoldingSetNodeID ID; 6845 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); 6846 ID.AddInteger(MemVT.getRawBits()); 6847 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( 6848 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); 6849 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6850 void *IP = nullptr; 6851 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6852 cast<LoadSDNode>(E)->refineAlignment(MMO); 6853 return SDValue(E, 0); 6854 } 6855 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6856 ExtType, MemVT, MMO); 6857 createOperands(N, Ops); 6858 6859 CSEMap.InsertNode(N, IP); 6860 InsertNode(N); 6861 SDValue V(N, 0); 6862 NewSDValueDbgMsg(V, "Creating new node: ", this); 6863 return V; 6864 } 6865 6866 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6867 SDValue Ptr, MachinePointerInfo PtrInfo, 6868 unsigned Alignment, 6869 MachineMemOperand::Flags MMOFlags, 6870 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6871 SDValue Undef = getUNDEF(Ptr.getValueType()); 6872 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6873 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); 6874 } 6875 6876 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6877 SDValue Ptr, MachineMemOperand *MMO) { 6878 SDValue Undef = getUNDEF(Ptr.getValueType()); 6879 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6880 VT, MMO); 6881 } 6882 6883 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6884 EVT VT, SDValue Chain, SDValue Ptr, 6885 MachinePointerInfo PtrInfo, EVT MemVT, 6886 unsigned Alignment, 6887 MachineMemOperand::Flags MMOFlags, 6888 const AAMDNodes &AAInfo) { 6889 SDValue Undef = getUNDEF(Ptr.getValueType()); 6890 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, 6891 MemVT, Alignment, MMOFlags, AAInfo); 6892 } 6893 6894 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6895 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, 6896 MachineMemOperand *MMO) { 6897 SDValue Undef = getUNDEF(Ptr.getValueType()); 6898 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, 6899 MemVT, MMO); 6900 } 6901 6902 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, 6903 SDValue Base, SDValue Offset, 6904 ISD::MemIndexedMode AM) { 6905 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); 6906 assert(LD->getOffset().isUndef() && "Load is already a indexed load!"); 6907 // Don't propagate the invariant or dereferenceable flags. 
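// Conservatively assume those properties do not transfer to the new access: a
// pre-indexed load, for instance, reads from Base+Offset rather than the
// address the original memory operand described.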
6908 auto MMOFlags = 6909 LD->getMemOperand()->getFlags() & 6910 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 6911 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 6912 LD->getChain(), Base, Offset, LD->getPointerInfo(), 6913 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 6914 LD->getAAInfo()); 6915 } 6916 6917 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6918 SDValue Ptr, MachinePointerInfo PtrInfo, 6919 unsigned Alignment, 6920 MachineMemOperand::Flags MMOFlags, 6921 const AAMDNodes &AAInfo) { 6922 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 6923 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6924 Alignment = getEVTAlignment(Val.getValueType()); 6925 6926 MMOFlags |= MachineMemOperand::MOStore; 6927 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6928 6929 if (PtrInfo.V.isNull()) 6930 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6931 6932 MachineFunction &MF = getMachineFunction(); 6933 MachineMemOperand *MMO = MF.getMachineMemOperand( 6934 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo); 6935 return getStore(Chain, dl, Val, Ptr, MMO); 6936 } 6937 6938 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6939 SDValue Ptr, MachineMemOperand *MMO) { 6940 assert(Chain.getValueType() == MVT::Other && 6941 "Invalid chain type"); 6942 EVT VT = Val.getValueType(); 6943 SDVTList VTs = getVTList(MVT::Other); 6944 SDValue Undef = getUNDEF(Ptr.getValueType()); 6945 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6946 FoldingSetNodeID ID; 6947 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6948 ID.AddInteger(VT.getRawBits()); 6949 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6950 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 6951 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6952 void *IP = nullptr; 6953 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6954 cast<StoreSDNode>(E)->refineAlignment(MMO); 6955 return SDValue(E, 0); 6956 } 6957 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6958 ISD::UNINDEXED, false, VT, MMO); 6959 createOperands(N, Ops); 6960 6961 CSEMap.InsertNode(N, IP); 6962 InsertNode(N); 6963 SDValue V(N, 0); 6964 NewSDValueDbgMsg(V, "Creating new node: ", this); 6965 return V; 6966 } 6967 6968 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6969 SDValue Ptr, MachinePointerInfo PtrInfo, 6970 EVT SVT, unsigned Alignment, 6971 MachineMemOperand::Flags MMOFlags, 6972 const AAMDNodes &AAInfo) { 6973 assert(Chain.getValueType() == MVT::Other && 6974 "Invalid chain type"); 6975 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6976 Alignment = getEVTAlignment(SVT); 6977 6978 MMOFlags |= MachineMemOperand::MOStore; 6979 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6980 6981 if (PtrInfo.V.isNull()) 6982 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6983 6984 MachineFunction &MF = getMachineFunction(); 6985 MachineMemOperand *MMO = MF.getMachineMemOperand( 6986 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 6987 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 6988 } 6989 6990 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6991 SDValue Ptr, EVT SVT, 6992 MachineMemOperand *MMO) { 6993 EVT VT = Val.getValueType(); 6994 6995 assert(Chain.getValueType() == MVT::Other && 6996 "Invalid chain type"); 6997 if (VT == SVT) 6998 return getStore(Chain, dl, Val, Ptr, 
MMO); 6999 7000 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 7001 "Should only be a truncating store, not extending!"); 7002 assert(VT.isInteger() == SVT.isInteger() && 7003 "Can't do FP-INT conversion!"); 7004 assert(VT.isVector() == SVT.isVector() && 7005 "Cannot use trunc store to convert to or from a vector!"); 7006 assert((!VT.isVector() || 7007 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 7008 "Cannot use trunc store to change the number of vector elements!"); 7009 7010 SDVTList VTs = getVTList(MVT::Other); 7011 SDValue Undef = getUNDEF(Ptr.getValueType()); 7012 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 7013 FoldingSetNodeID ID; 7014 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7015 ID.AddInteger(SVT.getRawBits()); 7016 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 7017 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 7018 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7019 void *IP = nullptr; 7020 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7021 cast<StoreSDNode>(E)->refineAlignment(MMO); 7022 return SDValue(E, 0); 7023 } 7024 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7025 ISD::UNINDEXED, true, SVT, MMO); 7026 createOperands(N, Ops); 7027 7028 CSEMap.InsertNode(N, IP); 7029 InsertNode(N); 7030 SDValue V(N, 0); 7031 NewSDValueDbgMsg(V, "Creating new node: ", this); 7032 return V; 7033 } 7034 7035 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 7036 SDValue Base, SDValue Offset, 7037 ISD::MemIndexedMode AM) { 7038 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 7039 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 7040 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 7041 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 7042 FoldingSetNodeID ID; 7043 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7044 ID.AddInteger(ST->getMemoryVT().getRawBits()); 7045 ID.AddInteger(ST->getRawSubclassData()); 7046 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 7047 void *IP = nullptr; 7048 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 7049 return SDValue(E, 0); 7050 7051 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7052 ST->isTruncatingStore(), ST->getMemoryVT(), 7053 ST->getMemOperand()); 7054 createOperands(N, Ops); 7055 7056 CSEMap.InsertNode(N, IP); 7057 InsertNode(N); 7058 SDValue V(N, 0); 7059 NewSDValueDbgMsg(V, "Creating new node: ", this); 7060 return V; 7061 } 7062 7063 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 7064 SDValue Base, SDValue Offset, SDValue Mask, 7065 SDValue PassThru, EVT MemVT, 7066 MachineMemOperand *MMO, 7067 ISD::MemIndexedMode AM, 7068 ISD::LoadExtType ExtTy, bool isExpanding) { 7069 bool Indexed = AM != ISD::UNINDEXED; 7070 assert((Indexed || Offset.isUndef()) && 7071 "Unindexed masked load with an offset!"); 7072 SDVTList VTs = Indexed ? 
getVTList(VT, Base.getValueType(), MVT::Other) 7073 : getVTList(VT, MVT::Other); 7074 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru}; 7075 FoldingSetNodeID ID; 7076 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 7077 ID.AddInteger(MemVT.getRawBits()); 7078 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 7079 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO)); 7080 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7081 void *IP = nullptr; 7082 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7083 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 7084 return SDValue(E, 0); 7085 } 7086 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7087 AM, ExtTy, isExpanding, MemVT, MMO); 7088 createOperands(N, Ops); 7089 7090 CSEMap.InsertNode(N, IP); 7091 InsertNode(N); 7092 SDValue V(N, 0); 7093 NewSDValueDbgMsg(V, "Creating new node: ", this); 7094 return V; 7095 } 7096 7097 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, 7098 SDValue Base, SDValue Offset, 7099 ISD::MemIndexedMode AM) { 7100 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad); 7101 assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!"); 7102 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base, 7103 Offset, LD->getMask(), LD->getPassThru(), 7104 LD->getMemoryVT(), LD->getMemOperand(), AM, 7105 LD->getExtensionType(), LD->isExpandingLoad()); 7106 } 7107 7108 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 7109 SDValue Val, SDValue Base, SDValue Offset, 7110 SDValue Mask, EVT MemVT, 7111 MachineMemOperand *MMO, 7112 ISD::MemIndexedMode AM, bool IsTruncating, 7113 bool IsCompressing) { 7114 assert(Chain.getValueType() == MVT::Other && 7115 "Invalid chain type"); 7116 bool Indexed = AM != ISD::UNINDEXED; 7117 assert((Indexed || Offset.isUndef()) && 7118 "Unindexed masked store with an offset!"); 7119 SDVTList VTs = Indexed ? 
getVTList(Base.getValueType(), MVT::Other) 7120 : getVTList(MVT::Other); 7121 SDValue Ops[] = {Chain, Val, Base, Offset, Mask}; 7122 FoldingSetNodeID ID; 7123 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 7124 ID.AddInteger(MemVT.getRawBits()); 7125 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 7126 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO)); 7127 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7128 void *IP = nullptr; 7129 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7130 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 7131 return SDValue(E, 0); 7132 } 7133 auto *N = 7134 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7135 IsTruncating, IsCompressing, MemVT, MMO); 7136 createOperands(N, Ops); 7137 7138 CSEMap.InsertNode(N, IP); 7139 InsertNode(N); 7140 SDValue V(N, 0); 7141 NewSDValueDbgMsg(V, "Creating new node: ", this); 7142 return V; 7143 } 7144 7145 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, 7146 SDValue Base, SDValue Offset, 7147 ISD::MemIndexedMode AM) { 7148 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore); 7149 assert(ST->getOffset().isUndef() && 7150 "Masked store is already a indexed store!"); 7151 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset, 7152 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(), 7153 AM, ST->isTruncatingStore(), ST->isCompressingStore()); 7154 } 7155 7156 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 7157 ArrayRef<SDValue> Ops, 7158 MachineMemOperand *MMO, 7159 ISD::MemIndexType IndexType) { 7160 assert(Ops.size() == 6 && "Incompatible number of operands"); 7161 7162 FoldingSetNodeID ID; 7163 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 7164 ID.AddInteger(VT.getRawBits()); 7165 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 7166 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7167 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7168 void *IP = nullptr; 7169 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7170 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 7171 return SDValue(E, 0); 7172 } 7173 7174 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7175 VTs, VT, MMO, IndexType); 7176 createOperands(N, Ops); 7177 7178 assert(N->getPassThru().getValueType() == N->getValueType(0) && 7179 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 7180 assert(N->getMask().getValueType().getVectorNumElements() == 7181 N->getValueType(0).getVectorNumElements() && 7182 "Vector width mismatch between mask and data"); 7183 assert(N->getIndex().getValueType().getVectorNumElements() >= 7184 N->getValueType(0).getVectorNumElements() && 7185 "Vector width mismatch between index and data"); 7186 assert(isa<ConstantSDNode>(N->getScale()) && 7187 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7188 "Scale should be a constant power of 2"); 7189 7190 CSEMap.InsertNode(N, IP); 7191 InsertNode(N); 7192 SDValue V(N, 0); 7193 NewSDValueDbgMsg(V, "Creating new node: ", this); 7194 return V; 7195 } 7196 7197 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 7198 ArrayRef<SDValue> Ops, 7199 MachineMemOperand *MMO, 7200 ISD::MemIndexType IndexType) { 7201 assert(Ops.size() == 6 && "Incompatible number of operands"); 7202 7203 FoldingSetNodeID ID; 7204 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 7205 ID.AddInteger(VT.getRawBits()); 7206 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 
7207 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7208 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7209 void *IP = nullptr; 7210 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7211 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 7212 return SDValue(E, 0); 7213 } 7214 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7215 VTs, VT, MMO, IndexType); 7216 createOperands(N, Ops); 7217 7218 assert(N->getMask().getValueType().getVectorNumElements() == 7219 N->getValue().getValueType().getVectorNumElements() && 7220 "Vector width mismatch between mask and data"); 7221 assert(N->getIndex().getValueType().getVectorNumElements() >= 7222 N->getValue().getValueType().getVectorNumElements() && 7223 "Vector width mismatch between index and data"); 7224 assert(isa<ConstantSDNode>(N->getScale()) && 7225 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7226 "Scale should be a constant power of 2"); 7227 7228 CSEMap.InsertNode(N, IP); 7229 InsertNode(N); 7230 SDValue V(N, 0); 7231 NewSDValueDbgMsg(V, "Creating new node: ", this); 7232 return V; 7233 } 7234 7235 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) { 7236 // select undef, T, F --> T (if T is a constant), otherwise F 7237 // select, ?, undef, F --> F 7238 // select, ?, T, undef --> T 7239 if (Cond.isUndef()) 7240 return isConstantValueOfAnyType(T) ? T : F; 7241 if (T.isUndef()) 7242 return F; 7243 if (F.isUndef()) 7244 return T; 7245 7246 // select true, T, F --> T 7247 // select false, T, F --> F 7248 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond)) 7249 return CondC->isNullValue() ? F : T; 7250 7251 // TODO: This should simplify VSELECT with constant condition using something 7252 // like this (but check boolean contents to be complete?): 7253 // if (ISD::isBuildVectorAllOnes(Cond.getNode())) 7254 // return T; 7255 // if (ISD::isBuildVectorAllZeros(Cond.getNode())) 7256 // return F; 7257 7258 // select ?, T, T --> T 7259 if (T == F) 7260 return T; 7261 7262 return SDValue(); 7263 } 7264 7265 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) { 7266 // shift undef, Y --> 0 (can always assume that the undef value is 0) 7267 if (X.isUndef()) 7268 return getConstant(0, SDLoc(X.getNode()), X.getValueType()); 7269 // shift X, undef --> undef (because it may shift by the bitwidth) 7270 if (Y.isUndef()) 7271 return getUNDEF(X.getValueType()); 7272 7273 // shift 0, Y --> 0 7274 // shift X, 0 --> X 7275 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y)) 7276 return X; 7277 7278 // shift X, C >= bitwidth(X) --> undef 7279 // All vector elements must be too big (or undef) to avoid partial undefs. 7280 auto isShiftTooBig = [X](ConstantSDNode *Val) { 7281 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits()); 7282 }; 7283 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true)) 7284 return getUNDEF(X.getValueType()); 7285 7286 return SDValue(); 7287 } 7288 7289 // TODO: Use fast-math-flags to enable more simplifications. 
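// For example, with the no-signed-zeros (nsz) flag present, X + 0.0 and
// X - (-0.0) could also fold to X; only the forms that are safe for signed
// zeros are handled below.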
7290 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y) { 7291 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true); 7292 if (!YC) 7293 return SDValue(); 7294 7295 // X + -0.0 --> X 7296 if (Opcode == ISD::FADD) 7297 if (YC->getValueAPF().isNegZero()) 7298 return X; 7299 7300 // X - +0.0 --> X 7301 if (Opcode == ISD::FSUB) 7302 if (YC->getValueAPF().isPosZero()) 7303 return X; 7304 7305 // X * 1.0 --> X 7306 // X / 1.0 --> X 7307 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV) 7308 if (YC->getValueAPF().isExactlyValue(1.0)) 7309 return X; 7310 7311 return SDValue(); 7312 } 7313 7314 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 7315 SDValue Ptr, SDValue SV, unsigned Align) { 7316 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 7317 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 7318 } 7319 7320 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7321 ArrayRef<SDUse> Ops) { 7322 switch (Ops.size()) { 7323 case 0: return getNode(Opcode, DL, VT); 7324 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 7325 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 7326 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 7327 default: break; 7328 } 7329 7330 // Copy from an SDUse array into an SDValue array for use with 7331 // the regular getNode logic. 7332 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 7333 return getNode(Opcode, DL, VT, NewOps); 7334 } 7335 7336 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7337 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7338 unsigned NumOps = Ops.size(); 7339 switch (NumOps) { 7340 case 0: return getNode(Opcode, DL, VT); 7341 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 7342 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 7343 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags); 7344 default: break; 7345 } 7346 7347 switch (Opcode) { 7348 default: break; 7349 case ISD::BUILD_VECTOR: 7350 // Attempt to simplify BUILD_VECTOR. 7351 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 7352 return V; 7353 break; 7354 case ISD::CONCAT_VECTORS: 7355 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 7356 return V; 7357 break; 7358 case ISD::SELECT_CC: 7359 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 7360 assert(Ops[0].getValueType() == Ops[1].getValueType() && 7361 "LHS and RHS of condition must have same type!"); 7362 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7363 "True and False arms of SelectCC must have same type!"); 7364 assert(Ops[2].getValueType() == VT && 7365 "select_cc node must be of same type as true and false value!"); 7366 break; 7367 case ISD::BR_CC: 7368 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 7369 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7370 "LHS/RHS of comparison should match types!"); 7371 break; 7372 } 7373 7374 // Memoize nodes. 
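// A structurally identical node (same opcode, result type and operands) may
// already exist; if so it is returned directly, which is what gives the
// SelectionDAG its sharing of common subexpressions.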
7375 SDNode *N; 7376 SDVTList VTs = getVTList(VT); 7377 7378 if (VT != MVT::Glue) { 7379 FoldingSetNodeID ID; 7380 AddNodeIDNode(ID, Opcode, VTs, Ops); 7381 void *IP = nullptr; 7382 7383 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7384 return SDValue(E, 0); 7385 7386 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7387 createOperands(N, Ops); 7388 7389 CSEMap.InsertNode(N, IP); 7390 } else { 7391 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7392 createOperands(N, Ops); 7393 } 7394 7395 InsertNode(N); 7396 SDValue V(N, 0); 7397 NewSDValueDbgMsg(V, "Creating new node: ", this); 7398 return V; 7399 } 7400 7401 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7402 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 7403 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 7404 } 7405 7406 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7407 ArrayRef<SDValue> Ops) { 7408 if (VTList.NumVTs == 1) 7409 return getNode(Opcode, DL, VTList.VTs[0], Ops); 7410 7411 switch (Opcode) { 7412 case ISD::STRICT_FP_EXTEND: 7413 assert(VTList.NumVTs == 2 && Ops.size() == 2 && 7414 "Invalid STRICT_FP_EXTEND!"); 7415 assert(VTList.VTs[0].isFloatingPoint() && 7416 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!"); 7417 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7418 "STRICT_FP_EXTEND result type should be vector iff the operand " 7419 "type is vector!"); 7420 assert((!VTList.VTs[0].isVector() || 7421 VTList.VTs[0].getVectorNumElements() == 7422 Ops[1].getValueType().getVectorNumElements()) && 7423 "Vector element count mismatch!"); 7424 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) && 7425 "Invalid fpext node, dst <= src!"); 7426 break; 7427 case ISD::STRICT_FP_ROUND: 7428 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!"); 7429 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7430 "STRICT_FP_ROUND result type should be vector iff the operand " 7431 "type is vector!"); 7432 assert((!VTList.VTs[0].isVector() || 7433 VTList.VTs[0].getVectorNumElements() == 7434 Ops[1].getValueType().getVectorNumElements()) && 7435 "Vector element count mismatch!"); 7436 assert(VTList.VTs[0].isFloatingPoint() && 7437 Ops[1].getValueType().isFloatingPoint() && 7438 VTList.VTs[0].bitsLT(Ops[1].getValueType()) && 7439 isa<ConstantSDNode>(Ops[2]) && 7440 (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 || 7441 cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) && 7442 "Invalid STRICT_FP_ROUND!"); 7443 break; 7444 #if 0 7445 // FIXME: figure out how to safely handle things like 7446 // int foo(int x) { return 1 << (x & 255); } 7447 // int bar() { return foo(256); } 7448 case ISD::SRA_PARTS: 7449 case ISD::SRL_PARTS: 7450 case ISD::SHL_PARTS: 7451 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 7452 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 7453 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7454 else if (N3.getOpcode() == ISD::AND) 7455 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 7456 // If the and is only masking out bits that cannot effect the shift, 7457 // eliminate the and. 7458 unsigned NumBits = VT.getScalarSizeInBits()*2; 7459 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 7460 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7461 } 7462 break; 7463 #endif 7464 } 7465 7466 // Memoize the node unless it returns a flag. 
7467 SDNode *N; 7468 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 7469 FoldingSetNodeID ID; 7470 AddNodeIDNode(ID, Opcode, VTList, Ops); 7471 void *IP = nullptr; 7472 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7473 return SDValue(E, 0); 7474 7475 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7476 createOperands(N, Ops); 7477 CSEMap.InsertNode(N, IP); 7478 } else { 7479 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7480 createOperands(N, Ops); 7481 } 7482 InsertNode(N); 7483 SDValue V(N, 0); 7484 NewSDValueDbgMsg(V, "Creating new node: ", this); 7485 return V; 7486 } 7487 7488 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7489 SDVTList VTList) { 7490 return getNode(Opcode, DL, VTList, None); 7491 } 7492 7493 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7494 SDValue N1) { 7495 SDValue Ops[] = { N1 }; 7496 return getNode(Opcode, DL, VTList, Ops); 7497 } 7498 7499 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7500 SDValue N1, SDValue N2) { 7501 SDValue Ops[] = { N1, N2 }; 7502 return getNode(Opcode, DL, VTList, Ops); 7503 } 7504 7505 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7506 SDValue N1, SDValue N2, SDValue N3) { 7507 SDValue Ops[] = { N1, N2, N3 }; 7508 return getNode(Opcode, DL, VTList, Ops); 7509 } 7510 7511 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7512 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 7513 SDValue Ops[] = { N1, N2, N3, N4 }; 7514 return getNode(Opcode, DL, VTList, Ops); 7515 } 7516 7517 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7518 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 7519 SDValue N5) { 7520 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 7521 return getNode(Opcode, DL, VTList, Ops); 7522 } 7523 7524 SDVTList SelectionDAG::getVTList(EVT VT) { 7525 return makeVTList(SDNode::getValueTypeList(VT), 1); 7526 } 7527 7528 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 7529 FoldingSetNodeID ID; 7530 ID.AddInteger(2U); 7531 ID.AddInteger(VT1.getRawBits()); 7532 ID.AddInteger(VT2.getRawBits()); 7533 7534 void *IP = nullptr; 7535 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7536 if (!Result) { 7537 EVT *Array = Allocator.Allocate<EVT>(2); 7538 Array[0] = VT1; 7539 Array[1] = VT2; 7540 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 7541 VTListMap.InsertNode(Result, IP); 7542 } 7543 return Result->getSDVTList(); 7544 } 7545 7546 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 7547 FoldingSetNodeID ID; 7548 ID.AddInteger(3U); 7549 ID.AddInteger(VT1.getRawBits()); 7550 ID.AddInteger(VT2.getRawBits()); 7551 ID.AddInteger(VT3.getRawBits()); 7552 7553 void *IP = nullptr; 7554 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7555 if (!Result) { 7556 EVT *Array = Allocator.Allocate<EVT>(3); 7557 Array[0] = VT1; 7558 Array[1] = VT2; 7559 Array[2] = VT3; 7560 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 7561 VTListMap.InsertNode(Result, IP); 7562 } 7563 return Result->getSDVTList(); 7564 } 7565 7566 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 7567 FoldingSetNodeID ID; 7568 ID.AddInteger(4U); 7569 ID.AddInteger(VT1.getRawBits()); 7570 ID.AddInteger(VT2.getRawBits()); 7571 ID.AddInteger(VT3.getRawBits()); 7572 ID.AddInteger(VT4.getRawBits()); 7573 7574 void *IP = nullptr; 7575 SDVTListNode *Result 
= VTListMap.FindNodeOrInsertPos(ID, IP); 7576 if (!Result) { 7577 EVT *Array = Allocator.Allocate<EVT>(4); 7578 Array[0] = VT1; 7579 Array[1] = VT2; 7580 Array[2] = VT3; 7581 Array[3] = VT4; 7582 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 7583 VTListMap.InsertNode(Result, IP); 7584 } 7585 return Result->getSDVTList(); 7586 } 7587 7588 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 7589 unsigned NumVTs = VTs.size(); 7590 FoldingSetNodeID ID; 7591 ID.AddInteger(NumVTs); 7592 for (unsigned index = 0; index < NumVTs; index++) { 7593 ID.AddInteger(VTs[index].getRawBits()); 7594 } 7595 7596 void *IP = nullptr; 7597 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7598 if (!Result) { 7599 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 7600 llvm::copy(VTs, Array); 7601 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 7602 VTListMap.InsertNode(Result, IP); 7603 } 7604 return Result->getSDVTList(); 7605 } 7606 7607 7608 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 7609 /// specified operands. If the resultant node already exists in the DAG, 7610 /// this does not modify the specified node, instead it returns the node that 7611 /// already exists. If the resultant node does not exist in the DAG, the 7612 /// input node is returned. As a degenerate case, if you specify the same 7613 /// input operands as the node already has, the input node is returned. 7614 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 7615 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 7616 7617 // Check to see if there is no change. 7618 if (Op == N->getOperand(0)) return N; 7619 7620 // See if the modified node already exists. 7621 void *InsertPos = nullptr; 7622 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 7623 return Existing; 7624 7625 // Nope it doesn't. Remove the node from its current place in the maps. 7626 if (InsertPos) 7627 if (!RemoveNodeFromCSEMaps(N)) 7628 InsertPos = nullptr; 7629 7630 // Now we update the operands. 7631 N->OperandList[0].set(Op); 7632 7633 updateDivergence(N); 7634 // If this gets put into a CSE map, add it. 7635 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7636 return N; 7637 } 7638 7639 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 7640 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 7641 7642 // Check to see if there is no change. 7643 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 7644 return N; // No operands changed, just return the input node. 7645 7646 // See if the modified node already exists. 7647 void *InsertPos = nullptr; 7648 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 7649 return Existing; 7650 7651 // Nope it doesn't. Remove the node from its current place in the maps. 7652 if (InsertPos) 7653 if (!RemoveNodeFromCSEMaps(N)) 7654 InsertPos = nullptr; 7655 7656 // Now we update the operands. 7657 if (N->OperandList[0] != Op1) 7658 N->OperandList[0].set(Op1); 7659 if (N->OperandList[1] != Op2) 7660 N->OperandList[1].set(Op2); 7661 7662 updateDivergence(N); 7663 // If this gets put into a CSE map, add it. 
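// InsertPos is still set only if the node was successfully removed from the
// CSE maps above; re-inserting it here lets later lookups find the node under
// its updated operands.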
7664 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7665 return N; 7666 } 7667 7668 SDNode *SelectionDAG:: 7669 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 7670 SDValue Ops[] = { Op1, Op2, Op3 }; 7671 return UpdateNodeOperands(N, Ops); 7672 } 7673 7674 SDNode *SelectionDAG:: 7675 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7676 SDValue Op3, SDValue Op4) { 7677 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 7678 return UpdateNodeOperands(N, Ops); 7679 } 7680 7681 SDNode *SelectionDAG:: 7682 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7683 SDValue Op3, SDValue Op4, SDValue Op5) { 7684 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 7685 return UpdateNodeOperands(N, Ops); 7686 } 7687 7688 SDNode *SelectionDAG:: 7689 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 7690 unsigned NumOps = Ops.size(); 7691 assert(N->getNumOperands() == NumOps && 7692 "Update with wrong number of operands"); 7693 7694 // If no operands changed just return the input node. 7695 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 7696 return N; 7697 7698 // See if the modified node already exists. 7699 void *InsertPos = nullptr; 7700 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 7701 return Existing; 7702 7703 // Nope it doesn't. Remove the node from its current place in the maps. 7704 if (InsertPos) 7705 if (!RemoveNodeFromCSEMaps(N)) 7706 InsertPos = nullptr; 7707 7708 // Now we update the operands. 7709 for (unsigned i = 0; i != NumOps; ++i) 7710 if (N->OperandList[i] != Ops[i]) 7711 N->OperandList[i].set(Ops[i]); 7712 7713 updateDivergence(N); 7714 // If this gets put into a CSE map, add it. 7715 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7716 return N; 7717 } 7718 7719 /// DropOperands - Release the operands and set this node to have 7720 /// zero operands. 7721 void SDNode::DropOperands() { 7722 // Unlike the code in MorphNodeTo that does this, we don't need to 7723 // watch for dead nodes here. 7724 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 7725 SDUse &Use = *I++; 7726 Use.set(SDValue()); 7727 } 7728 } 7729 7730 void SelectionDAG::setNodeMemRefs(MachineSDNode *N, 7731 ArrayRef<MachineMemOperand *> NewMemRefs) { 7732 if (NewMemRefs.empty()) { 7733 N->clearMemRefs(); 7734 return; 7735 } 7736 7737 // Check if we can avoid allocating by storing a single reference directly. 7738 if (NewMemRefs.size() == 1) { 7739 N->MemRefs = NewMemRefs[0]; 7740 N->NumMemRefs = 1; 7741 return; 7742 } 7743 7744 MachineMemOperand **MemRefsBuffer = 7745 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size()); 7746 llvm::copy(NewMemRefs, MemRefsBuffer); 7747 N->MemRefs = MemRefsBuffer; 7748 N->NumMemRefs = static_cast<int>(NewMemRefs.size()); 7749 } 7750 7751 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 7752 /// machine opcode. 
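/// They forward to MorphNodeTo with the opcode bit-complemented (~MachineOpc),
/// the encoding used to mark a node as carrying a target machine opcode, and
/// they replace and delete the original node if morphing produced a different
/// one.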
7753 /// 7754 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7755 EVT VT) { 7756 SDVTList VTs = getVTList(VT); 7757 return SelectNodeTo(N, MachineOpc, VTs, None); 7758 } 7759 7760 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7761 EVT VT, SDValue Op1) { 7762 SDVTList VTs = getVTList(VT); 7763 SDValue Ops[] = { Op1 }; 7764 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7765 } 7766 7767 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7768 EVT VT, SDValue Op1, 7769 SDValue Op2) { 7770 SDVTList VTs = getVTList(VT); 7771 SDValue Ops[] = { Op1, Op2 }; 7772 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7773 } 7774 7775 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7776 EVT VT, SDValue Op1, 7777 SDValue Op2, SDValue Op3) { 7778 SDVTList VTs = getVTList(VT); 7779 SDValue Ops[] = { Op1, Op2, Op3 }; 7780 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7781 } 7782 7783 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7784 EVT VT, ArrayRef<SDValue> Ops) { 7785 SDVTList VTs = getVTList(VT); 7786 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7787 } 7788 7789 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7790 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 7791 SDVTList VTs = getVTList(VT1, VT2); 7792 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7793 } 7794 7795 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7796 EVT VT1, EVT VT2) { 7797 SDVTList VTs = getVTList(VT1, VT2); 7798 return SelectNodeTo(N, MachineOpc, VTs, None); 7799 } 7800 7801 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7802 EVT VT1, EVT VT2, EVT VT3, 7803 ArrayRef<SDValue> Ops) { 7804 SDVTList VTs = getVTList(VT1, VT2, VT3); 7805 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7806 } 7807 7808 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7809 EVT VT1, EVT VT2, 7810 SDValue Op1, SDValue Op2) { 7811 SDVTList VTs = getVTList(VT1, VT2); 7812 SDValue Ops[] = { Op1, Op2 }; 7813 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7814 } 7815 7816 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7817 SDVTList VTs,ArrayRef<SDValue> Ops) { 7818 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 7819 // Reset the NodeID to -1. 7820 New->setNodeId(-1); 7821 if (New != N) { 7822 ReplaceAllUsesWith(N, New); 7823 RemoveDeadNode(N); 7824 } 7825 return New; 7826 } 7827 7828 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away 7829 /// the line number information on the merged node since it is not possible to 7830 /// preserve the information that operation is associated with multiple lines. 7831 /// This will make the debugger working better at -O0, were there is a higher 7832 /// probability having other instructions associated with that line. 7833 /// 7834 /// For IROrder, we keep the smaller of the two 7835 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { 7836 DebugLoc NLoc = N->getDebugLoc(); 7837 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) { 7838 N->setDebugLoc(DebugLoc()); 7839 } 7840 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); 7841 N->setIROrder(Order); 7842 return N; 7843 } 7844 7845 /// MorphNodeTo - This *mutates* the specified node to have the specified 7846 /// return type, opcode, and operands. 7847 /// 7848 /// Note that MorphNodeTo returns the resultant node. 
If there is already a 7849 /// node of the specified opcode and operands, it returns that node instead of 7850 /// the current one. Note that the SDLoc need not be the same. 7851 /// 7852 /// Using MorphNodeTo is faster than creating a new node and swapping it in 7853 /// with ReplaceAllUsesWith both because it often avoids allocating a new 7854 /// node, and because it doesn't require CSE recalculation for any of 7855 /// the node's users. 7856 /// 7857 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG. 7858 /// As a consequence it isn't appropriate to use from within the DAG combiner or 7859 /// the legalizer which maintain worklists that would need to be updated when 7860 /// deleting things. 7861 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, 7862 SDVTList VTs, ArrayRef<SDValue> Ops) { 7863 // If an identical node already exists, use it. 7864 void *IP = nullptr; 7865 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { 7866 FoldingSetNodeID ID; 7867 AddNodeIDNode(ID, Opc, VTs, Ops); 7868 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) 7869 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); 7870 } 7871 7872 if (!RemoveNodeFromCSEMaps(N)) 7873 IP = nullptr; 7874 7875 // Start the morphing. 7876 N->NodeType = Opc; 7877 N->ValueList = VTs.VTs; 7878 N->NumValues = VTs.NumVTs; 7879 7880 // Clear the operands list, updating used nodes to remove this from their 7881 // use list. Keep track of any operands that become dead as a result. 7882 SmallPtrSet<SDNode*, 16> DeadNodeSet; 7883 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 7884 SDUse &Use = *I++; 7885 SDNode *Used = Use.getNode(); 7886 Use.set(SDValue()); 7887 if (Used->use_empty()) 7888 DeadNodeSet.insert(Used); 7889 } 7890 7891 // For MachineNode, initialize the memory references information. 7892 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) 7893 MN->clearMemRefs(); 7894 7895 // Swap for an appropriately sized array from the recycler. 7896 removeOperands(N); 7897 createOperands(N, Ops); 7898 7899 // Delete any nodes that are still dead after adding the uses for the 7900 // new operands. 7901 if (!DeadNodeSet.empty()) { 7902 SmallVector<SDNode *, 16> DeadNodes; 7903 for (SDNode *N : DeadNodeSet) 7904 if (N->use_empty()) 7905 DeadNodes.push_back(N); 7906 RemoveDeadNodes(DeadNodes); 7907 } 7908 7909 if (IP) 7910 CSEMap.InsertNode(N, IP); // Memoize the new node. 7911 return N; 7912 } 7913 7914 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) { 7915 unsigned OrigOpc = Node->getOpcode(); 7916 unsigned NewOpc; 7917 switch (OrigOpc) { 7918 default: 7919 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!"); 7920 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 7921 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break; 7922 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 7923 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break; 7924 #include "llvm/IR/ConstrainedOps.def" 7925 } 7926 7927 assert(Node->getNumValues() == 2 && "Unexpected number of results!"); 7928 7929 // We're taking this node out of the chain, so we need to re-link things. 
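  // A STRICT_* node produces (result, chain) and takes a chain as operand 0;
  // its non-strict replacement has neither.  Forward every user of the output
  // chain to the incoming chain, then drop the chain operand before morphing.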
7930 SDValue InputChain = Node->getOperand(0); 7931 SDValue OutputChain = SDValue(Node, 1); 7932 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 7933 7934 SmallVector<SDValue, 3> Ops; 7935 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) 7936 Ops.push_back(Node->getOperand(i)); 7937 7938 SDVTList VTs = getVTList(Node->getValueType(0)); 7939 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops); 7940 7941 // MorphNodeTo can operate in two ways: if an existing node with the 7942 // specified operands exists, it can just return it. Otherwise, it 7943 // updates the node in place to have the requested operands. 7944 if (Res == Node) { 7945 // If we updated the node in place, reset the node ID. To the isel, 7946 // this should be just like a newly allocated machine node. 7947 Res->setNodeId(-1); 7948 } else { 7949 ReplaceAllUsesWith(Node, Res); 7950 RemoveDeadNode(Node); 7951 } 7952 7953 return Res; 7954 } 7955 7956 /// getMachineNode - These are used for target selectors to create a new node 7957 /// with specified return type(s), MachineInstr opcode, and operands. 7958 /// 7959 /// Note that getMachineNode returns the resultant node. If there is already a 7960 /// node of the specified opcode and operands, it returns that node instead of 7961 /// the current one. 7962 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7963 EVT VT) { 7964 SDVTList VTs = getVTList(VT); 7965 return getMachineNode(Opcode, dl, VTs, None); 7966 } 7967 7968 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7969 EVT VT, SDValue Op1) { 7970 SDVTList VTs = getVTList(VT); 7971 SDValue Ops[] = { Op1 }; 7972 return getMachineNode(Opcode, dl, VTs, Ops); 7973 } 7974 7975 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7976 EVT VT, SDValue Op1, SDValue Op2) { 7977 SDVTList VTs = getVTList(VT); 7978 SDValue Ops[] = { Op1, Op2 }; 7979 return getMachineNode(Opcode, dl, VTs, Ops); 7980 } 7981 7982 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7983 EVT VT, SDValue Op1, SDValue Op2, 7984 SDValue Op3) { 7985 SDVTList VTs = getVTList(VT); 7986 SDValue Ops[] = { Op1, Op2, Op3 }; 7987 return getMachineNode(Opcode, dl, VTs, Ops); 7988 } 7989 7990 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7991 EVT VT, ArrayRef<SDValue> Ops) { 7992 SDVTList VTs = getVTList(VT); 7993 return getMachineNode(Opcode, dl, VTs, Ops); 7994 } 7995 7996 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7997 EVT VT1, EVT VT2, SDValue Op1, 7998 SDValue Op2) { 7999 SDVTList VTs = getVTList(VT1, VT2); 8000 SDValue Ops[] = { Op1, Op2 }; 8001 return getMachineNode(Opcode, dl, VTs, Ops); 8002 } 8003 8004 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8005 EVT VT1, EVT VT2, SDValue Op1, 8006 SDValue Op2, SDValue Op3) { 8007 SDVTList VTs = getVTList(VT1, VT2); 8008 SDValue Ops[] = { Op1, Op2, Op3 }; 8009 return getMachineNode(Opcode, dl, VTs, Ops); 8010 } 8011 8012 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8013 EVT VT1, EVT VT2, 8014 ArrayRef<SDValue> Ops) { 8015 SDVTList VTs = getVTList(VT1, VT2); 8016 return getMachineNode(Opcode, dl, VTs, Ops); 8017 } 8018 8019 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8020 EVT VT1, EVT VT2, EVT VT3, 8021 SDValue Op1, SDValue Op2) { 8022 SDVTList VTs = getVTList(VT1, VT2, VT3); 8023 SDValue Ops[] = { Op1, Op2 }; 8024 return 
getMachineNode(Opcode, dl, VTs, Ops); 8025 } 8026 8027 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8028 EVT VT1, EVT VT2, EVT VT3, 8029 SDValue Op1, SDValue Op2, 8030 SDValue Op3) { 8031 SDVTList VTs = getVTList(VT1, VT2, VT3); 8032 SDValue Ops[] = { Op1, Op2, Op3 }; 8033 return getMachineNode(Opcode, dl, VTs, Ops); 8034 } 8035 8036 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8037 EVT VT1, EVT VT2, EVT VT3, 8038 ArrayRef<SDValue> Ops) { 8039 SDVTList VTs = getVTList(VT1, VT2, VT3); 8040 return getMachineNode(Opcode, dl, VTs, Ops); 8041 } 8042 8043 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8044 ArrayRef<EVT> ResultTys, 8045 ArrayRef<SDValue> Ops) { 8046 SDVTList VTs = getVTList(ResultTys); 8047 return getMachineNode(Opcode, dl, VTs, Ops); 8048 } 8049 8050 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 8051 SDVTList VTs, 8052 ArrayRef<SDValue> Ops) { 8053 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 8054 MachineSDNode *N; 8055 void *IP = nullptr; 8056 8057 if (DoCSE) { 8058 FoldingSetNodeID ID; 8059 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 8060 IP = nullptr; 8061 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 8062 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 8063 } 8064 } 8065 8066 // Allocate a new MachineSDNode. 8067 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 8068 createOperands(N, Ops); 8069 8070 if (DoCSE) 8071 CSEMap.InsertNode(N, IP); 8072 8073 InsertNode(N); 8074 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this); 8075 return N; 8076 } 8077 8078 /// getTargetExtractSubreg - A convenience function for creating 8079 /// TargetOpcode::EXTRACT_SUBREG nodes. 8080 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8081 SDValue Operand) { 8082 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8083 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 8084 VT, Operand, SRIdxVal); 8085 return SDValue(Subreg, 0); 8086 } 8087 8088 /// getTargetInsertSubreg - A convenience function for creating 8089 /// TargetOpcode::INSERT_SUBREG nodes. 8090 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8091 SDValue Operand, SDValue Subreg) { 8092 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8093 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 8094 VT, Operand, Subreg, SRIdxVal); 8095 return SDValue(Result, 0); 8096 } 8097 8098 /// getNodeIfExists - Get the specified node if it's already available, or 8099 /// else return NULL. 8100 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 8101 ArrayRef<SDValue> Ops, 8102 const SDNodeFlags Flags) { 8103 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 8104 FoldingSetNodeID ID; 8105 AddNodeIDNode(ID, Opcode, VTList, Ops); 8106 void *IP = nullptr; 8107 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 8108 E->intersectFlagsWith(Flags); 8109 return E; 8110 } 8111 } 8112 return nullptr; 8113 } 8114 8115 /// getDbgValue - Creates a SDDbgValue node. 
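///
/// The SDDbgValue records the (variable, expression, location) triple of a
/// dbg.value until instruction emission.  A typical use when lowering the
/// intrinsic might look like the following sketch (illustrative only; Var,
/// Expr, N, DL and Order come from the caller's context):
/// \code
///   SDDbgValue *DV = DAG.getDbgValue(Var, Expr, N.getNode(), N.getResNo(),
///                                    /*IsIndirect=*/false, DL, Order);
///   DAG.AddDbgValue(DV, N.getNode(), /*isParameter=*/false);
/// \endcode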
8116 /// 8117 /// SDNode 8118 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 8119 SDNode *N, unsigned R, bool IsIndirect, 8120 const DebugLoc &DL, unsigned O) { 8121 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8122 "Expected inlined-at fields to agree"); 8123 return new (DbgInfo->getAlloc()) 8124 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 8125 } 8126 8127 /// Constant 8128 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 8129 DIExpression *Expr, 8130 const Value *C, 8131 const DebugLoc &DL, unsigned O) { 8132 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8133 "Expected inlined-at fields to agree"); 8134 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 8135 } 8136 8137 /// FrameIndex 8138 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 8139 DIExpression *Expr, unsigned FI, 8140 bool IsIndirect, 8141 const DebugLoc &DL, 8142 unsigned O) { 8143 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8144 "Expected inlined-at fields to agree"); 8145 return new (DbgInfo->getAlloc()) 8146 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX); 8147 } 8148 8149 /// VReg 8150 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, 8151 DIExpression *Expr, 8152 unsigned VReg, bool IsIndirect, 8153 const DebugLoc &DL, unsigned O) { 8154 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8155 "Expected inlined-at fields to agree"); 8156 return new (DbgInfo->getAlloc()) 8157 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG); 8158 } 8159 8160 void SelectionDAG::transferDbgValues(SDValue From, SDValue To, 8161 unsigned OffsetInBits, unsigned SizeInBits, 8162 bool InvalidateDbg) { 8163 SDNode *FromNode = From.getNode(); 8164 SDNode *ToNode = To.getNode(); 8165 assert(FromNode && ToNode && "Can't modify dbg values"); 8166 8167 // PR35338 8168 // TODO: assert(From != To && "Redundant dbg value transfer"); 8169 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); 8170 if (From == To || FromNode == ToNode) 8171 return; 8172 8173 if (!FromNode->getHasDebugValue()) 8174 return; 8175 8176 SmallVector<SDDbgValue *, 2> ClonedDVs; 8177 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { 8178 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated()) 8179 continue; 8180 8181 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); 8182 8183 // Just transfer the dbg value attached to From. 8184 if (Dbg->getResNo() != From.getResNo()) 8185 continue; 8186 8187 DIVariable *Var = Dbg->getVariable(); 8188 auto *Expr = Dbg->getExpression(); 8189 // If a fragment is requested, update the expression. 8190 if (SizeInBits) { 8191 // When splitting a larger (e.g., sign-extended) value whose 8192 // lower bits are described with an SDDbgValue, do not attempt 8193 // to transfer the SDDbgValue to the upper bits. 8194 if (auto FI = Expr->getFragmentInfo()) 8195 if (OffsetInBits + SizeInBits > FI->SizeInBits) 8196 continue; 8197 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, 8198 SizeInBits); 8199 if (!Fragment) 8200 continue; 8201 Expr = *Fragment; 8202 } 8203 // Clone the SDDbgValue and move it to To. 
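    // The clone takes the larger of the two IROrders so the resulting
    // DBG_VALUE is not placed ahead of the node that now produces the value.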
8204 SDDbgValue *Clone = getDbgValue( 8205 Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(), 8206 std::max(ToNode->getIROrder(), Dbg->getOrder())); 8207 ClonedDVs.push_back(Clone); 8208 8209 if (InvalidateDbg) { 8210 // Invalidate value and indicate the SDDbgValue should not be emitted. 8211 Dbg->setIsInvalidated(); 8212 Dbg->setIsEmitted(); 8213 } 8214 } 8215 8216 for (SDDbgValue *Dbg : ClonedDVs) 8217 AddDbgValue(Dbg, ToNode, false); 8218 } 8219 8220 void SelectionDAG::salvageDebugInfo(SDNode &N) { 8221 if (!N.getHasDebugValue()) 8222 return; 8223 8224 SmallVector<SDDbgValue *, 2> ClonedDVs; 8225 for (auto DV : GetDbgValues(&N)) { 8226 if (DV->isInvalidated()) 8227 continue; 8228 switch (N.getOpcode()) { 8229 default: 8230 break; 8231 case ISD::ADD: 8232 SDValue N0 = N.getOperand(0); 8233 SDValue N1 = N.getOperand(1); 8234 if (!isConstantIntBuildVectorOrConstantInt(N0) && 8235 isConstantIntBuildVectorOrConstantInt(N1)) { 8236 uint64_t Offset = N.getConstantOperandVal(1); 8237 // Rewrite an ADD constant node into a DIExpression. Since we are 8238 // performing arithmetic to compute the variable's *value* in the 8239 // DIExpression, we need to mark the expression with a 8240 // DW_OP_stack_value. 8241 auto *DIExpr = DV->getExpression(); 8242 DIExpr = 8243 DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset); 8244 SDDbgValue *Clone = 8245 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(), 8246 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder()); 8247 ClonedDVs.push_back(Clone); 8248 DV->setIsInvalidated(); 8249 DV->setIsEmitted(); 8250 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; 8251 N0.getNode()->dumprFull(this); 8252 dbgs() << " into " << *DIExpr << '\n'); 8253 } 8254 } 8255 } 8256 8257 for (SDDbgValue *Dbg : ClonedDVs) 8258 AddDbgValue(Dbg, Dbg->getSDNode(), false); 8259 } 8260 8261 /// Creates a SDDbgLabel node. 8262 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label, 8263 const DebugLoc &DL, unsigned O) { 8264 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && 8265 "Expected inlined-at fields to agree"); 8266 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); 8267 } 8268 8269 namespace { 8270 8271 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 8272 /// pointed to by a use iterator is deleted, increment the use iterator 8273 /// so that it doesn't dangle. 8274 /// 8275 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 8276 SDNode::use_iterator &UI; 8277 SDNode::use_iterator &UE; 8278 8279 void NodeDeleted(SDNode *N, SDNode *E) override { 8280 // Increment the iterator as needed. 8281 while (UI != UE && N == *UI) 8282 ++UI; 8283 } 8284 8285 public: 8286 RAUWUpdateListener(SelectionDAG &d, 8287 SDNode::use_iterator &ui, 8288 SDNode::use_iterator &ue) 8289 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 8290 }; 8291 8292 } // end anonymous namespace 8293 8294 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8295 /// This can cause recursive merging of nodes in the DAG. 8296 /// 8297 /// This version assumes From has a single result value. 
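///
/// For example, a combine that has just built a replacement value might do
/// the following (illustrative sketch; N is the node being replaced):
/// \code
///   SDValue NewV = DAG.getNode(ISD::ADD, DL, VT, X, Y);
///   DAG.ReplaceAllUsesWith(SDValue(N, 0), NewV);
/// \endcode
/// Every rewritten user is re-added to the CSE maps, which may recursively
/// merge it with an existing identical node.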
8298 /// 8299 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { 8300 SDNode *From = FromN.getNode(); 8301 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && 8302 "Cannot replace with this method!"); 8303 assert(From != To.getNode() && "Cannot replace uses of with self"); 8304 8305 // Preserve Debug Values 8306 transferDbgValues(FromN, To); 8307 8308 // Iterate over all the existing uses of From. New uses will be added 8309 // to the beginning of the use list, which we avoid visiting. 8310 // This specifically avoids visiting uses of From that arise while the 8311 // replacement is happening, because any such uses would be the result 8312 // of CSE: If an existing node looks like From after one of its operands 8313 // is replaced by To, we don't want to replace of all its users with To 8314 // too. See PR3018 for more info. 8315 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8316 RAUWUpdateListener Listener(*this, UI, UE); 8317 while (UI != UE) { 8318 SDNode *User = *UI; 8319 8320 // This node is about to morph, remove its old self from the CSE maps. 8321 RemoveNodeFromCSEMaps(User); 8322 8323 // A user can appear in a use list multiple times, and when this 8324 // happens the uses are usually next to each other in the list. 8325 // To help reduce the number of CSE recomputations, process all 8326 // the uses of this user that we can find this way. 8327 do { 8328 SDUse &Use = UI.getUse(); 8329 ++UI; 8330 Use.set(To); 8331 if (To->isDivergent() != From->isDivergent()) 8332 updateDivergence(User); 8333 } while (UI != UE && *UI == User); 8334 // Now that we have modified User, add it back to the CSE maps. If it 8335 // already exists there, recursively merge the results together. 8336 AddModifiedNodeToCSEMaps(User); 8337 } 8338 8339 // If we just RAUW'd the root, take note. 8340 if (FromN == getRoot()) 8341 setRoot(To); 8342 } 8343 8344 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8345 /// This can cause recursive merging of nodes in the DAG. 8346 /// 8347 /// This version assumes that for each value of From, there is a 8348 /// corresponding value in To in the same position with the same type. 8349 /// 8350 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) { 8351 #ifndef NDEBUG 8352 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8353 assert((!From->hasAnyUseOfValue(i) || 8354 From->getValueType(i) == To->getValueType(i)) && 8355 "Cannot use this version of ReplaceAllUsesWith!"); 8356 #endif 8357 8358 // Handle the trivial case. 8359 if (From == To) 8360 return; 8361 8362 // Preserve Debug Info. Only do this if there's a use. 8363 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8364 if (From->hasAnyUseOfValue(i)) { 8365 assert((i < To->getNumValues()) && "Invalid To location"); 8366 transferDbgValues(SDValue(From, i), SDValue(To, i)); 8367 } 8368 8369 // Iterate over just the existing users of From. See the comments in 8370 // the ReplaceAllUsesWith above. 8371 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8372 RAUWUpdateListener Listener(*this, UI, UE); 8373 while (UI != UE) { 8374 SDNode *User = *UI; 8375 8376 // This node is about to morph, remove its old self from the CSE maps. 8377 RemoveNodeFromCSEMaps(User); 8378 8379 // A user can appear in a use list multiple times, and when this 8380 // happens the uses are usually next to each other in the list. 
8381 // To help reduce the number of CSE recomputations, process all 8382 // the uses of this user that we can find this way. 8383 do { 8384 SDUse &Use = UI.getUse(); 8385 ++UI; 8386 Use.setNode(To); 8387 if (To->isDivergent() != From->isDivergent()) 8388 updateDivergence(User); 8389 } while (UI != UE && *UI == User); 8390 8391 // Now that we have modified User, add it back to the CSE maps. If it 8392 // already exists there, recursively merge the results together. 8393 AddModifiedNodeToCSEMaps(User); 8394 } 8395 8396 // If we just RAUW'd the root, take note. 8397 if (From == getRoot().getNode()) 8398 setRoot(SDValue(To, getRoot().getResNo())); 8399 } 8400 8401 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8402 /// This can cause recursive merging of nodes in the DAG. 8403 /// 8404 /// This version can replace From with any result values. To must match the 8405 /// number and types of values returned by From. 8406 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 8407 if (From->getNumValues() == 1) // Handle the simple case efficiently. 8408 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 8409 8410 // Preserve Debug Info. 8411 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8412 transferDbgValues(SDValue(From, i), To[i]); 8413 8414 // Iterate over just the existing users of From. See the comments in 8415 // the ReplaceAllUsesWith above. 8416 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8417 RAUWUpdateListener Listener(*this, UI, UE); 8418 while (UI != UE) { 8419 SDNode *User = *UI; 8420 8421 // This node is about to morph, remove its old self from the CSE maps. 8422 RemoveNodeFromCSEMaps(User); 8423 8424 // A user can appear in a use list multiple times, and when this happens the 8425 // uses are usually next to each other in the list. To help reduce the 8426 // number of CSE and divergence recomputations, process all the uses of this 8427 // user that we can find this way. 8428 bool To_IsDivergent = false; 8429 do { 8430 SDUse &Use = UI.getUse(); 8431 const SDValue &ToOp = To[Use.getResNo()]; 8432 ++UI; 8433 Use.set(ToOp); 8434 To_IsDivergent |= ToOp->isDivergent(); 8435 } while (UI != UE && *UI == User); 8436 8437 if (To_IsDivergent != From->isDivergent()) 8438 updateDivergence(User); 8439 8440 // Now that we have modified User, add it back to the CSE maps. If it 8441 // already exists there, recursively merge the results together. 8442 AddModifiedNodeToCSEMaps(User); 8443 } 8444 8445 // If we just RAUW'd the root, take note. 8446 if (From == getRoot().getNode()) 8447 setRoot(SDValue(To[getRoot().getResNo()])); 8448 } 8449 8450 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 8451 /// uses of other values produced by From.getNode() alone. The Deleted 8452 /// vector is handled the same way as for ReplaceAllUsesWith. 8453 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 8454 // Handle the really simple, really trivial case efficiently. 8455 if (From == To) return; 8456 8457 // Handle the simple, trivial, case efficiently. 8458 if (From.getNode()->getNumValues() == 1) { 8459 ReplaceAllUsesWith(From, To); 8460 return; 8461 } 8462 8463 // Preserve Debug Info. 8464 transferDbgValues(From, To); 8465 8466 // Iterate over just the existing users of From. See the comments in 8467 // the ReplaceAllUsesWith above. 
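  // The RAUWUpdateListener below keeps UI/UE valid if recursive CSE merging
  // deletes the node the iterator currently points at.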
8468 SDNode::use_iterator UI = From.getNode()->use_begin(), 8469 UE = From.getNode()->use_end(); 8470 RAUWUpdateListener Listener(*this, UI, UE); 8471 while (UI != UE) { 8472 SDNode *User = *UI; 8473 bool UserRemovedFromCSEMaps = false; 8474 8475 // A user can appear in a use list multiple times, and when this 8476 // happens the uses are usually next to each other in the list. 8477 // To help reduce the number of CSE recomputations, process all 8478 // the uses of this user that we can find this way. 8479 do { 8480 SDUse &Use = UI.getUse(); 8481 8482 // Skip uses of different values from the same node. 8483 if (Use.getResNo() != From.getResNo()) { 8484 ++UI; 8485 continue; 8486 } 8487 8488 // If this node hasn't been modified yet, it's still in the CSE maps, 8489 // so remove its old self from the CSE maps. 8490 if (!UserRemovedFromCSEMaps) { 8491 RemoveNodeFromCSEMaps(User); 8492 UserRemovedFromCSEMaps = true; 8493 } 8494 8495 ++UI; 8496 Use.set(To); 8497 if (To->isDivergent() != From->isDivergent()) 8498 updateDivergence(User); 8499 } while (UI != UE && *UI == User); 8500 // We are iterating over all uses of the From node, so if a use 8501 // doesn't use the specific value, no changes are made. 8502 if (!UserRemovedFromCSEMaps) 8503 continue; 8504 8505 // Now that we have modified User, add it back to the CSE maps. If it 8506 // already exists there, recursively merge the results together. 8507 AddModifiedNodeToCSEMaps(User); 8508 } 8509 8510 // If we just RAUW'd the root, take note. 8511 if (From == getRoot()) 8512 setRoot(To); 8513 } 8514 8515 namespace { 8516 8517 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 8518 /// to record information about a use. 8519 struct UseMemo { 8520 SDNode *User; 8521 unsigned Index; 8522 SDUse *Use; 8523 }; 8524 8525 /// operator< - Sort Memos by User. 
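/// Sorting the UseMemo records by their User pointer groups all recorded uses
/// of a node together, so ReplaceAllUsesOfValuesWith only has to remove each
/// user from the CSE maps and re-add it once.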
8526 bool operator<(const UseMemo &L, const UseMemo &R) { 8527 return (intptr_t)L.User < (intptr_t)R.User; 8528 } 8529 8530 } // end anonymous namespace 8531 8532 void SelectionDAG::updateDivergence(SDNode * N) 8533 { 8534 if (TLI->isSDNodeAlwaysUniform(N)) 8535 return; 8536 bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 8537 for (auto &Op : N->ops()) { 8538 if (Op.Val.getValueType() != MVT::Other) 8539 IsDivergent |= Op.getNode()->isDivergent(); 8540 } 8541 if (N->SDNodeBits.IsDivergent != IsDivergent) { 8542 N->SDNodeBits.IsDivergent = IsDivergent; 8543 for (auto U : N->uses()) { 8544 updateDivergence(U); 8545 } 8546 } 8547 } 8548 8549 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) { 8550 DenseMap<SDNode *, unsigned> Degree; 8551 Order.reserve(AllNodes.size()); 8552 for (auto &N : allnodes()) { 8553 unsigned NOps = N.getNumOperands(); 8554 Degree[&N] = NOps; 8555 if (0 == NOps) 8556 Order.push_back(&N); 8557 } 8558 for (size_t I = 0; I != Order.size(); ++I) { 8559 SDNode *N = Order[I]; 8560 for (auto U : N->uses()) { 8561 unsigned &UnsortedOps = Degree[U]; 8562 if (0 == --UnsortedOps) 8563 Order.push_back(U); 8564 } 8565 } 8566 } 8567 8568 #ifndef NDEBUG 8569 void SelectionDAG::VerifyDAGDiverence() { 8570 std::vector<SDNode *> TopoOrder; 8571 CreateTopologicalOrder(TopoOrder); 8572 const TargetLowering &TLI = getTargetLoweringInfo(); 8573 DenseMap<const SDNode *, bool> DivergenceMap; 8574 for (auto &N : allnodes()) { 8575 DivergenceMap[&N] = false; 8576 } 8577 for (auto N : TopoOrder) { 8578 bool IsDivergent = DivergenceMap[N]; 8579 bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA); 8580 for (auto &Op : N->ops()) { 8581 if (Op.Val.getValueType() != MVT::Other) 8582 IsSDNodeDivergent |= DivergenceMap[Op.getNode()]; 8583 } 8584 if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) { 8585 DivergenceMap[N] = true; 8586 } 8587 } 8588 for (auto &N : allnodes()) { 8589 (void)N; 8590 assert(DivergenceMap[&N] == N.isDivergent() && 8591 "Divergence bit inconsistency detected\n"); 8592 } 8593 } 8594 #endif 8595 8596 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving 8597 /// uses of other values produced by From.getNode() alone. The same value 8598 /// may appear in both the From and To list. The Deleted vector is 8599 /// handled the same way as for ReplaceAllUsesWith. 8600 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From, 8601 const SDValue *To, 8602 unsigned Num){ 8603 // Handle the simple, trivial case efficiently. 8604 if (Num == 1) 8605 return ReplaceAllUsesOfValueWith(*From, *To); 8606 8607 transferDbgValues(*From, *To); 8608 8609 // Read up all the uses and make records of them. This helps 8610 // processing new uses that are introduced during the 8611 // replacement process. 8612 SmallVector<UseMemo, 4> Uses; 8613 for (unsigned i = 0; i != Num; ++i) { 8614 unsigned FromResNo = From[i].getResNo(); 8615 SDNode *FromNode = From[i].getNode(); 8616 for (SDNode::use_iterator UI = FromNode->use_begin(), 8617 E = FromNode->use_end(); UI != E; ++UI) { 8618 SDUse &Use = UI.getUse(); 8619 if (Use.getResNo() == FromResNo) { 8620 UseMemo Memo = { *UI, i, &Use }; 8621 Uses.push_back(Memo); 8622 } 8623 } 8624 } 8625 8626 // Sort the uses, so that all the uses from a given User are together. 8627 llvm::sort(Uses); 8628 8629 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size(); 8630 UseIndex != UseIndexEnd; ) { 8631 // We know that this user uses some value of From. 
If it is the right 8632 // value, update it. 8633 SDNode *User = Uses[UseIndex].User; 8634 8635 // This node is about to morph, remove its old self from the CSE maps. 8636 RemoveNodeFromCSEMaps(User); 8637 8638 // The Uses array is sorted, so all the uses for a given User 8639 // are next to each other in the list. 8640 // To help reduce the number of CSE recomputations, process all 8641 // the uses of this user that we can find this way. 8642 do { 8643 unsigned i = Uses[UseIndex].Index; 8644 SDUse &Use = *Uses[UseIndex].Use; 8645 ++UseIndex; 8646 8647 Use.set(To[i]); 8648 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User); 8649 8650 // Now that we have modified User, add it back to the CSE maps. If it 8651 // already exists there, recursively merge the results together. 8652 AddModifiedNodeToCSEMaps(User); 8653 } 8654 } 8655 8656 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG 8657 /// based on their topological order. It returns the maximum id and a vector 8658 /// of the SDNodes* in assigned order by reference. 8659 unsigned SelectionDAG::AssignTopologicalOrder() { 8660 unsigned DAGSize = 0; 8661 8662 // SortedPos tracks the progress of the algorithm. Nodes before it are 8663 // sorted, nodes after it are unsorted. When the algorithm completes 8664 // it is at the end of the list. 8665 allnodes_iterator SortedPos = allnodes_begin(); 8666 8667 // Visit all the nodes. Move nodes with no operands to the front of 8668 // the list immediately. Annotate nodes that do have operands with their 8669 // operand count. Before we do this, the Node Id fields of the nodes 8670 // may contain arbitrary values. After, the Node Id fields for nodes 8671 // before SortedPos will contain the topological sort index, and the 8672 // Node Id fields for nodes At SortedPos and after will contain the 8673 // count of outstanding operands. 8674 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) { 8675 SDNode *N = &*I++; 8676 checkForCycles(N, this); 8677 unsigned Degree = N->getNumOperands(); 8678 if (Degree == 0) { 8679 // A node with no uses, add it to the result array immediately. 8680 N->setNodeId(DAGSize++); 8681 allnodes_iterator Q(N); 8682 if (Q != SortedPos) 8683 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q)); 8684 assert(SortedPos != AllNodes.end() && "Overran node list"); 8685 ++SortedPos; 8686 } else { 8687 // Temporarily use the Node Id as scratch space for the degree count. 8688 N->setNodeId(Degree); 8689 } 8690 } 8691 8692 // Visit all the nodes. As we iterate, move nodes into sorted order, 8693 // such that by the time the end is reached all nodes will be sorted. 8694 for (SDNode &Node : allnodes()) { 8695 SDNode *N = &Node; 8696 checkForCycles(N, this); 8697 // N is in sorted position, so all its uses have one less operand 8698 // that needs to be sorted. 8699 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 8700 UI != UE; ++UI) { 8701 SDNode *P = *UI; 8702 unsigned Degree = P->getNodeId(); 8703 assert(Degree != 0 && "Invalid node degree"); 8704 --Degree; 8705 if (Degree == 0) { 8706 // All of P's operands are sorted, so P may sorted now. 8707 P->setNodeId(DAGSize++); 8708 if (P->getIterator() != SortedPos) 8709 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P)); 8710 assert(SortedPos != AllNodes.end() && "Overran node list"); 8711 ++SortedPos; 8712 } else { 8713 // Update P's outstanding operand count. 
8714 P->setNodeId(Degree); 8715 } 8716 } 8717 if (Node.getIterator() == SortedPos) { 8718 #ifndef NDEBUG 8719 allnodes_iterator I(N); 8720 SDNode *S = &*++I; 8721 dbgs() << "Overran sorted position:\n"; 8722 S->dumprFull(this); dbgs() << "\n"; 8723 dbgs() << "Checking if this is due to cycles\n"; 8724 checkForCycles(this, true); 8725 #endif 8726 llvm_unreachable(nullptr); 8727 } 8728 } 8729 8730 assert(SortedPos == AllNodes.end() && 8731 "Topological sort incomplete!"); 8732 assert(AllNodes.front().getOpcode() == ISD::EntryToken && 8733 "First node in topological sort is not the entry token!"); 8734 assert(AllNodes.front().getNodeId() == 0 && 8735 "First node in topological sort has non-zero id!"); 8736 assert(AllNodes.front().getNumOperands() == 0 && 8737 "First node in topological sort has operands!"); 8738 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 && 8739 "Last node in topologic sort has unexpected id!"); 8740 assert(AllNodes.back().use_empty() && 8741 "Last node in topologic sort has users!"); 8742 assert(DAGSize == allnodes_size() && "Node count mismatch!"); 8743 return DAGSize; 8744 } 8745 8746 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the 8747 /// value is produced by SD. 8748 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) { 8749 if (SD) { 8750 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue()); 8751 SD->setHasDebugValue(true); 8752 } 8753 DbgInfo->add(DB, SD, isParameter); 8754 } 8755 8756 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { 8757 DbgInfo->add(DB); 8758 } 8759 8760 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, 8761 SDValue NewMemOp) { 8762 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node"); 8763 // The new memory operation must have the same position as the old load in 8764 // terms of memory dependency. Create a TokenFactor for the old load and new 8765 // memory operation and update uses of the old load's output chain to use that 8766 // TokenFactor. 
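  // After the rewrite the chains look like:
  //   old users of (OldLoad, 1)  -->  TokenFactor
  //   TokenFactor operands       -->  (OldLoad, 1) and the new memop's chain
  // so everything that was ordered after the load is now also ordered after
  // the new memory operation.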
8767 SDValue OldChain = SDValue(OldLoad, 1); 8768 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 8769 if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1)) 8770 return NewChain; 8771 8772 SDValue TokenFactor = 8773 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 8774 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 8775 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 8776 return TokenFactor; 8777 } 8778 8779 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op, 8780 Function **OutFunction) { 8781 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol"); 8782 8783 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 8784 auto *Module = MF->getFunction().getParent(); 8785 auto *Function = Module->getFunction(Symbol); 8786 8787 if (OutFunction != nullptr) 8788 *OutFunction = Function; 8789 8790 if (Function != nullptr) { 8791 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace()); 8792 return getGlobalAddress(Function, SDLoc(Op), PtrTy); 8793 } 8794 8795 std::string ErrorStr; 8796 raw_string_ostream ErrorFormatter(ErrorStr); 8797 8798 ErrorFormatter << "Undefined external symbol "; 8799 ErrorFormatter << '"' << Symbol << '"'; 8800 ErrorFormatter.flush(); 8801 8802 report_fatal_error(ErrorStr); 8803 } 8804 8805 //===----------------------------------------------------------------------===// 8806 // SDNode Class 8807 //===----------------------------------------------------------------------===// 8808 8809 bool llvm::isNullConstant(SDValue V) { 8810 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8811 return Const != nullptr && Const->isNullValue(); 8812 } 8813 8814 bool llvm::isNullFPConstant(SDValue V) { 8815 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 8816 return Const != nullptr && Const->isZero() && !Const->isNegative(); 8817 } 8818 8819 bool llvm::isAllOnesConstant(SDValue V) { 8820 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8821 return Const != nullptr && Const->isAllOnesValue(); 8822 } 8823 8824 bool llvm::isOneConstant(SDValue V) { 8825 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8826 return Const != nullptr && Const->isOne(); 8827 } 8828 8829 SDValue llvm::peekThroughBitcasts(SDValue V) { 8830 while (V.getOpcode() == ISD::BITCAST) 8831 V = V.getOperand(0); 8832 return V; 8833 } 8834 8835 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { 8836 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) 8837 V = V.getOperand(0); 8838 return V; 8839 } 8840 8841 SDValue llvm::peekThroughExtractSubvectors(SDValue V) { 8842 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) 8843 V = V.getOperand(0); 8844 return V; 8845 } 8846 8847 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) { 8848 if (V.getOpcode() != ISD::XOR) 8849 return false; 8850 V = peekThroughBitcasts(V.getOperand(1)); 8851 unsigned NumBits = V.getScalarValueSizeInBits(); 8852 ConstantSDNode *C = 8853 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true); 8854 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits); 8855 } 8856 8857 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs, 8858 bool AllowTruncation) { 8859 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8860 return CN; 8861 8862 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8863 BitVector UndefElements; 8864 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 8865 8866 // BuildVectors can truncate their operands. 
Ignore that case here unless 8867 // AllowTruncation is set. 8868 if (CN && (UndefElements.none() || AllowUndefs)) { 8869 EVT CVT = CN->getValueType(0); 8870 EVT NSVT = N.getValueType().getScalarType(); 8871 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 8872 if (AllowTruncation || (CVT == NSVT)) 8873 return CN; 8874 } 8875 } 8876 8877 return nullptr; 8878 } 8879 8880 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts, 8881 bool AllowUndefs, 8882 bool AllowTruncation) { 8883 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8884 return CN; 8885 8886 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8887 BitVector UndefElements; 8888 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements); 8889 8890 // BuildVectors can truncate their operands. Ignore that case here unless 8891 // AllowTruncation is set. 8892 if (CN && (UndefElements.none() || AllowUndefs)) { 8893 EVT CVT = CN->getValueType(0); 8894 EVT NSVT = N.getValueType().getScalarType(); 8895 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 8896 if (AllowTruncation || (CVT == NSVT)) 8897 return CN; 8898 } 8899 } 8900 8901 return nullptr; 8902 } 8903 8904 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) { 8905 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 8906 return CN; 8907 8908 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8909 BitVector UndefElements; 8910 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 8911 if (CN && (UndefElements.none() || AllowUndefs)) 8912 return CN; 8913 } 8914 8915 return nullptr; 8916 } 8917 8918 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, 8919 const APInt &DemandedElts, 8920 bool AllowUndefs) { 8921 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 8922 return CN; 8923 8924 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8925 BitVector UndefElements; 8926 ConstantFPSDNode *CN = 8927 BV->getConstantFPSplatNode(DemandedElts, &UndefElements); 8928 if (CN && (UndefElements.none() || AllowUndefs)) 8929 return CN; 8930 } 8931 8932 return nullptr; 8933 } 8934 8935 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) { 8936 // TODO: may want to use peekThroughBitcast() here. 8937 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs); 8938 return C && C->isNullValue(); 8939 } 8940 8941 bool llvm::isOneOrOneSplat(SDValue N) { 8942 // TODO: may want to use peekThroughBitcast() here. 
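  // Build vectors may implicitly truncate their operands, so also check that
  // the splat constant really has the element's bit width before comparing it
  // against one.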
8943 unsigned BitWidth = N.getScalarValueSizeInBits(); 8944 ConstantSDNode *C = isConstOrConstSplat(N); 8945 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth; 8946 } 8947 8948 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) { 8949 N = peekThroughBitcasts(N); 8950 unsigned BitWidth = N.getScalarValueSizeInBits(); 8951 ConstantSDNode *C = isConstOrConstSplat(N); 8952 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth; 8953 } 8954 8955 HandleSDNode::~HandleSDNode() { 8956 DropOperands(); 8957 } 8958 8959 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 8960 const DebugLoc &DL, 8961 const GlobalValue *GA, EVT VT, 8962 int64_t o, unsigned TF) 8963 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 8964 TheGlobal = GA; 8965 } 8966 8967 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 8968 EVT VT, unsigned SrcAS, 8969 unsigned DestAS) 8970 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 8971 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 8972 8973 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 8974 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 8975 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 8976 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 8977 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 8978 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 8979 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 8980 8981 // We check here that the size of the memory operand fits within the size of 8982 // the MMO. This is because the MMO might indicate only a possible address 8983 // range instead of specifying the affected memory addresses precisely. 8984 // TODO: Make MachineMemOperands aware of scalable vectors. 8985 assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() && 8986 "Size mismatch!"); 8987 } 8988 8989 /// Profile - Gather unique data for the node. 8990 /// 8991 void SDNode::Profile(FoldingSetNodeID &ID) const { 8992 AddNodeIDNode(ID, this); 8993 } 8994 8995 namespace { 8996 8997 struct EVTArray { 8998 std::vector<EVT> VTs; 8999 9000 EVTArray() { 9001 VTs.reserve(MVT::LAST_VALUETYPE); 9002 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 9003 VTs.push_back(MVT((MVT::SimpleValueType)i)); 9004 } 9005 }; 9006 9007 } // end anonymous namespace 9008 9009 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 9010 static ManagedStatic<EVTArray> SimpleVTArray; 9011 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 9012 9013 /// getValueTypeList - Return a pointer to the specified value type. 9014 /// 9015 const EVT *SDNode::getValueTypeList(EVT VT) { 9016 if (VT.isExtended()) { 9017 sys::SmartScopedLock<true> Lock(*VTMutex); 9018 return &(*EVTs->insert(VT).first); 9019 } else { 9020 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 9021 "Value type out of range!"); 9022 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 9023 } 9024 } 9025 9026 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 9027 /// indicated value. This method ignores uses of other values defined by this 9028 /// operation. 
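/// For example, given a load node L, L->hasNUsesOfValue(1, 0) asks whether the
/// loaded value (result 0) has exactly one use, independent of how many users
/// the chain result has.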
9029 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { 9030 assert(Value < getNumValues() && "Bad value!"); 9031 9032 // TODO: Only iterate over uses of a given value of the node 9033 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { 9034 if (UI.getUse().getResNo() == Value) { 9035 if (NUses == 0) 9036 return false; 9037 --NUses; 9038 } 9039 } 9040 9041 // Found exactly the right number of uses? 9042 return NUses == 0; 9043 } 9044 9045 /// hasAnyUseOfValue - Return true if there are any use of the indicated 9046 /// value. This method ignores uses of other values defined by this operation. 9047 bool SDNode::hasAnyUseOfValue(unsigned Value) const { 9048 assert(Value < getNumValues() && "Bad value!"); 9049 9050 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) 9051 if (UI.getUse().getResNo() == Value) 9052 return true; 9053 9054 return false; 9055 } 9056 9057 /// isOnlyUserOf - Return true if this node is the only use of N. 9058 bool SDNode::isOnlyUserOf(const SDNode *N) const { 9059 bool Seen = false; 9060 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 9061 SDNode *User = *I; 9062 if (User == this) 9063 Seen = true; 9064 else 9065 return false; 9066 } 9067 9068 return Seen; 9069 } 9070 9071 /// Return true if the only users of N are contained in Nodes. 9072 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) { 9073 bool Seen = false; 9074 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 9075 SDNode *User = *I; 9076 if (llvm::any_of(Nodes, 9077 [&User](const SDNode *Node) { return User == Node; })) 9078 Seen = true; 9079 else 9080 return false; 9081 } 9082 9083 return Seen; 9084 } 9085 9086 /// isOperand - Return true if this node is an operand of N. 9087 bool SDValue::isOperandOf(const SDNode *N) const { 9088 return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; }); 9089 } 9090 9091 bool SDNode::isOperandOf(const SDNode *N) const { 9092 return any_of(N->op_values(), 9093 [this](SDValue Op) { return this == Op.getNode(); }); 9094 } 9095 9096 /// reachesChainWithoutSideEffects - Return true if this operand (which must 9097 /// be a chain) reaches the specified operand without crossing any 9098 /// side-effecting instructions on any chain path. In practice, this looks 9099 /// through token factors and non-volatile loads. In order to remain efficient, 9100 /// this only looks a couple of nodes in, it does not do an exhaustive search. 9101 /// 9102 /// Note that we only need to examine chains when we're searching for 9103 /// side-effects; SelectionDAG requires that all side-effects are represented 9104 /// by chains, even if another operand would force a specific ordering. This 9105 /// constraint is necessary to allow transformations like splitting loads. 9106 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, 9107 unsigned Depth) const { 9108 if (*this == Dest) return true; 9109 9110 // Don't search too deeply, we just want to be able to see through 9111 // TokenFactor's etc. 9112 if (Depth == 0) return false; 9113 9114 // If this is a token factor, all inputs to the TF happen in parallel. 9115 if (getOpcode() == ISD::TokenFactor) { 9116 // First, try a shallow search. 9117 if (is_contained((*this)->ops(), Dest)) { 9118 // We found the chain we want as an operand of this TokenFactor. 
9119 // Essentially, we reach the chain without side-effects if we could 9120 // serialize the TokenFactor into a simple chain of operations with 9121 // Dest as the last operation. This is automatically true if the 9122 // chain has one use: there are no other ordering constraints. 9123 // If the chain has more than one use, we give up: some other 9124 // use of Dest might force a side-effect between Dest and the current 9125 // node. 9126 if (Dest.hasOneUse()) 9127 return true; 9128 } 9129 // Next, try a deep search: check whether every operand of the TokenFactor 9130 // reaches Dest. 9131 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 9132 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 9133 }); 9134 } 9135 9136 // Loads don't have side effects, look through them. 9137 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 9138 if (Ld->isUnordered()) 9139 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 9140 } 9141 return false; 9142 } 9143 9144 bool SDNode::hasPredecessor(const SDNode *N) const { 9145 SmallPtrSet<const SDNode *, 32> Visited; 9146 SmallVector<const SDNode *, 16> Worklist; 9147 Worklist.push_back(this); 9148 return hasPredecessorHelper(N, Visited, Worklist); 9149 } 9150 9151 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 9152 this->Flags.intersectWith(Flags); 9153 } 9154 9155 SDValue 9156 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, 9157 ArrayRef<ISD::NodeType> CandidateBinOps, 9158 bool AllowPartials) { 9159 // The pattern must end in an extract from index 0. 9160 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || 9161 !isNullConstant(Extract->getOperand(1))) 9162 return SDValue(); 9163 9164 // Match against one of the candidate binary ops. 9165 SDValue Op = Extract->getOperand(0); 9166 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) { 9167 return Op.getOpcode() == unsigned(BinOp); 9168 })) 9169 return SDValue(); 9170 9171 // Floating-point reductions may require relaxed constraints on the final step 9172 // of the reduction because they may reorder intermediate operations. 9173 unsigned CandidateBinOp = Op.getOpcode(); 9174 if (Op.getValueType().isFloatingPoint()) { 9175 SDNodeFlags Flags = Op->getFlags(); 9176 switch (CandidateBinOp) { 9177 case ISD::FADD: 9178 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation()) 9179 return SDValue(); 9180 break; 9181 default: 9182 llvm_unreachable("Unhandled FP opcode for binop reduction"); 9183 } 9184 } 9185 9186 // Matching failed - attempt to see if we did enough stages that a partial 9187 // reduction from a subvector is possible. 9188 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) { 9189 if (!AllowPartials || !Op) 9190 return SDValue(); 9191 EVT OpVT = Op.getValueType(); 9192 EVT OpSVT = OpVT.getScalarType(); 9193 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts); 9194 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0)) 9195 return SDValue(); 9196 BinOp = (ISD::NodeType)CandidateBinOp; 9197 return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op, 9198 getVectorIdxConstant(0, SDLoc(Op))); 9199 }; 9200 9201 // At each stage, we're looking for something that looks like: 9202 // %s = shufflevector <8 x i32> %op, <8 x i32> undef, 9203 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, 9204 // i32 undef, i32 undef, i32 undef, i32 undef> 9205 // %a = binop <8 x i32> %op, %s 9206 // Where the mask changes according to the stage. E.g. 
for a 3-stage pyramid, 9207 // we expect something like: 9208 // <4,5,6,7,u,u,u,u> 9209 // <2,3,u,u,u,u,u,u> 9210 // <1,u,u,u,u,u,u,u> 9211 // While a partial reduction match would be: 9212 // <2,3,u,u,u,u,u,u> 9213 // <1,u,u,u,u,u,u,u> 9214 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements()); 9215 SDValue PrevOp; 9216 for (unsigned i = 0; i < Stages; ++i) { 9217 unsigned MaskEnd = (1 << i); 9218 9219 if (Op.getOpcode() != CandidateBinOp) 9220 return PartialReduction(PrevOp, MaskEnd); 9221 9222 SDValue Op0 = Op.getOperand(0); 9223 SDValue Op1 = Op.getOperand(1); 9224 9225 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0); 9226 if (Shuffle) { 9227 Op = Op1; 9228 } else { 9229 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1); 9230 Op = Op0; 9231 } 9232 9233 // The first operand of the shuffle should be the same as the other operand 9234 // of the binop. 9235 if (!Shuffle || Shuffle->getOperand(0) != Op) 9236 return PartialReduction(PrevOp, MaskEnd); 9237 9238 // Verify the shuffle has the expected (at this stage of the pyramid) mask. 9239 for (int Index = 0; Index < (int)MaskEnd; ++Index) 9240 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index)) 9241 return PartialReduction(PrevOp, MaskEnd); 9242 9243 PrevOp = Op; 9244 } 9245 9246 BinOp = (ISD::NodeType)CandidateBinOp; 9247 return Op; 9248 } 9249 9250 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 9251 assert(N->getNumValues() == 1 && 9252 "Can't unroll a vector with multiple results!"); 9253 9254 EVT VT = N->getValueType(0); 9255 unsigned NE = VT.getVectorNumElements(); 9256 EVT EltVT = VT.getVectorElementType(); 9257 SDLoc dl(N); 9258 9259 SmallVector<SDValue, 8> Scalars; 9260 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 9261 9262 // If ResNE is 0, fully unroll the vector op. 9263 if (ResNE == 0) 9264 ResNE = NE; 9265 else if (NE > ResNE) 9266 NE = ResNE; 9267 9268 unsigned i; 9269 for (i= 0; i != NE; ++i) { 9270 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 9271 SDValue Operand = N->getOperand(j); 9272 EVT OperandVT = Operand.getValueType(); 9273 if (OperandVT.isVector()) { 9274 // A vector operand; extract a single element. 9275 EVT OperandEltVT = OperandVT.getVectorElementType(); 9276 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, 9277 Operand, getVectorIdxConstant(i, dl)); 9278 } else { 9279 // A scalar operand; just use it as is. 
9280 Operands[j] = Operand; 9281 } 9282 } 9283 9284 switch (N->getOpcode()) { 9285 default: { 9286 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 9287 N->getFlags())); 9288 break; 9289 } 9290 case ISD::VSELECT: 9291 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 9292 break; 9293 case ISD::SHL: 9294 case ISD::SRA: 9295 case ISD::SRL: 9296 case ISD::ROTL: 9297 case ISD::ROTR: 9298 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 9299 getShiftAmountOperand(Operands[0].getValueType(), 9300 Operands[1]))); 9301 break; 9302 case ISD::SIGN_EXTEND_INREG: { 9303 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 9304 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 9305 Operands[0], 9306 getValueType(ExtVT))); 9307 } 9308 } 9309 } 9310 9311 for (; i < ResNE; ++i) 9312 Scalars.push_back(getUNDEF(EltVT)); 9313 9314 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 9315 return getBuildVector(VecVT, dl, Scalars); 9316 } 9317 9318 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( 9319 SDNode *N, unsigned ResNE) { 9320 unsigned Opcode = N->getOpcode(); 9321 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO || 9322 Opcode == ISD::USUBO || Opcode == ISD::SSUBO || 9323 Opcode == ISD::UMULO || Opcode == ISD::SMULO) && 9324 "Expected an overflow opcode"); 9325 9326 EVT ResVT = N->getValueType(0); 9327 EVT OvVT = N->getValueType(1); 9328 EVT ResEltVT = ResVT.getVectorElementType(); 9329 EVT OvEltVT = OvVT.getVectorElementType(); 9330 SDLoc dl(N); 9331 9332 // If ResNE is 0, fully unroll the vector op. 9333 unsigned NE = ResVT.getVectorNumElements(); 9334 if (ResNE == 0) 9335 ResNE = NE; 9336 else if (NE > ResNE) 9337 NE = ResNE; 9338 9339 SmallVector<SDValue, 8> LHSScalars; 9340 SmallVector<SDValue, 8> RHSScalars; 9341 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); 9342 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); 9343 9344 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); 9345 SDVTList VTs = getVTList(ResEltVT, SVT); 9346 SmallVector<SDValue, 8> ResScalars; 9347 SmallVector<SDValue, 8> OvScalars; 9348 for (unsigned i = 0; i < NE; ++i) { 9349 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); 9350 SDValue Ov = 9351 getSelect(dl, OvEltVT, Res.getValue(1), 9352 getBoolConstant(true, dl, OvEltVT, ResVT), 9353 getConstant(0, dl, OvEltVT)); 9354 9355 ResScalars.push_back(Res); 9356 OvScalars.push_back(Ov); 9357 } 9358 9359 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); 9360 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); 9361 9362 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE); 9363 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE); 9364 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars), 9365 getBuildVector(NewOvVT, dl, OvScalars)); 9366 } 9367 9368 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 9369 LoadSDNode *Base, 9370 unsigned Bytes, 9371 int Dist) const { 9372 if (LD->isVolatile() || Base->isVolatile()) 9373 return false; 9374 // TODO: probably too restrictive for atomics, revisit 9375 if (!LD->isSimple()) 9376 return false; 9377 if (LD->isIndexed() || Base->isIndexed()) 9378 return false; 9379 if (LD->getChain() != Base->getChain()) 9380 return false; 9381 EVT VT = LD->getValueType(0); 9382 if (VT.getSizeInBits() / 8 != Bytes) 9383 return false; 9384 9385 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); 9386 auto LocDecomp = BaseIndexOffset::match(LD, *this); 
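  // If the two addresses decompose to the same base and index, the loads are
  // consecutive exactly when their constant offsets differ by Dist elements,
  // i.e. by Dist * Bytes bytes.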
9387 9388 int64_t Offset = 0; 9389 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset)) 9390 return (Dist * Bytes == Offset); 9391 return false; 9392 } 9393 9394 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if 9395 /// it cannot be inferred. 9396 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { 9397 // If this is a GlobalAddress + cst, return the alignment. 9398 const GlobalValue *GV = nullptr; 9399 int64_t GVOffset = 0; 9400 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { 9401 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); 9402 KnownBits Known(PtrWidth); 9403 llvm::computeKnownBits(GV, Known, getDataLayout()); 9404 unsigned AlignBits = Known.countMinTrailingZeros(); 9405 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; 9406 if (Align) 9407 return MinAlign(Align, GVOffset); 9408 } 9409 9410 // If this is a direct reference to a stack slot, use information about the 9411 // stack slot's alignment. 9412 int FrameIdx = INT_MIN; 9413 int64_t FrameOffset = 0; 9414 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) { 9415 FrameIdx = FI->getIndex(); 9416 } else if (isBaseWithConstantOffset(Ptr) && 9417 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 9418 // Handle FI+Cst 9419 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 9420 FrameOffset = Ptr.getConstantOperandVal(1); 9421 } 9422 9423 if (FrameIdx != INT_MIN) { 9424 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 9425 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx), 9426 FrameOffset); 9427 return FIInfoAlign; 9428 } 9429 9430 return 0; 9431 } 9432 9433 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type 9434 /// which is split (or expanded) into two not necessarily identical pieces. 9435 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const { 9436 // Currently all types are split in half. 9437 EVT LoVT, HiVT; 9438 if (!VT.isVector()) 9439 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); 9440 else 9441 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext()); 9442 9443 return std::make_pair(LoVT, HiVT); 9444 } 9445 9446 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the 9447 /// low/high part. 9448 std::pair<SDValue, SDValue> 9449 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, 9450 const EVT &HiVT) { 9451 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <= 9452 N.getValueType().getVectorNumElements() && 9453 "More vector elements requested than available!"); 9454 SDValue Lo, Hi; 9455 Lo = 9456 getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL)); 9457 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, 9458 getVectorIdxConstant(LoVT.getVectorNumElements(), DL)); 9459 return std::make_pair(Lo, Hi); 9460 } 9461 9462 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR. 
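/// For example, widening a v3f32 value N produces
/// (v4f32 (insert_subvector (v4f32 undef), N, 0)).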
9463 SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) { 9464 EVT VT = N.getValueType(); 9465 EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(), 9466 NextPowerOf2(VT.getVectorNumElements())); 9467 return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N, 9468 getVectorIdxConstant(0, DL)); 9469 } 9470 9471 void SelectionDAG::ExtractVectorElements(SDValue Op, 9472 SmallVectorImpl<SDValue> &Args, 9473 unsigned Start, unsigned Count) { 9474 EVT VT = Op.getValueType(); 9475 if (Count == 0) 9476 Count = VT.getVectorNumElements(); 9477 9478 EVT EltVT = VT.getVectorElementType(); 9479 SDLoc SL(Op); 9480 for (unsigned i = Start, e = Start + Count; i != e; ++i) { 9481 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op, 9482 getVectorIdxConstant(i, SL))); 9483 } 9484 } 9485 9486 // getAddressSpace - Return the address space this GlobalAddress belongs to. 9487 unsigned GlobalAddressSDNode::getAddressSpace() const { 9488 return getGlobal()->getType()->getAddressSpace(); 9489 } 9490 9491 Type *ConstantPoolSDNode::getType() const { 9492 if (isMachineConstantPoolEntry()) 9493 return Val.MachineCPVal->getType(); 9494 return Val.ConstVal->getType(); 9495 } 9496 9497 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef, 9498 unsigned &SplatBitSize, 9499 bool &HasAnyUndefs, 9500 unsigned MinSplatBits, 9501 bool IsBigEndian) const { 9502 EVT VT = getValueType(0); 9503 assert(VT.isVector() && "Expected a vector type"); 9504 unsigned VecWidth = VT.getSizeInBits(); 9505 if (MinSplatBits > VecWidth) 9506 return false; 9507 9508 // FIXME: The widths are based on this node's type, but build vectors can 9509 // truncate their operands. 9510 SplatValue = APInt(VecWidth, 0); 9511 SplatUndef = APInt(VecWidth, 0); 9512 9513 // Get the bits. Bits with undefined values (when the corresponding element 9514 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared 9515 // in SplatValue. If any of the values are not constant, give up and return 9516 // false. 9517 unsigned int NumOps = getNumOperands(); 9518 assert(NumOps > 0 && "isConstantSplat has 0-size build vector"); 9519 unsigned EltWidth = VT.getScalarSizeInBits(); 9520 9521 for (unsigned j = 0; j < NumOps; ++j) { 9522 unsigned i = IsBigEndian ? NumOps - 1 - j : j; 9523 SDValue OpVal = getOperand(i); 9524 unsigned BitPos = j * EltWidth; 9525 9526 if (OpVal.isUndef()) 9527 SplatUndef.setBits(BitPos, BitPos + EltWidth); 9528 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal)) 9529 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos); 9530 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal)) 9531 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos); 9532 else 9533 return false; 9534 } 9535 9536 // The build_vector is all constants or undefs. Find the smallest element 9537 // size that splats the vector. 9538 HasAnyUndefs = (SplatUndef != 0); 9539 9540 // FIXME: This does not work for vectors with elements less than 8 bits. 9541 while (VecWidth > 8) { 9542 unsigned HalfSize = VecWidth / 2; 9543 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize); 9544 APInt LowValue = SplatValue.trunc(HalfSize); 9545 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize); 9546 APInt LowUndef = SplatUndef.trunc(HalfSize); 9547 9548 // If the two halves do not match (ignoring undef bits), stop here. 
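    // Also stop if halving would drop below the caller's minimum splat size.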
9549 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) || 9550 MinSplatBits > HalfSize) 9551 break; 9552 9553 SplatValue = HighValue | LowValue; 9554 SplatUndef = HighUndef & LowUndef; 9555 9556 VecWidth = HalfSize; 9557 } 9558 9559 SplatBitSize = VecWidth; 9560 return true; 9561 } 9562 9563 SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts, 9564 BitVector *UndefElements) const { 9565 if (UndefElements) { 9566 UndefElements->clear(); 9567 UndefElements->resize(getNumOperands()); 9568 } 9569 assert(getNumOperands() == DemandedElts.getBitWidth() && 9570 "Unexpected vector size"); 9571 if (!DemandedElts) 9572 return SDValue(); 9573 SDValue Splatted; 9574 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 9575 if (!DemandedElts[i]) 9576 continue; 9577 SDValue Op = getOperand(i); 9578 if (Op.isUndef()) { 9579 if (UndefElements) 9580 (*UndefElements)[i] = true; 9581 } else if (!Splatted) { 9582 Splatted = Op; 9583 } else if (Splatted != Op) { 9584 return SDValue(); 9585 } 9586 } 9587 9588 if (!Splatted) { 9589 unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros(); 9590 assert(getOperand(FirstDemandedIdx).isUndef() && 9591 "Can only have a splat without a constant for all undefs."); 9592 return getOperand(FirstDemandedIdx); 9593 } 9594 9595 return Splatted; 9596 } 9597 9598 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { 9599 APInt DemandedElts = APInt::getAllOnesValue(getNumOperands()); 9600 return getSplatValue(DemandedElts, UndefElements); 9601 } 9602 9603 ConstantSDNode * 9604 BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts, 9605 BitVector *UndefElements) const { 9606 return dyn_cast_or_null<ConstantSDNode>( 9607 getSplatValue(DemandedElts, UndefElements)); 9608 } 9609 9610 ConstantSDNode * 9611 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const { 9612 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements)); 9613 } 9614 9615 ConstantFPSDNode * 9616 BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts, 9617 BitVector *UndefElements) const { 9618 return dyn_cast_or_null<ConstantFPSDNode>( 9619 getSplatValue(DemandedElts, UndefElements)); 9620 } 9621 9622 ConstantFPSDNode * 9623 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const { 9624 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements)); 9625 } 9626 9627 int32_t 9628 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, 9629 uint32_t BitWidth) const { 9630 if (ConstantFPSDNode *CN = 9631 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) { 9632 bool IsExact; 9633 APSInt IntVal(BitWidth); 9634 const APFloat &APF = CN->getValueAPF(); 9635 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) != 9636 APFloat::opOK || 9637 !IsExact) 9638 return -1; 9639 9640 return IntVal.exactLogBase2(); 9641 } 9642 return -1; 9643 } 9644 9645 bool BuildVectorSDNode::isConstant() const { 9646 for (const SDValue &Op : op_values()) { 9647 unsigned Opc = Op.getOpcode(); 9648 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP) 9649 return false; 9650 } 9651 return true; 9652 } 9653 9654 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { 9655 // Find the first non-undef value in the shuffle mask. 
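  // (A negative mask element denotes an undef lane.)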
9656 unsigned i, e; 9657 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i) 9658 /* search */; 9659 9660 // If all elements are undefined, this shuffle can be considered a splat 9661 // (although it should eventually get simplified away completely). 9662 if (i == e) 9663 return true; 9664 9665 // Make sure all remaining elements are either undef or the same as the first 9666 // non-undef value. 9667 for (int Idx = Mask[i]; i != e; ++i) 9668 if (Mask[i] >= 0 && Mask[i] != Idx) 9669 return false; 9670 return true; 9671 } 9672 9673 // Returns the SDNode if it is a constant integer BuildVector 9674 // or constant integer. 9675 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) { 9676 if (isa<ConstantSDNode>(N)) 9677 return N.getNode(); 9678 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) 9679 return N.getNode(); 9680 // Treat a GlobalAddress supporting constant offset folding as a 9681 // constant integer. 9682 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N)) 9683 if (GA->getOpcode() == ISD::GlobalAddress && 9684 TLI->isOffsetFoldingLegal(GA)) 9685 return GA; 9686 return nullptr; 9687 } 9688 9689 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) { 9690 if (isa<ConstantFPSDNode>(N)) 9691 return N.getNode(); 9692 9693 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode())) 9694 return N.getNode(); 9695 9696 return nullptr; 9697 } 9698 9699 void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) { 9700 assert(!Node->OperandList && "Node already has operands"); 9701 assert(SDNode::getMaxNumOperands() >= Vals.size() && 9702 "too many operands to fit into SDNode"); 9703 SDUse *Ops = OperandRecycler.allocate( 9704 ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator); 9705 9706 bool IsDivergent = false; 9707 for (unsigned I = 0; I != Vals.size(); ++I) { 9708 Ops[I].setUser(Node); 9709 Ops[I].setInitial(Vals[I]); 9710 if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence. 9711 IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent(); 9712 } 9713 Node->NumOperands = Vals.size(); 9714 Node->OperandList = Ops; 9715 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA); 9716 if (!TLI->isSDNodeAlwaysUniform(Node)) 9717 Node->SDNodeBits.IsDivergent = IsDivergent; 9718 checkForCycles(Node); 9719 } 9720 9721 SDValue SelectionDAG::getTokenFactor(const SDLoc &DL, 9722 SmallVectorImpl<SDValue> &Vals) { 9723 size_t Limit = SDNode::getMaxNumOperands(); 9724 while (Vals.size() > Limit) { 9725 unsigned SliceIdx = Vals.size() - Limit; 9726 auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit); 9727 SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs); 9728 Vals.erase(Vals.begin() + SliceIdx, Vals.end()); 9729 Vals.emplace_back(NewTF); 9730 } 9731 return getNode(ISD::TokenFactor, DL, MVT::Other, Vals); 9732 } 9733 9734 #ifndef NDEBUG 9735 static void checkForCyclesHelper(const SDNode *N, 9736 SmallPtrSetImpl<const SDNode*> &Visited, 9737 SmallPtrSetImpl<const SDNode*> &Checked, 9738 const llvm::SelectionDAG *DAG) { 9739 // If this node has already been checked, don't check it again. 9740 if (Checked.count(N)) 9741 return; 9742 9743 // If a node has already been visited on this depth-first walk, reject it as 9744 // a cycle. 
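  // (Visited holds the nodes on the current DFS path and is erased on the way
  // back up; Checked holds nodes whose entire subgraph is already known to be
  // acyclic.)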
9745 if (!Visited.insert(N).second) { 9746 errs() << "Detected cycle in SelectionDAG\n"; 9747 dbgs() << "Offending node:\n"; 9748 N->dumprFull(DAG); dbgs() << "\n"; 9749 abort(); 9750 } 9751 9752 for (const SDValue &Op : N->op_values()) 9753 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG); 9754 9755 Checked.insert(N); 9756 Visited.erase(N); 9757 } 9758 #endif 9759 9760 void llvm::checkForCycles(const llvm::SDNode *N, 9761 const llvm::SelectionDAG *DAG, 9762 bool force) { 9763 #ifndef NDEBUG 9764 bool check = force; 9765 #ifdef EXPENSIVE_CHECKS 9766 check = true; 9767 #endif // EXPENSIVE_CHECKS 9768 if (check) { 9769 assert(N && "Checking nonexistent SDNode"); 9770 SmallPtrSet<const SDNode*, 32> visited; 9771 SmallPtrSet<const SDNode*, 32> checked; 9772 checkForCyclesHelper(N, visited, checked, DAG); 9773 } 9774 #endif // !NDEBUG 9775 } 9776 9777 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) { 9778 checkForCycles(DAG->getRoot().getNode(), DAG, force); 9779 } 9780