//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
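  // Note that all_of() is vacuously true on an empty range, so this explicit
  // check is what keeps a zero-operand node from being reported as all-undef.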
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
                       (OldL << 1) |      // New G bit
                       (OldG << 2));      // New L bit.
}

static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7;  // Flip L, G, E bits, but not U.
  else
    Operation ^= 15; // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8; // Don't let N and U bits get set.
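  // Worked example, using the bit layout implied by the masks above
  // (E = 1, G = 2, L = 4, U = 8): inverting the FP predicate SETULT (U|L = 12)
  // flips all four bits, giving 3 = SETOGE, i.e. !(x <u y) == (x >=o y).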

  return ISD::CondCode(Operation);
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  return getSetCCInverseImpl(Op, Type.isInteger());
}

ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
                                               bool isIntegerLike) {
  return getSetCCInverseImpl(Op, isIntegerLike);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if the result is an unsigned comparison. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlign().value());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
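  // (For example, for a Constant node this also hashes in the ConstantInt
  // pointer and the opaque flag, so constants with the same type but
  // different values are never CSE'd together.)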
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node.
/// We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.
/// If a node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

Align SelectionDAG::getEVTAlign(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlign(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis * Divergence,
                        ProfileSummaryInfo *PSIin,
                        BlockFrequencyInfo *BFIin) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
  PSI = PSIin;
  BFI = BFIin;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

bool SelectionDAG::shouldOptForSize() const {
  return MF->getFunction().hasOptSize() ||
      llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

std::pair<SDValue, SDValue>
SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
                                       const SDLoc &DL, EVT VT) {
  assert(!VT.bitsEq(Op.getValueType()) &&
         "Strict no-op FP extend/round not allowed.");
  SDValue Res =
      VT.bitsGT(Op.getValueType())
          ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
          : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
                    {Chain, Op, getIntPtrConstant(0, DL)});

  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  EVT OpVT = Op.getValueType();
  assert(VT.isInteger() && OpVT.isInteger() &&
         "Cannot getZeroExtendInReg FP types");
  assert(VT.isVector() == OpVT.isVector() &&
         "getZeroExtendInReg type should be vector iff the operand "
         "type is vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == OpVT.getVectorNumElements()) &&
         "Vector element counts must match in getZeroExtendInReg");
  assert(VT.bitsLE(OpVT) && "Not extending!");
  if (OpVT == VT)
    return Op;
  APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
                                   VT.getScalarSizeInBits());
  return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
}

SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now.
  // In the future this might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
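  // Illustrative example (little-endian): a v2i64 splat of 0x0000000100000002
  // with only i32 legal becomes a v4i32 BUILD_VECTOR <2, 1, 2, 1> that is then
  // bitcast back to v2i64; the part order is reversed on big-endian targets.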
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ?
    ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isScalableVector())
    Result = getSplatVector(VT, DL, Result);
  else if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
                                             const SDLoc &DL, bool LegalTypes) {
  assert(VT.isInteger() && "Shift amount is not an integer type!");
  EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
  return getConstant(Val, DL, ShiftVT);
}

SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
                                           bool isTarget) {
  return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ?
    ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ?
    ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (!Alignment)
    Alignment = shouldOptForSize()
                    ? getDataLayout().getABITypeAlign(C->getType())
                    : getDataLayout().getPrefTypeAlign(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment->value());
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
  return V;
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (!Alignment)
    Alignment = getDataLayout().getPrefTypeAlign(C->getType());
  unsigned Opc = isTarget ?
    ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment->value());
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
1614 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) { 1615 std::swap(N1, N2); 1616 ShuffleVectorSDNode::commuteMask(M); 1617 } 1618 1619 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, 1620 SDValue N2, ArrayRef<int> Mask) { 1621 assert(VT.getVectorNumElements() == Mask.size() && 1622 "Must have the same number of vector elements as mask elements!"); 1623 assert(VT == N1.getValueType() && VT == N2.getValueType() && 1624 "Invalid VECTOR_SHUFFLE"); 1625 1626 // Canonicalize shuffle undef, undef -> undef 1627 if (N1.isUndef() && N2.isUndef()) 1628 return getUNDEF(VT); 1629 1630 // Validate that all indices in Mask are within the range of the elements 1631 // input to the shuffle. 1632 int NElts = Mask.size(); 1633 assert(llvm::all_of(Mask, 1634 [&](int M) { return M < (NElts * 2) && M >= -1; }) && 1635 "Index out of range"); 1636 1637 // Copy the mask so we can do any needed cleanup. 1638 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end()); 1639 1640 // Canonicalize shuffle v, v -> v, undef 1641 if (N1 == N2) { 1642 N2 = getUNDEF(VT); 1643 for (int i = 0; i != NElts; ++i) 1644 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts; 1645 } 1646 1647 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. 1648 if (N1.isUndef()) 1649 commuteShuffle(N1, N2, MaskVec); 1650 1651 if (TLI->hasVectorBlend()) { 1652 // If shuffling a splat, try to blend the splat instead. We do this here so 1653 // that even when this arises during lowering we don't have to re-handle it. 1654 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) { 1655 BitVector UndefElements; 1656 SDValue Splat = BV->getSplatValue(&UndefElements); 1657 if (!Splat) 1658 return; 1659 1660 for (int i = 0; i < NElts; ++i) { 1661 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts)) 1662 continue; 1663 1664 // If this input comes from undef, mark it as such. 1665 if (UndefElements[MaskVec[i] - Offset]) { 1666 MaskVec[i] = -1; 1667 continue; 1668 } 1669 1670 // If we can blend a non-undef lane, use that instead. 1671 if (!UndefElements[i]) 1672 MaskVec[i] = i + Offset; 1673 } 1674 }; 1675 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1)) 1676 BlendSplat(N1BV, 0); 1677 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2)) 1678 BlendSplat(N2BV, NElts); 1679 } 1680 1681 // Canonicalize all index into lhs, -> shuffle lhs, undef 1682 // Canonicalize all index into rhs, -> shuffle rhs, undef 1683 bool AllLHS = true, AllRHS = true; 1684 bool N2Undef = N2.isUndef(); 1685 for (int i = 0; i != NElts; ++i) { 1686 if (MaskVec[i] >= NElts) { 1687 if (N2Undef) 1688 MaskVec[i] = -1; 1689 else 1690 AllLHS = false; 1691 } else if (MaskVec[i] >= 0) { 1692 AllRHS = false; 1693 } 1694 } 1695 if (AllLHS && AllRHS) 1696 return getUNDEF(VT); 1697 if (AllLHS && !N2Undef) 1698 N2 = getUNDEF(VT); 1699 if (AllRHS) { 1700 N1 = getUNDEF(VT); 1701 commuteShuffle(N1, N2, MaskVec); 1702 } 1703 // Reset our undef status after accounting for the mask. 1704 N2Undef = N2.isUndef(); 1705 // Re-check whether both sides ended up undef. 1706 if (N1.isUndef() && N2Undef) 1707 return getUNDEF(VT); 1708 1709 // If Identity shuffle return that node. 1710 bool Identity = true, AllSame = true; 1711 for (int i = 0; i != NElts; ++i) { 1712 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false; 1713 if (MaskVec[i] != MaskVec[0]) AllSame = false; 1714 } 1715 if (Identity && NElts) 1716 return N1; 1717 1718 // Shuffling a constant splat doesn't change the result. 
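// e.g. shuffling <x,x,x,x> yields <x,x,x,x> for every in-range mask index, so
// the splatted input (bitcast back to VT if necessary) can be reused directly.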
1719 if (N2Undef) { 1720 SDValue V = N1; 1721 1722 // Look through any bitcasts. We check that these don't change the number 1723 // (and size) of elements and just changes their types. 1724 while (V.getOpcode() == ISD::BITCAST) 1725 V = V->getOperand(0); 1726 1727 // A splat should always show up as a build vector node. 1728 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 1729 BitVector UndefElements; 1730 SDValue Splat = BV->getSplatValue(&UndefElements); 1731 // If this is a splat of an undef, shuffling it is also undef. 1732 if (Splat && Splat.isUndef()) 1733 return getUNDEF(VT); 1734 1735 bool SameNumElts = 1736 V.getValueType().getVectorNumElements() == VT.getVectorNumElements(); 1737 1738 // We only have a splat which can skip shuffles if there is a splatted 1739 // value and no undef lanes rearranged by the shuffle. 1740 if (Splat && UndefElements.none()) { 1741 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the 1742 // number of elements match or the value splatted is a zero constant. 1743 if (SameNumElts) 1744 return N1; 1745 if (auto *C = dyn_cast<ConstantSDNode>(Splat)) 1746 if (C->isNullValue()) 1747 return N1; 1748 } 1749 1750 // If the shuffle itself creates a splat, build the vector directly. 1751 if (AllSame && SameNumElts) { 1752 EVT BuildVT = BV->getValueType(0); 1753 const SDValue &Splatted = BV->getOperand(MaskVec[0]); 1754 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted); 1755 1756 // We may have jumped through bitcasts, so the type of the 1757 // BUILD_VECTOR may not match the type of the shuffle. 1758 if (BuildVT != VT) 1759 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV); 1760 return NewBV; 1761 } 1762 } 1763 } 1764 1765 FoldingSetNodeID ID; 1766 SDValue Ops[2] = { N1, N2 }; 1767 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops); 1768 for (int i = 0; i != NElts; ++i) 1769 ID.AddInteger(MaskVec[i]); 1770 1771 void* IP = nullptr; 1772 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1773 return SDValue(E, 0); 1774 1775 // Allocate the mask array for the node out of the BumpPtrAllocator, since 1776 // SDNode doesn't have access to it. This memory will be "leaked" when 1777 // the node is deallocated, but recovered when the NodeAllocator is released. 
1778 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1779 llvm::copy(MaskVec, MaskAlloc); 1780 1781 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1782 dl.getDebugLoc(), MaskAlloc); 1783 createOperands(N, Ops); 1784 1785 CSEMap.InsertNode(N, IP); 1786 InsertNode(N); 1787 SDValue V = SDValue(N, 0); 1788 NewSDValueDbgMsg(V, "Creating new node: ", this); 1789 return V; 1790 } 1791 1792 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1793 EVT VT = SV.getValueType(0); 1794 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1795 ShuffleVectorSDNode::commuteMask(MaskVec); 1796 1797 SDValue Op0 = SV.getOperand(0); 1798 SDValue Op1 = SV.getOperand(1); 1799 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1800 } 1801 1802 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1803 FoldingSetNodeID ID; 1804 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1805 ID.AddInteger(RegNo); 1806 void *IP = nullptr; 1807 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1808 return SDValue(E, 0); 1809 1810 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1811 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 1812 CSEMap.InsertNode(N, IP); 1813 InsertNode(N); 1814 return SDValue(N, 0); 1815 } 1816 1817 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1818 FoldingSetNodeID ID; 1819 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1820 ID.AddPointer(RegMask); 1821 void *IP = nullptr; 1822 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1823 return SDValue(E, 0); 1824 1825 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1826 CSEMap.InsertNode(N, IP); 1827 InsertNode(N); 1828 return SDValue(N, 0); 1829 } 1830 1831 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1832 MCSymbol *Label) { 1833 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1834 } 1835 1836 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1837 SDValue Root, MCSymbol *Label) { 1838 FoldingSetNodeID ID; 1839 SDValue Ops[] = { Root }; 1840 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1841 ID.AddPointer(Label); 1842 void *IP = nullptr; 1843 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1844 return SDValue(E, 0); 1845 1846 auto *N = 1847 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label); 1848 createOperands(N, Ops); 1849 1850 CSEMap.InsertNode(N, IP); 1851 InsertNode(N); 1852 return SDValue(N, 0); 1853 } 1854 1855 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1856 int64_t Offset, bool isTarget, 1857 unsigned TargetFlags) { 1858 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1859 1860 FoldingSetNodeID ID; 1861 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1862 ID.AddPointer(BA); 1863 ID.AddInteger(Offset); 1864 ID.AddInteger(TargetFlags); 1865 void *IP = nullptr; 1866 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1867 return SDValue(E, 0); 1868 1869 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1870 CSEMap.InsertNode(N, IP); 1871 InsertNode(N); 1872 return SDValue(N, 0); 1873 } 1874 1875 SDValue SelectionDAG::getSrcValue(const Value *V) { 1876 assert((!V || V->getType()->isPointerTy()) && 1877 "SrcValue is not a pointer?"); 1878 1879 FoldingSetNodeID ID; 1880 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1881 ID.AddPointer(V); 1882 1883 void *IP = nullptr; 1884 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1885 return SDValue(E, 0); 1886 1887 auto *N = newSDNode<SrcValueSDNode>(V); 1888 CSEMap.InsertNode(N, IP); 1889 InsertNode(N); 1890 return SDValue(N, 0); 1891 } 1892 1893 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1894 FoldingSetNodeID ID; 1895 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1896 ID.AddPointer(MD); 1897 1898 void *IP = nullptr; 1899 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1900 return SDValue(E, 0); 1901 1902 auto *N = newSDNode<MDNodeSDNode>(MD); 1903 CSEMap.InsertNode(N, IP); 1904 InsertNode(N); 1905 return SDValue(N, 0); 1906 } 1907 1908 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1909 if (VT == V.getValueType()) 1910 return V; 1911 1912 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1913 } 1914 1915 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1916 unsigned SrcAS, unsigned DestAS) { 1917 SDValue Ops[] = {Ptr}; 1918 FoldingSetNodeID ID; 1919 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1920 ID.AddInteger(SrcAS); 1921 ID.AddInteger(DestAS); 1922 1923 void *IP = nullptr; 1924 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1925 return SDValue(E, 0); 1926 1927 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1928 VT, SrcAS, DestAS); 1929 createOperands(N, Ops); 1930 1931 CSEMap.InsertNode(N, IP); 1932 InsertNode(N); 1933 return SDValue(N, 0); 1934 } 1935 1936 SDValue SelectionDAG::getFreeze(SDValue V) { 1937 return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V); 1938 } 1939 1940 /// getShiftAmountOperand - Return the specified value casted to 1941 /// the target's desired shift amount type. 
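/// For example, an i8 amount is zero-extended (and a wider amount truncated) to
/// the target's shift-amount type; vector amounts and amounts that already have
/// that type are returned unchanged.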
1942 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1943 EVT OpTy = Op.getValueType(); 1944 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1945 if (OpTy == ShTy || OpTy.isVector()) return Op; 1946 1947 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1948 } 1949 1950 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1951 SDLoc dl(Node); 1952 const TargetLowering &TLI = getTargetLoweringInfo(); 1953 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1954 EVT VT = Node->getValueType(0); 1955 SDValue Tmp1 = Node->getOperand(0); 1956 SDValue Tmp2 = Node->getOperand(1); 1957 const MaybeAlign MA(Node->getConstantOperandVal(3)); 1958 1959 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1960 Tmp2, MachinePointerInfo(V)); 1961 SDValue VAList = VAListLoad; 1962 1963 if (MA && *MA > TLI.getMinStackArgumentAlignment()) { 1964 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1965 getConstant(MA->value() - 1, dl, VAList.getValueType())); 1966 1967 VAList = 1968 getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1969 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType())); 1970 } 1971 1972 // Increment the pointer, VAList, to the next vaarg 1973 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1974 getConstant(getDataLayout().getTypeAllocSize( 1975 VT.getTypeForEVT(*getContext())), 1976 dl, VAList.getValueType())); 1977 // Store the incremented VAList to the legalized pointer 1978 Tmp1 = 1979 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1980 // Load the actual argument out of the pointer VAList 1981 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1982 } 1983 1984 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1985 SDLoc dl(Node); 1986 const TargetLowering &TLI = getTargetLoweringInfo(); 1987 // This defaults to loading a pointer from the input and storing it to the 1988 // output, returning the chain. 
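// Operands 1 and 2 are the destination and source pointers; operands 3 and 4
// carry the corresponding IR values used to build the MachinePointerInfos.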
1989 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1990 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1991 SDValue Tmp1 = 1992 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1993 Node->getOperand(2), MachinePointerInfo(VS)); 1994 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1995 MachinePointerInfo(VD)); 1996 } 1997 1998 SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) { 1999 MachineFrameInfo &MFI = MF->getFrameInfo(); 2000 int FrameIdx = MFI.CreateStackObject(Bytes, Alignment, false); 2001 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 2002 } 2003 2004 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 2005 Type *Ty = VT.getTypeForEVT(*getContext()); 2006 Align StackAlign = 2007 std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign)); 2008 return CreateStackTemporary(VT.getStoreSize(), StackAlign); 2009 } 2010 2011 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 2012 TypeSize Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 2013 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 2014 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 2015 const DataLayout &DL = getDataLayout(); 2016 Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2)); 2017 return CreateStackTemporary(Bytes, Align); 2018 } 2019 2020 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 2021 ISD::CondCode Cond, const SDLoc &dl) { 2022 EVT OpVT = N1.getValueType(); 2023 2024 // These setcc operations always fold. 2025 switch (Cond) { 2026 default: break; 2027 case ISD::SETFALSE: 2028 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); 2029 case ISD::SETTRUE: 2030 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); 2031 2032 case ISD::SETOEQ: 2033 case ISD::SETOGT: 2034 case ISD::SETOGE: 2035 case ISD::SETOLT: 2036 case ISD::SETOLE: 2037 case ISD::SETONE: 2038 case ISD::SETO: 2039 case ISD::SETUO: 2040 case ISD::SETUEQ: 2041 case ISD::SETUNE: 2042 assert(!OpVT.isInteger() && "Illegal setcc for integer!"); 2043 break; 2044 } 2045 2046 if (OpVT.isInteger()) { 2047 // For EQ and NE, we can always pick a value for the undef to make the 2048 // predicate pass or fail, so we can return undef. 2049 // Matches behavior in llvm::ConstantFoldCompareInstruction. 2050 // icmp eq/ne X, undef -> undef. 2051 if ((N1.isUndef() || N2.isUndef()) && 2052 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) 2053 return getUNDEF(VT); 2054 2055 // If both operands are undef, we can return undef for int comparison. 2056 // icmp undef, undef -> undef. 2057 if (N1.isUndef() && N2.isUndef()) 2058 return getUNDEF(VT); 2059 2060 // icmp X, X -> true/false 2061 // icmp X, undef -> true/false because undef could be X. 
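// e.g. seteq X, X folds to true while setult X, X folds to false.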
2062 if (N1 == N2) 2063 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT); 2064 } 2065 2066 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 2067 const APInt &C2 = N2C->getAPIntValue(); 2068 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 2069 const APInt &C1 = N1C->getAPIntValue(); 2070 2071 switch (Cond) { 2072 default: llvm_unreachable("Unknown integer setcc!"); 2073 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT); 2074 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT); 2075 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT); 2076 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT); 2077 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT); 2078 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT); 2079 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT); 2080 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT); 2081 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT); 2082 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT); 2083 } 2084 } 2085 } 2086 2087 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 2088 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 2089 2090 if (N1CFP && N2CFP) { 2091 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF()); 2092 switch (Cond) { 2093 default: break; 2094 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 2095 return getUNDEF(VT); 2096 LLVM_FALLTHROUGH; 2097 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, 2098 OpVT); 2099 case ISD::SETNE: if (R==APFloat::cmpUnordered) 2100 return getUNDEF(VT); 2101 LLVM_FALLTHROUGH; 2102 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2103 R==APFloat::cmpLessThan, dl, VT, 2104 OpVT); 2105 case ISD::SETLT: if (R==APFloat::cmpUnordered) 2106 return getUNDEF(VT); 2107 LLVM_FALLTHROUGH; 2108 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, 2109 OpVT); 2110 case ISD::SETGT: if (R==APFloat::cmpUnordered) 2111 return getUNDEF(VT); 2112 LLVM_FALLTHROUGH; 2113 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, 2114 VT, OpVT); 2115 case ISD::SETLE: if (R==APFloat::cmpUnordered) 2116 return getUNDEF(VT); 2117 LLVM_FALLTHROUGH; 2118 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || 2119 R==APFloat::cmpEqual, dl, VT, 2120 OpVT); 2121 case ISD::SETGE: if (R==APFloat::cmpUnordered) 2122 return getUNDEF(VT); 2123 LLVM_FALLTHROUGH; 2124 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2125 R==APFloat::cmpEqual, dl, VT, OpVT); 2126 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, 2127 OpVT); 2128 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, 2129 OpVT); 2130 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || 2131 R==APFloat::cmpEqual, dl, VT, 2132 OpVT); 2133 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, 2134 OpVT); 2135 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || 2136 R==APFloat::cmpLessThan, dl, VT, 2137 OpVT); 2138 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || 2139 R==APFloat::cmpUnordered, dl, VT, 2140 OpVT); 2141 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, 2142 VT, OpVT); 2143 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, 2144 OpVT); 2145 } 2146 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) { 2147 // 
Ensure that the constant occurs on the RHS. 2148 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 2149 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT())) 2150 return SDValue(); 2151 return getSetCC(dl, VT, N2, N1, SwappedCond); 2152 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) || 2153 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) { 2154 // If an operand is known to be a nan (or undef that could be a nan), we can 2155 // fold it. 2156 // Choosing NaN for the undef will always make unordered comparison succeed 2157 // and ordered comparison fails. 2158 // Matches behavior in llvm::ConstantFoldCompareInstruction. 2159 switch (ISD::getUnorderedFlavor(Cond)) { 2160 default: 2161 llvm_unreachable("Unknown flavor!"); 2162 case 0: // Known false. 2163 return getBoolConstant(false, dl, VT, OpVT); 2164 case 1: // Known true. 2165 return getBoolConstant(true, dl, VT, OpVT); 2166 case 2: // Undefined. 2167 return getUNDEF(VT); 2168 } 2169 } 2170 2171 // Could not fold it. 2172 return SDValue(); 2173 } 2174 2175 /// See if the specified operand can be simplified with the knowledge that only 2176 /// the bits specified by DemandedBits are used. 2177 /// TODO: really we should be making this into the DAG equivalent of 2178 /// SimplifyMultipleUseDemandedBits and not generate any new nodes. 2179 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) { 2180 EVT VT = V.getValueType(); 2181 APInt DemandedElts = VT.isVector() 2182 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2183 : APInt(1, 1); 2184 return GetDemandedBits(V, DemandedBits, DemandedElts); 2185 } 2186 2187 /// See if the specified operand can be simplified with the knowledge that only 2188 /// the bits specified by DemandedBits are used in the elements specified by 2189 /// DemandedElts. 2190 /// TODO: really we should be making this into the DAG equivalent of 2191 /// SimplifyMultipleUseDemandedBits and not generate any new nodes. 2192 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits, 2193 const APInt &DemandedElts) { 2194 switch (V.getOpcode()) { 2195 default: 2196 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts, 2197 *this, 0); 2198 break; 2199 case ISD::Constant: { 2200 auto *CV = cast<ConstantSDNode>(V.getNode()); 2201 assert(CV && "Const value should be ConstSDNode."); 2202 const APInt &CVal = CV->getAPIntValue(); 2203 APInt NewVal = CVal & DemandedBits; 2204 if (NewVal != CVal) 2205 return getConstant(NewVal, SDLoc(V), V.getValueType()); 2206 break; 2207 } 2208 case ISD::SRL: 2209 // Only look at single-use SRLs. 2210 if (!V.getNode()->hasOneUse()) 2211 break; 2212 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 2213 // See if we can recursively simplify the LHS. 2214 unsigned Amt = RHSC->getZExtValue(); 2215 2216 // Watch out for shift count overflow though. 2217 if (Amt >= DemandedBits.getBitWidth()) 2218 break; 2219 APInt SrcDemandedBits = DemandedBits << Amt; 2220 if (SDValue SimplifyLHS = 2221 GetDemandedBits(V.getOperand(0), SrcDemandedBits)) 2222 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS, 2223 V.getOperand(1)); 2224 } 2225 break; 2226 case ISD::AND: { 2227 // X & -1 -> X (ignoring bits which aren't demanded). 2228 // Also handle the case where masked out bits in X are known to be zero. 
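// e.g. if only the low 8 bits are demanded and the mask is 0xFF (or the bits it
// clears are already known zero in X), the AND is redundant and X can be used.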
2229 if (ConstantSDNode *RHSC = isConstOrConstSplat(V.getOperand(1))) { 2230 const APInt &AndVal = RHSC->getAPIntValue(); 2231 if (DemandedBits.isSubsetOf(AndVal) || 2232 DemandedBits.isSubsetOf(computeKnownBits(V.getOperand(0)).Zero | 2233 AndVal)) 2234 return V.getOperand(0); 2235 } 2236 break; 2237 } 2238 } 2239 return SDValue(); 2240 } 2241 2242 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2243 /// use this predicate to simplify operations downstream. 2244 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2245 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2246 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2247 } 2248 2249 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2250 /// this predicate to simplify operations downstream. Mask is known to be zero 2251 /// for bits that V cannot have. 2252 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2253 unsigned Depth) const { 2254 EVT VT = V.getValueType(); 2255 APInt DemandedElts = VT.isVector() 2256 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2257 : APInt(1, 1); 2258 return MaskedValueIsZero(V, Mask, DemandedElts, Depth); 2259 } 2260 2261 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in 2262 /// DemandedElts. We use this predicate to simplify operations downstream. 2263 /// Mask is known to be zero for bits that V cannot have. 2264 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2265 const APInt &DemandedElts, 2266 unsigned Depth) const { 2267 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero); 2268 } 2269 2270 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'. 2271 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask, 2272 unsigned Depth) const { 2273 return Mask.isSubsetOf(computeKnownBits(V, Depth).One); 2274 } 2275 2276 /// isSplatValue - Return true if the vector V has the same value 2277 /// across all DemandedElts. For scalable vectors it does not make 2278 /// sense to specify which elements are demanded or undefined, therefore 2279 /// they are simply ignored. 2280 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, 2281 APInt &UndefElts) { 2282 EVT VT = V.getValueType(); 2283 assert(VT.isVector() && "Vector type expected"); 2284 2285 if (!VT.isScalableVector() && !DemandedElts) 2286 return false; // No demanded elts, better to assume we don't know anything. 2287 2288 // Deal with some common cases here that work for both fixed and scalable 2289 // vector types. 2290 switch (V.getOpcode()) { 2291 case ISD::SPLAT_VECTOR: 2292 return true; 2293 case ISD::ADD: 2294 case ISD::SUB: 2295 case ISD::AND: { 2296 APInt UndefLHS, UndefRHS; 2297 SDValue LHS = V.getOperand(0); 2298 SDValue RHS = V.getOperand(1); 2299 if (isSplatValue(LHS, DemandedElts, UndefLHS) && 2300 isSplatValue(RHS, DemandedElts, UndefRHS)) { 2301 UndefElts = UndefLHS | UndefRHS; 2302 return true; 2303 } 2304 break; 2305 } 2306 } 2307 2308 // We don't support other cases than those above for scalable vectors at 2309 // the moment. 
2310 if (VT.isScalableVector()) 2311 return false; 2312 2313 unsigned NumElts = VT.getVectorNumElements(); 2314 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch"); 2315 UndefElts = APInt::getNullValue(NumElts); 2316 2317 switch (V.getOpcode()) { 2318 case ISD::BUILD_VECTOR: { 2319 SDValue Scl; 2320 for (unsigned i = 0; i != NumElts; ++i) { 2321 SDValue Op = V.getOperand(i); 2322 if (Op.isUndef()) { 2323 UndefElts.setBit(i); 2324 continue; 2325 } 2326 if (!DemandedElts[i]) 2327 continue; 2328 if (Scl && Scl != Op) 2329 return false; 2330 Scl = Op; 2331 } 2332 return true; 2333 } 2334 case ISD::VECTOR_SHUFFLE: { 2335 // Check if this is a shuffle node doing a splat. 2336 // TODO: Do we need to handle shuffle(splat, undef, mask)? 2337 int SplatIndex = -1; 2338 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); 2339 for (int i = 0; i != (int)NumElts; ++i) { 2340 int M = Mask[i]; 2341 if (M < 0) { 2342 UndefElts.setBit(i); 2343 continue; 2344 } 2345 if (!DemandedElts[i]) 2346 continue; 2347 if (0 <= SplatIndex && SplatIndex != M) 2348 return false; 2349 SplatIndex = M; 2350 } 2351 return true; 2352 } 2353 case ISD::EXTRACT_SUBVECTOR: { 2354 // Offset the demanded elts by the subvector index. 2355 SDValue Src = V.getOperand(0); 2356 uint64_t Idx = V.getConstantOperandVal(1); 2357 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2358 APInt UndefSrcElts; 2359 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2360 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts)) { 2361 UndefElts = UndefSrcElts.extractBits(NumElts, Idx); 2362 return true; 2363 } 2364 break; 2365 } 2366 } 2367 2368 return false; 2369 } 2370 2371 /// Helper wrapper to main isSplatValue function. 2372 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) { 2373 EVT VT = V.getValueType(); 2374 assert(VT.isVector() && "Vector type expected"); 2375 2376 APInt UndefElts; 2377 APInt DemandedElts; 2378 2379 // For now we don't support this with scalable vectors. 2380 if (!VT.isScalableVector()) 2381 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); 2382 return isSplatValue(V, DemandedElts, UndefElts) && 2383 (AllowUndefs || !UndefElts); 2384 } 2385 2386 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) { 2387 V = peekThroughExtractSubvectors(V); 2388 2389 EVT VT = V.getValueType(); 2390 unsigned Opcode = V.getOpcode(); 2391 switch (Opcode) { 2392 default: { 2393 APInt UndefElts; 2394 APInt DemandedElts; 2395 2396 if (!VT.isScalableVector()) 2397 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); 2398 2399 if (isSplatValue(V, DemandedElts, UndefElts)) { 2400 if (VT.isScalableVector()) { 2401 // DemandedElts and UndefElts are ignored for scalable vectors, since 2402 // the only supported cases are SPLAT_VECTOR nodes. 2403 SplatIdx = 0; 2404 } else { 2405 // Handle case where all demanded elements are UNDEF. 2406 if (DemandedElts.isSubsetOf(UndefElts)) { 2407 SplatIdx = 0; 2408 return getUNDEF(VT); 2409 } 2410 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes(); 2411 } 2412 return V; 2413 } 2414 break; 2415 } 2416 case ISD::SPLAT_VECTOR: 2417 SplatIdx = 0; 2418 return V; 2419 case ISD::VECTOR_SHUFFLE: { 2420 if (VT.isScalableVector()) 2421 return SDValue(); 2422 2423 // Check if this is a shuffle node doing a splat. 2424 // TODO - remove this and rely purely on SelectionDAG::isSplatValue, 2425 // getTargetVShiftNode currently struggles without the splat source. 
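// e.g. for a splat of two <4 x i32> operands with splat index 6, the source is
// operand 1 and the lane within it is index 2.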
2426 auto *SVN = cast<ShuffleVectorSDNode>(V); 2427 if (!SVN->isSplat()) 2428 break; 2429 int Idx = SVN->getSplatIndex(); 2430 int NumElts = V.getValueType().getVectorNumElements(); 2431 SplatIdx = Idx % NumElts; 2432 return V.getOperand(Idx / NumElts); 2433 } 2434 } 2435 2436 return SDValue(); 2437 } 2438 2439 SDValue SelectionDAG::getSplatValue(SDValue V) { 2440 int SplatIdx; 2441 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) 2442 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), 2443 SrcVector.getValueType().getScalarType(), SrcVector, 2444 getVectorIdxConstant(SplatIdx, SDLoc(V))); 2445 return SDValue(); 2446 } 2447 2448 const APInt * 2449 SelectionDAG::getValidShiftAmountConstant(SDValue V, 2450 const APInt &DemandedElts) const { 2451 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || 2452 V.getOpcode() == ISD::SRA) && 2453 "Unknown shift node"); 2454 unsigned BitWidth = V.getScalarValueSizeInBits(); 2455 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) { 2456 // Shifting more than the bitwidth is not valid. 2457 const APInt &ShAmt = SA->getAPIntValue(); 2458 if (ShAmt.ult(BitWidth)) 2459 return &ShAmt; 2460 } 2461 return nullptr; 2462 } 2463 2464 const APInt *SelectionDAG::getValidMinimumShiftAmountConstant( 2465 SDValue V, const APInt &DemandedElts) const { 2466 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || 2467 V.getOpcode() == ISD::SRA) && 2468 "Unknown shift node"); 2469 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts)) 2470 return ValidAmt; 2471 unsigned BitWidth = V.getScalarValueSizeInBits(); 2472 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); 2473 if (!BV) 2474 return nullptr; 2475 const APInt *MinShAmt = nullptr; 2476 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2477 if (!DemandedElts[i]) 2478 continue; 2479 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); 2480 if (!SA) 2481 return nullptr; 2482 // Shifting more than the bitwidth is not valid. 2483 const APInt &ShAmt = SA->getAPIntValue(); 2484 if (ShAmt.uge(BitWidth)) 2485 return nullptr; 2486 if (MinShAmt && MinShAmt->ule(ShAmt)) 2487 continue; 2488 MinShAmt = &ShAmt; 2489 } 2490 return MinShAmt; 2491 } 2492 2493 const APInt *SelectionDAG::getValidMaximumShiftAmountConstant( 2494 SDValue V, const APInt &DemandedElts) const { 2495 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || 2496 V.getOpcode() == ISD::SRA) && 2497 "Unknown shift node"); 2498 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts)) 2499 return ValidAmt; 2500 unsigned BitWidth = V.getScalarValueSizeInBits(); 2501 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); 2502 if (!BV) 2503 return nullptr; 2504 const APInt *MaxShAmt = nullptr; 2505 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2506 if (!DemandedElts[i]) 2507 continue; 2508 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); 2509 if (!SA) 2510 return nullptr; 2511 // Shifting more than the bitwidth is not valid. 2512 const APInt &ShAmt = SA->getAPIntValue(); 2513 if (ShAmt.uge(BitWidth)) 2514 return nullptr; 2515 if (MaxShAmt && MaxShAmt->uge(ShAmt)) 2516 continue; 2517 MaxShAmt = &ShAmt; 2518 } 2519 return MaxShAmt; 2520 } 2521 2522 /// Determine which bits of Op are known to be either zero or one and return 2523 /// them in Known. For vectors, the known bits are those that are shared by 2524 /// every vector element. 
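/// For example, a build_vector of the i8 constants 0x0F and 0x07 reports its top
/// four bits as known zero, since those bits are zero in every element.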
2525 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const { 2526 EVT VT = Op.getValueType(); 2527 APInt DemandedElts = VT.isVector() 2528 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2529 : APInt(1, 1); 2530 return computeKnownBits(Op, DemandedElts, Depth); 2531 } 2532 2533 /// Determine which bits of Op are known to be either zero or one and return 2534 /// them in Known. The DemandedElts argument allows us to only collect the known 2535 /// bits that are shared by the requested vector elements. 2536 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts, 2537 unsigned Depth) const { 2538 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2539 2540 KnownBits Known(BitWidth); // Don't know anything. 2541 2542 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2543 // We know all of the bits for a constant! 2544 Known.One = C->getAPIntValue(); 2545 Known.Zero = ~Known.One; 2546 return Known; 2547 } 2548 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { 2549 // We know all of the bits for a constant fp! 2550 Known.One = C->getValueAPF().bitcastToAPInt(); 2551 Known.Zero = ~Known.One; 2552 return Known; 2553 } 2554 2555 if (Depth >= MaxRecursionDepth) 2556 return Known; // Limit search depth. 2557 2558 KnownBits Known2; 2559 unsigned NumElts = DemandedElts.getBitWidth(); 2560 assert((!Op.getValueType().isVector() || 2561 NumElts == Op.getValueType().getVectorNumElements()) && 2562 "Unexpected vector size"); 2563 2564 if (!DemandedElts) 2565 return Known; // No demanded elts, better to assume we don't know anything. 2566 2567 unsigned Opcode = Op.getOpcode(); 2568 switch (Opcode) { 2569 case ISD::BUILD_VECTOR: 2570 // Collect the known bits that are shared by every demanded vector element. 2571 Known.Zero.setAllBits(); Known.One.setAllBits(); 2572 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2573 if (!DemandedElts[i]) 2574 continue; 2575 2576 SDValue SrcOp = Op.getOperand(i); 2577 Known2 = computeKnownBits(SrcOp, Depth + 1); 2578 2579 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2580 if (SrcOp.getValueSizeInBits() != BitWidth) { 2581 assert(SrcOp.getValueSizeInBits() > BitWidth && 2582 "Expected BUILD_VECTOR implicit truncation"); 2583 Known2 = Known2.trunc(BitWidth); 2584 } 2585 2586 // Known bits are the values that are shared by every demanded element. 2587 Known.One &= Known2.One; 2588 Known.Zero &= Known2.Zero; 2589 2590 // If we don't know any bits, early out. 2591 if (Known.isUnknown()) 2592 break; 2593 } 2594 break; 2595 case ISD::VECTOR_SHUFFLE: { 2596 // Collect the known bits that are shared by every vector element referenced 2597 // by the shuffle. 2598 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2599 Known.Zero.setAllBits(); Known.One.setAllBits(); 2600 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2601 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2602 for (unsigned i = 0; i != NumElts; ++i) { 2603 if (!DemandedElts[i]) 2604 continue; 2605 2606 int M = SVN->getMaskElt(i); 2607 if (M < 0) { 2608 // For UNDEF elements, we don't know anything about the common state of 2609 // the shuffle result. 2610 Known.resetAll(); 2611 DemandedLHS.clearAllBits(); 2612 DemandedRHS.clearAllBits(); 2613 break; 2614 } 2615 2616 if ((unsigned)M < NumElts) 2617 DemandedLHS.setBit((unsigned)M % NumElts); 2618 else 2619 DemandedRHS.setBit((unsigned)M % NumElts); 2620 } 2621 // Known bits are the values that are shared by every demanded element. 
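// A referenced lane may come from either operand, so the known bits of the
// demanded parts of both operands are intersected below.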
2622 if (!!DemandedLHS) { 2623 SDValue LHS = Op.getOperand(0); 2624 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1); 2625 Known.One &= Known2.One; 2626 Known.Zero &= Known2.Zero; 2627 } 2628 // If we don't know any bits, early out. 2629 if (Known.isUnknown()) 2630 break; 2631 if (!!DemandedRHS) { 2632 SDValue RHS = Op.getOperand(1); 2633 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1); 2634 Known.One &= Known2.One; 2635 Known.Zero &= Known2.Zero; 2636 } 2637 break; 2638 } 2639 case ISD::CONCAT_VECTORS: { 2640 // Split DemandedElts and test each of the demanded subvectors. 2641 Known.Zero.setAllBits(); Known.One.setAllBits(); 2642 EVT SubVectorVT = Op.getOperand(0).getValueType(); 2643 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 2644 unsigned NumSubVectors = Op.getNumOperands(); 2645 for (unsigned i = 0; i != NumSubVectors; ++i) { 2646 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 2647 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 2648 if (!!DemandedSub) { 2649 SDValue Sub = Op.getOperand(i); 2650 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1); 2651 Known.One &= Known2.One; 2652 Known.Zero &= Known2.Zero; 2653 } 2654 // If we don't know any bits, early out. 2655 if (Known.isUnknown()) 2656 break; 2657 } 2658 break; 2659 } 2660 case ISD::INSERT_SUBVECTOR: { 2661 // Demand any elements from the subvector and the remainder from the src its 2662 // inserted into. 2663 SDValue Src = Op.getOperand(0); 2664 SDValue Sub = Op.getOperand(1); 2665 uint64_t Idx = Op.getConstantOperandVal(2); 2666 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 2667 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 2668 APInt DemandedSrcElts = DemandedElts; 2669 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx); 2670 2671 Known.One.setAllBits(); 2672 Known.Zero.setAllBits(); 2673 if (!!DemandedSubElts) { 2674 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1); 2675 if (Known.isUnknown()) 2676 break; // early-out. 2677 } 2678 if (!!DemandedSrcElts) { 2679 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1); 2680 Known.One &= Known2.One; 2681 Known.Zero &= Known2.Zero; 2682 } 2683 break; 2684 } 2685 case ISD::EXTRACT_SUBVECTOR: { 2686 // Offset the demanded elts by the subvector index. 2687 SDValue Src = Op.getOperand(0); 2688 uint64_t Idx = Op.getConstantOperandVal(1); 2689 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2690 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2691 Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1); 2692 break; 2693 } 2694 case ISD::SCALAR_TO_VECTOR: { 2695 // We know about scalar_to_vector as much as we know about it source, 2696 // which becomes the first element of otherwise unknown vector. 2697 if (DemandedElts != 1) 2698 break; 2699 2700 SDValue N0 = Op.getOperand(0); 2701 Known = computeKnownBits(N0, Depth + 1); 2702 if (N0.getValueSizeInBits() != BitWidth) 2703 Known = Known.trunc(BitWidth); 2704 2705 break; 2706 } 2707 case ISD::BITCAST: { 2708 SDValue N0 = Op.getOperand(0); 2709 EVT SubVT = N0.getValueType(); 2710 unsigned SubBitWidth = SubVT.getScalarSizeInBits(); 2711 2712 // Ignore bitcasts from unsupported types. 2713 if (!(SubVT.isInteger() || SubVT.isFloatingPoint())) 2714 break; 2715 2716 // Fast handling of 'identity' bitcasts. 
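// e.g. a bitcast between v4i32 and v4f32 keeps each lane's bit pattern, so the
// operand's known bits carry over unchanged.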
2717 if (BitWidth == SubBitWidth) { 2718 Known = computeKnownBits(N0, DemandedElts, Depth + 1); 2719 break; 2720 } 2721 2722 bool IsLE = getDataLayout().isLittleEndian(); 2723 2724 // Bitcast 'small element' vector to 'large element' scalar/vector. 2725 if ((BitWidth % SubBitWidth) == 0) { 2726 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2727 2728 // Collect known bits for the (larger) output by collecting the known 2729 // bits from each set of sub elements and shift these into place. 2730 // We need to separately call computeKnownBits for each set of 2731 // sub elements as the knownbits for each is likely to be different. 2732 unsigned SubScale = BitWidth / SubBitWidth; 2733 APInt SubDemandedElts(NumElts * SubScale, 0); 2734 for (unsigned i = 0; i != NumElts; ++i) 2735 if (DemandedElts[i]) 2736 SubDemandedElts.setBit(i * SubScale); 2737 2738 for (unsigned i = 0; i != SubScale; ++i) { 2739 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i), 2740 Depth + 1); 2741 unsigned Shifts = IsLE ? i : SubScale - 1 - i; 2742 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts); 2743 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts); 2744 } 2745 } 2746 2747 // Bitcast 'large element' scalar/vector to 'small element' vector. 2748 if ((SubBitWidth % BitWidth) == 0) { 2749 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 2750 2751 // Collect known bits for the (smaller) output by collecting the known 2752 // bits from the overlapping larger input elements and extracting the 2753 // sub sections we actually care about. 2754 unsigned SubScale = SubBitWidth / BitWidth; 2755 APInt SubDemandedElts(NumElts / SubScale, 0); 2756 for (unsigned i = 0; i != NumElts; ++i) 2757 if (DemandedElts[i]) 2758 SubDemandedElts.setBit(i / SubScale); 2759 2760 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1); 2761 2762 Known.Zero.setAllBits(); Known.One.setAllBits(); 2763 for (unsigned i = 0; i != NumElts; ++i) 2764 if (DemandedElts[i]) { 2765 unsigned Shifts = IsLE ? i : NumElts - 1 - i; 2766 unsigned Offset = (Shifts % SubScale) * BitWidth; 2767 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth); 2768 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth); 2769 // If we don't know any bits, early out. 2770 if (Known.isUnknown()) 2771 break; 2772 } 2773 } 2774 break; 2775 } 2776 case ISD::AND: 2777 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2778 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2779 2780 Known &= Known2; 2781 break; 2782 case ISD::OR: 2783 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2784 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2785 2786 Known |= Known2; 2787 break; 2788 case ISD::XOR: 2789 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2790 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2791 2792 Known ^= Known2; 2793 break; 2794 case ISD::MUL: { 2795 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2796 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2797 2798 // If low bits are zero in either operand, output low known-0 bits. 2799 // Also compute a conservative estimate for high known-0 bits. 2800 // More trickiness is possible, but this is sufficient for the 2801 // interesting case of alignment computation. 
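// e.g. a value known to be a multiple of 4 times one known to be a multiple of
// 8 is known to be a multiple of 32 (at least 5 trailing zero bits).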
2802 unsigned TrailZ = Known.countMinTrailingZeros() + 2803 Known2.countMinTrailingZeros(); 2804 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 2805 Known2.countMinLeadingZeros(), 2806 BitWidth) - BitWidth; 2807 2808 Known.resetAll(); 2809 Known.Zero.setLowBits(std::min(TrailZ, BitWidth)); 2810 Known.Zero.setHighBits(std::min(LeadZ, BitWidth)); 2811 break; 2812 } 2813 case ISD::UDIV: { 2814 // For the purposes of computing leading zeros we can conservatively 2815 // treat a udiv as a logical right shift by the power of 2 known to 2816 // be less than the denominator. 2817 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2818 unsigned LeadZ = Known2.countMinLeadingZeros(); 2819 2820 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2821 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2822 if (RHSMaxLeadingZeros != BitWidth) 2823 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2824 2825 Known.Zero.setHighBits(LeadZ); 2826 break; 2827 } 2828 case ISD::SELECT: 2829 case ISD::VSELECT: 2830 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2831 // If we don't know any bits, early out. 2832 if (Known.isUnknown()) 2833 break; 2834 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1); 2835 2836 // Only known if known in both the LHS and RHS. 2837 Known.One &= Known2.One; 2838 Known.Zero &= Known2.Zero; 2839 break; 2840 case ISD::SELECT_CC: 2841 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1); 2842 // If we don't know any bits, early out. 2843 if (Known.isUnknown()) 2844 break; 2845 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2846 2847 // Only known if known in both the LHS and RHS. 2848 Known.One &= Known2.One; 2849 Known.Zero &= Known2.Zero; 2850 break; 2851 case ISD::SMULO: 2852 case ISD::UMULO: 2853 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 2854 if (Op.getResNo() != 1) 2855 break; 2856 // The boolean result conforms to getBooleanContents. 2857 // If we know the result of a setcc has the top bits zero, use this info. 2858 // We know that we have an integer-based boolean since these operations 2859 // are only available for integer. 2860 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2861 TargetLowering::ZeroOrOneBooleanContent && 2862 BitWidth > 1) 2863 Known.Zero.setBitsFrom(1); 2864 break; 2865 case ISD::SETCC: 2866 case ISD::STRICT_FSETCC: 2867 case ISD::STRICT_FSETCCS: { 2868 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 2869 // If we know the result of a setcc has the top bits zero, use this info. 2870 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 2871 TargetLowering::ZeroOrOneBooleanContent && 2872 BitWidth > 1) 2873 Known.Zero.setBitsFrom(1); 2874 break; 2875 } 2876 case ISD::SHL: 2877 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2878 2879 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 2880 unsigned Shift = ShAmt->getZExtValue(); 2881 Known.Zero <<= Shift; 2882 Known.One <<= Shift; 2883 // Low bits are known zero. 2884 Known.Zero.setLowBits(Shift); 2885 break; 2886 } 2887 2888 // No matter the shift amount, the trailing zeros will stay zero. 2889 Known.Zero = APInt::getLowBitsSet(BitWidth, Known.countMinTrailingZeros()); 2890 Known.One.clearAllBits(); 2891 2892 // Minimum shift low bits are known zero. 
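// e.g. a vector shl whose per-lane amounts are all at least 3 still guarantees
// that the low 3 bits of every lane are zero.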
2893 if (const APInt *ShMinAmt = 2894 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 2895 Known.Zero.setLowBits(ShMinAmt->getZExtValue()); 2896 break; 2897 case ISD::SRL: 2898 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2899 2900 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 2901 unsigned Shift = ShAmt->getZExtValue(); 2902 Known.Zero.lshrInPlace(Shift); 2903 Known.One.lshrInPlace(Shift); 2904 // High bits are known zero. 2905 Known.Zero.setHighBits(Shift); 2906 break; 2907 } 2908 2909 // No matter the shift amount, the leading zeros will stay zero. 2910 Known.Zero = APInt::getHighBitsSet(BitWidth, Known.countMinLeadingZeros()); 2911 Known.One.clearAllBits(); 2912 2913 // Minimum shift high bits are known zero. 2914 if (const APInt *ShMinAmt = 2915 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 2916 Known.Zero.setHighBits(ShMinAmt->getZExtValue()); 2917 break; 2918 case ISD::SRA: 2919 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 2920 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2921 unsigned Shift = ShAmt->getZExtValue(); 2922 // Sign extend known zero/one bit (else is unknown). 2923 Known.Zero.ashrInPlace(Shift); 2924 Known.One.ashrInPlace(Shift); 2925 } 2926 break; 2927 case ISD::FSHL: 2928 case ISD::FSHR: 2929 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) { 2930 unsigned Amt = C->getAPIntValue().urem(BitWidth); 2931 2932 // For fshl, 0-shift returns the 1st arg. 2933 // For fshr, 0-shift returns the 2nd arg. 2934 if (Amt == 0) { 2935 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1), 2936 DemandedElts, Depth + 1); 2937 break; 2938 } 2939 2940 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 2941 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 2942 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2943 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2944 if (Opcode == ISD::FSHL) { 2945 Known.One <<= Amt; 2946 Known.Zero <<= Amt; 2947 Known2.One.lshrInPlace(BitWidth - Amt); 2948 Known2.Zero.lshrInPlace(BitWidth - Amt); 2949 } else { 2950 Known.One <<= BitWidth - Amt; 2951 Known.Zero <<= BitWidth - Amt; 2952 Known2.One.lshrInPlace(Amt); 2953 Known2.Zero.lshrInPlace(Amt); 2954 } 2955 Known.One |= Known2.One; 2956 Known.Zero |= Known2.Zero; 2957 } 2958 break; 2959 case ISD::SIGN_EXTEND_INREG: { 2960 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2961 unsigned EBits = EVT.getScalarSizeInBits(); 2962 2963 // Sign extension. Compute the demanded bits in the result that are not 2964 // present in the input. 2965 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2966 2967 APInt InSignMask = APInt::getSignMask(EBits); 2968 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2969 2970 // If the sign extended bits are demanded, we know that the sign 2971 // bit is demanded. 2972 InSignMask = InSignMask.zext(BitWidth); 2973 if (NewBits.getBoolValue()) 2974 InputDemandedBits |= InSignMask; 2975 2976 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2977 Known.One &= InputDemandedBits; 2978 Known.Zero &= InputDemandedBits; 2979 2980 // If the sign bit of the input is known set or clear, then we know the 2981 // top bits of the result. 
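// e.g. for sign_extend_inreg from i8, a known-zero bit 7 makes bits 8 and above
// known zero, and a known-one bit 7 makes them known one.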
2982 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear 2983 Known.Zero |= NewBits; 2984 Known.One &= ~NewBits; 2985 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set 2986 Known.One |= NewBits; 2987 Known.Zero &= ~NewBits; 2988 } else { // Input sign bit unknown 2989 Known.Zero &= ~NewBits; 2990 Known.One &= ~NewBits; 2991 } 2992 break; 2993 } 2994 case ISD::CTTZ: 2995 case ISD::CTTZ_ZERO_UNDEF: { 2996 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2997 // If we have a known 1, its position is our upper bound. 2998 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 2999 unsigned LowBits = Log2_32(PossibleTZ) + 1; 3000 Known.Zero.setBitsFrom(LowBits); 3001 break; 3002 } 3003 case ISD::CTLZ: 3004 case ISD::CTLZ_ZERO_UNDEF: { 3005 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3006 // If we have a known 1, its position is our upper bound. 3007 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 3008 unsigned LowBits = Log2_32(PossibleLZ) + 1; 3009 Known.Zero.setBitsFrom(LowBits); 3010 break; 3011 } 3012 case ISD::CTPOP: { 3013 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3014 // If we know some of the bits are zero, they can't be one. 3015 unsigned PossibleOnes = Known2.countMaxPopulation(); 3016 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 3017 break; 3018 } 3019 case ISD::LOAD: { 3020 LoadSDNode *LD = cast<LoadSDNode>(Op); 3021 const Constant *Cst = TLI->getTargetConstantFromLoad(LD); 3022 if (ISD::isNON_EXTLoad(LD) && Cst) { 3023 // Determine any common known bits from the loaded constant pool value. 3024 Type *CstTy = Cst->getType(); 3025 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) { 3026 // If its a vector splat, then we can (quickly) reuse the scalar path. 3027 // NOTE: We assume all elements match and none are UNDEF. 3028 if (CstTy->isVectorTy()) { 3029 if (const Constant *Splat = Cst->getSplatValue()) { 3030 Cst = Splat; 3031 CstTy = Cst->getType(); 3032 } 3033 } 3034 // TODO - do we need to handle different bitwidths? 3035 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) { 3036 // Iterate across all vector elements finding common known bits. 3037 Known.One.setAllBits(); 3038 Known.Zero.setAllBits(); 3039 for (unsigned i = 0; i != NumElts; ++i) { 3040 if (!DemandedElts[i]) 3041 continue; 3042 if (Constant *Elt = Cst->getAggregateElement(i)) { 3043 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 3044 const APInt &Value = CInt->getValue(); 3045 Known.One &= Value; 3046 Known.Zero &= ~Value; 3047 continue; 3048 } 3049 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 3050 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 3051 Known.One &= Value; 3052 Known.Zero &= ~Value; 3053 continue; 3054 } 3055 } 3056 Known.One.clearAllBits(); 3057 Known.Zero.clearAllBits(); 3058 break; 3059 } 3060 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) { 3061 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) { 3062 const APInt &Value = CInt->getValue(); 3063 Known.One = Value; 3064 Known.Zero = ~Value; 3065 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) { 3066 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 3067 Known.One = Value; 3068 Known.Zero = ~Value; 3069 } 3070 } 3071 } 3072 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 3073 // If this is a ZEXTLoad and we are looking at the loaded value. 
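// e.g. a zextload of an i8 into an i32 value has bits 8..31 known zero.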
3074 EVT VT = LD->getMemoryVT(); 3075 unsigned MemBits = VT.getScalarSizeInBits(); 3076 Known.Zero.setBitsFrom(MemBits); 3077 } else if (const MDNode *Ranges = LD->getRanges()) { 3078 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 3079 computeKnownBitsFromRangeMetadata(*Ranges, Known); 3080 } 3081 break; 3082 } 3083 case ISD::ZERO_EXTEND_VECTOR_INREG: { 3084 EVT InVT = Op.getOperand(0).getValueType(); 3085 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3086 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3087 Known = Known.zext(BitWidth); 3088 break; 3089 } 3090 case ISD::ZERO_EXTEND: { 3091 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3092 Known = Known.zext(BitWidth); 3093 break; 3094 } 3095 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3096 EVT InVT = Op.getOperand(0).getValueType(); 3097 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3098 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3099 // If the sign bit is known to be zero or one, then sext will extend 3100 // it to the top bits, else it will just zext. 3101 Known = Known.sext(BitWidth); 3102 break; 3103 } 3104 case ISD::SIGN_EXTEND: { 3105 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3106 // If the sign bit is known to be zero or one, then sext will extend 3107 // it to the top bits, else it will just zext. 3108 Known = Known.sext(BitWidth); 3109 break; 3110 } 3111 case ISD::ANY_EXTEND_VECTOR_INREG: { 3112 EVT InVT = Op.getOperand(0).getValueType(); 3113 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3114 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3115 Known = Known.anyext(BitWidth); 3116 break; 3117 } 3118 case ISD::ANY_EXTEND: { 3119 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3120 Known = Known.anyext(BitWidth); 3121 break; 3122 } 3123 case ISD::TRUNCATE: { 3124 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3125 Known = Known.trunc(BitWidth); 3126 break; 3127 } 3128 case ISD::AssertZext: { 3129 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 3130 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 3131 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3132 Known.Zero |= (~InMask); 3133 Known.One &= (~Known.Zero); 3134 break; 3135 } 3136 case ISD::FGETSIGN: 3137 // All bits are zero except the low bit. 3138 Known.Zero.setBitsFrom(1); 3139 break; 3140 case ISD::USUBO: 3141 case ISD::SSUBO: 3142 if (Op.getResNo() == 1) { 3143 // If we know the result of a setcc has the top bits zero, use this info. 3144 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3145 TargetLowering::ZeroOrOneBooleanContent && 3146 BitWidth > 1) 3147 Known.Zero.setBitsFrom(1); 3148 break; 3149 } 3150 LLVM_FALLTHROUGH; 3151 case ISD::SUB: 3152 case ISD::SUBC: { 3153 assert(Op.getResNo() == 0 && 3154 "We only compute knownbits for the difference here."); 3155 3156 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3157 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3158 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false, 3159 Known, Known2); 3160 break; 3161 } 3162 case ISD::UADDO: 3163 case ISD::SADDO: 3164 case ISD::ADDCARRY: 3165 if (Op.getResNo() == 1) { 3166 // If we know the result of a setcc has the top bits zero, use this info. 
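// The overflow result is a boolean, so under ZeroOrOneBooleanContent only bit 0
// can possibly be set.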
3167 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3168 TargetLowering::ZeroOrOneBooleanContent && 3169 BitWidth > 1) 3170 Known.Zero.setBitsFrom(1); 3171 break; 3172 } 3173 LLVM_FALLTHROUGH; 3174 case ISD::ADD: 3175 case ISD::ADDC: 3176 case ISD::ADDE: { 3177 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here."); 3178 3179 // With ADDE and ADDCARRY, a carry bit may be added in. 3180 KnownBits Carry(1); 3181 if (Opcode == ISD::ADDE) 3182 // Can't track carry from glue, set carry to unknown. 3183 Carry.resetAll(); 3184 else if (Opcode == ISD::ADDCARRY) 3185 // TODO: Compute known bits for the carry operand. Not sure if it is worth 3186 // the trouble (how often will we find a known carry bit). And I haven't 3187 // tested this very much yet, but something like this might work: 3188 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1); 3189 // Carry = Carry.zextOrTrunc(1, false); 3190 Carry.resetAll(); 3191 else 3192 Carry.setAllZero(); 3193 3194 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3195 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3196 Known = KnownBits::computeForAddCarry(Known, Known2, Carry); 3197 break; 3198 } 3199 case ISD::SREM: 3200 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 3201 const APInt &RA = Rem->getAPIntValue().abs(); 3202 if (RA.isPowerOf2()) { 3203 APInt LowBits = RA - 1; 3204 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3205 3206 // The low bits of the first operand are unchanged by the srem. 3207 Known.Zero = Known2.Zero & LowBits; 3208 Known.One = Known2.One & LowBits; 3209 3210 // If the first operand is non-negative or has all low bits zero, then 3211 // the upper bits are all zero. 3212 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero)) 3213 Known.Zero |= ~LowBits; 3214 3215 // If the first operand is negative and not all low bits are zero, then 3216 // the upper bits are all one. 3217 if (Known2.isNegative() && LowBits.intersects(Known2.One)) 3218 Known.One |= ~LowBits; 3219 assert((Known.Zero & Known.One) == 0&&"Bits known to be one AND zero?"); 3220 } 3221 } 3222 break; 3223 case ISD::UREM: { 3224 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 3225 const APInt &RA = Rem->getAPIntValue(); 3226 if (RA.isPowerOf2()) { 3227 APInt LowBits = (RA - 1); 3228 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3229 3230 // The upper bits are all zero, the lower ones are unchanged. 3231 Known.Zero = Known2.Zero | ~LowBits; 3232 Known.One = Known2.One & LowBits; 3233 break; 3234 } 3235 } 3236 3237 // Since the result is less than or equal to either operand, any leading 3238 // zero bits in either operand must also exist in the result. 
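// e.g. an i32 urem where either operand is known to be below 256 produces a
// result below 256, giving at least 24 leading zero bits.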
3239 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3240 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3241 3242 uint32_t Leaders = 3243 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 3244 Known.resetAll(); 3245 Known.Zero.setHighBits(Leaders); 3246 break; 3247 } 3248 case ISD::EXTRACT_ELEMENT: { 3249 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3250 const unsigned Index = Op.getConstantOperandVal(1); 3251 const unsigned EltBitWidth = Op.getValueSizeInBits(); 3252 3253 // Remove low part of known bits mask 3254 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth); 3255 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth); 3256 3257 // Remove high part of known bit mask 3258 Known = Known.trunc(EltBitWidth); 3259 break; 3260 } 3261 case ISD::EXTRACT_VECTOR_ELT: { 3262 SDValue InVec = Op.getOperand(0); 3263 SDValue EltNo = Op.getOperand(1); 3264 EVT VecVT = InVec.getValueType(); 3265 const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); 3266 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3267 3268 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 3269 // anything about the extended bits. 3270 if (BitWidth > EltBitWidth) 3271 Known = Known.trunc(EltBitWidth); 3272 3273 // If we know the element index, just demand that vector element, else for 3274 // an unknown element index, ignore DemandedElts and demand them all. 3275 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 3276 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3277 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) 3278 DemandedSrcElts = 3279 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); 3280 3281 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1); 3282 if (BitWidth > EltBitWidth) 3283 Known = Known.anyext(BitWidth); 3284 break; 3285 } 3286 case ISD::INSERT_VECTOR_ELT: { 3287 // If we know the element index, split the demand between the 3288 // source vector and the inserted element, otherwise assume we need 3289 // the original demanded vector elements and the value. 
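    // The result's known bits are the intersection of what is known about the
    // inserted scalar (when its lane is demanded) and about the remaining
    // demanded lanes of the source vector.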
3290 SDValue InVec = Op.getOperand(0); 3291 SDValue InVal = Op.getOperand(1); 3292 SDValue EltNo = Op.getOperand(2); 3293 bool DemandedVal = true; 3294 APInt DemandedVecElts = DemandedElts; 3295 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3296 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3297 unsigned EltIdx = CEltNo->getZExtValue(); 3298 DemandedVal = !!DemandedElts[EltIdx]; 3299 DemandedVecElts.clearBit(EltIdx); 3300 } 3301 Known.One.setAllBits(); 3302 Known.Zero.setAllBits(); 3303 if (DemandedVal) { 3304 Known2 = computeKnownBits(InVal, Depth + 1); 3305 Known.One &= Known2.One.zextOrTrunc(BitWidth); 3306 Known.Zero &= Known2.Zero.zextOrTrunc(BitWidth); 3307 } 3308 if (!!DemandedVecElts) { 3309 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1); 3310 Known.One &= Known2.One; 3311 Known.Zero &= Known2.Zero; 3312 } 3313 break; 3314 } 3315 case ISD::BITREVERSE: { 3316 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3317 Known.Zero = Known2.Zero.reverseBits(); 3318 Known.One = Known2.One.reverseBits(); 3319 break; 3320 } 3321 case ISD::BSWAP: { 3322 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3323 Known.Zero = Known2.Zero.byteSwap(); 3324 Known.One = Known2.One.byteSwap(); 3325 break; 3326 } 3327 case ISD::ABS: { 3328 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3329 3330 // If the source's MSB is zero then we know the rest of the bits already. 3331 if (Known2.isNonNegative()) { 3332 Known.Zero = Known2.Zero; 3333 Known.One = Known2.One; 3334 break; 3335 } 3336 3337 // We only know that the absolute values's MSB will be zero iff there is 3338 // a set bit that isn't the sign bit (otherwise it could be INT_MIN). 3339 Known2.One.clearSignBit(); 3340 if (Known2.One.getBoolValue()) { 3341 Known.Zero = APInt::getSignMask(BitWidth); 3342 break; 3343 } 3344 break; 3345 } 3346 case ISD::UMIN: { 3347 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3348 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3349 3350 // UMIN - we know that the result will have the maximum of the 3351 // known zero leading bits of the inputs. 3352 unsigned LeadZero = Known.countMinLeadingZeros(); 3353 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros()); 3354 3355 Known.Zero &= Known2.Zero; 3356 Known.One &= Known2.One; 3357 Known.Zero.setHighBits(LeadZero); 3358 break; 3359 } 3360 case ISD::UMAX: { 3361 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3362 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3363 3364 // UMAX - we know that the result will have the maximum of the 3365 // known one leading bits of the inputs. 3366 unsigned LeadOne = Known.countMinLeadingOnes(); 3367 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes()); 3368 3369 Known.Zero &= Known2.Zero; 3370 Known.One &= Known2.One; 3371 Known.One.setHighBits(LeadOne); 3372 break; 3373 } 3374 case ISD::SMIN: 3375 case ISD::SMAX: { 3376 // If we have a clamp pattern, we know that the number of sign bits will be 3377 // the minimum of the clamp min/max range. 3378 bool IsMax = (Opcode == ISD::SMAX); 3379 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3380 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3381 if (Op.getOperand(0).getOpcode() == (IsMax ? 
ISD::SMIN : ISD::SMAX)) 3382 CstHigh = 3383 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3384 if (CstLow && CstHigh) { 3385 if (!IsMax) 3386 std::swap(CstLow, CstHigh); 3387 3388 const APInt &ValueLow = CstLow->getAPIntValue(); 3389 const APInt &ValueHigh = CstHigh->getAPIntValue(); 3390 if (ValueLow.sle(ValueHigh)) { 3391 unsigned LowSignBits = ValueLow.getNumSignBits(); 3392 unsigned HighSignBits = ValueHigh.getNumSignBits(); 3393 unsigned MinSignBits = std::min(LowSignBits, HighSignBits); 3394 if (ValueLow.isNegative() && ValueHigh.isNegative()) { 3395 Known.One.setHighBits(MinSignBits); 3396 break; 3397 } 3398 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) { 3399 Known.Zero.setHighBits(MinSignBits); 3400 break; 3401 } 3402 } 3403 } 3404 3405 // Fallback - just get the shared known bits of the operands. 3406 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3407 if (Known.isUnknown()) break; // Early-out 3408 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3409 Known.Zero &= Known2.Zero; 3410 Known.One &= Known2.One; 3411 break; 3412 } 3413 case ISD::FrameIndex: 3414 case ISD::TargetFrameIndex: 3415 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth); 3416 break; 3417 3418 default: 3419 if (Opcode < ISD::BUILTIN_OP_END) 3420 break; 3421 LLVM_FALLTHROUGH; 3422 case ISD::INTRINSIC_WO_CHAIN: 3423 case ISD::INTRINSIC_W_CHAIN: 3424 case ISD::INTRINSIC_VOID: 3425 // Allow the target to implement this method for its nodes. 3426 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); 3427 break; 3428 } 3429 3430 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 3431 return Known; 3432 } 3433 3434 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0, 3435 SDValue N1) const { 3436 // X + 0 never overflow 3437 if (isNullConstant(N1)) 3438 return OFK_Never; 3439 3440 KnownBits N1Known = computeKnownBits(N1); 3441 if (N1Known.Zero.getBoolValue()) { 3442 KnownBits N0Known = computeKnownBits(N0); 3443 3444 bool overflow; 3445 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow); 3446 if (!overflow) 3447 return OFK_Never; 3448 } 3449 3450 // mulhi + 1 never overflow 3451 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 && 3452 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue()) 3453 return OFK_Never; 3454 3455 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) { 3456 KnownBits N0Known = computeKnownBits(N0); 3457 3458 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue()) 3459 return OFK_Never; 3460 } 3461 3462 return OFK_Sometime; 3463 } 3464 3465 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const { 3466 EVT OpVT = Val.getValueType(); 3467 unsigned BitWidth = OpVT.getScalarSizeInBits(); 3468 3469 // Is the constant a known power of 2? 3470 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val)) 3471 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3472 3473 // A left-shift of a constant one will have exactly one bit set because 3474 // shifting the bit off the end is undefined. 3475 if (Val.getOpcode() == ISD::SHL) { 3476 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3477 if (C && C->getAPIntValue() == 1) 3478 return true; 3479 } 3480 3481 // Similarly, a logical right-shift of a constant sign-bit will have exactly 3482 // one bit set. 
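    // For example, for i32 (srl 0x80000000, x) has exactly one bit set for
    // any in-range shift amount x.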
3483 if (Val.getOpcode() == ISD::SRL) { 3484 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3485 if (C && C->getAPIntValue().isSignMask()) 3486 return true; 3487 } 3488 3489 // Are all operands of a build vector constant powers of two? 3490 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3491 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3492 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3493 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3494 return false; 3495 })) 3496 return true; 3497 3498 // More could be done here, though the above checks are enough 3499 // to handle some common cases. 3500 3501 // Fall back to computeKnownBits to catch other known cases. 3502 KnownBits Known = computeKnownBits(Val); 3503 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3504 } 3505 3506 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3507 EVT VT = Op.getValueType(); 3508 APInt DemandedElts = VT.isVector() 3509 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 3510 : APInt(1, 1); 3511 return ComputeNumSignBits(Op, DemandedElts, Depth); 3512 } 3513 3514 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3515 unsigned Depth) const { 3516 EVT VT = Op.getValueType(); 3517 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3518 unsigned VTBits = VT.getScalarSizeInBits(); 3519 unsigned NumElts = DemandedElts.getBitWidth(); 3520 unsigned Tmp, Tmp2; 3521 unsigned FirstAnswer = 1; 3522 3523 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3524 const APInt &Val = C->getAPIntValue(); 3525 return Val.getNumSignBits(); 3526 } 3527 3528 if (Depth >= MaxRecursionDepth) 3529 return 1; // Limit search depth. 3530 3531 if (!DemandedElts) 3532 return 1; // No demanded elts, better to assume we don't know anything. 3533 3534 unsigned Opcode = Op.getOpcode(); 3535 switch (Opcode) { 3536 default: break; 3537 case ISD::AssertSext: 3538 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3539 return VTBits-Tmp+1; 3540 case ISD::AssertZext: 3541 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3542 return VTBits-Tmp; 3543 3544 case ISD::BUILD_VECTOR: 3545 Tmp = VTBits; 3546 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3547 if (!DemandedElts[i]) 3548 continue; 3549 3550 SDValue SrcOp = Op.getOperand(i); 3551 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 3552 3553 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3554 if (SrcOp.getValueSizeInBits() != VTBits) { 3555 assert(SrcOp.getValueSizeInBits() > VTBits && 3556 "Expected BUILD_VECTOR implicit truncation"); 3557 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3558 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3559 } 3560 Tmp = std::min(Tmp, Tmp2); 3561 } 3562 return Tmp; 3563 3564 case ISD::VECTOR_SHUFFLE: { 3565 // Collect the minimum number of sign bits that are shared by every vector 3566 // element referenced by the shuffle. 3567 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3568 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3569 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3570 for (unsigned i = 0; i != NumElts; ++i) { 3571 int M = SVN->getMaskElt(i); 3572 if (!DemandedElts[i]) 3573 continue; 3574 // For UNDEF elements, we don't know anything about the common state of 3575 // the shuffle result. 
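    // (Mask values below NumElts select lanes of operand 0, values of NumElts
    // and above select lanes of operand 1, and a negative value means undef.)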
3576 if (M < 0) 3577 return 1; 3578 if ((unsigned)M < NumElts) 3579 DemandedLHS.setBit((unsigned)M % NumElts); 3580 else 3581 DemandedRHS.setBit((unsigned)M % NumElts); 3582 } 3583 Tmp = std::numeric_limits<unsigned>::max(); 3584 if (!!DemandedLHS) 3585 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3586 if (!!DemandedRHS) { 3587 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3588 Tmp = std::min(Tmp, Tmp2); 3589 } 3590 // If we don't know anything, early out and try computeKnownBits fall-back. 3591 if (Tmp == 1) 3592 break; 3593 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3594 return Tmp; 3595 } 3596 3597 case ISD::BITCAST: { 3598 SDValue N0 = Op.getOperand(0); 3599 EVT SrcVT = N0.getValueType(); 3600 unsigned SrcBits = SrcVT.getScalarSizeInBits(); 3601 3602 // Ignore bitcasts from unsupported types.. 3603 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) 3604 break; 3605 3606 // Fast handling of 'identity' bitcasts. 3607 if (VTBits == SrcBits) 3608 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3609 3610 bool IsLE = getDataLayout().isLittleEndian(); 3611 3612 // Bitcast 'large element' scalar/vector to 'small element' vector. 3613 if ((SrcBits % VTBits) == 0) { 3614 assert(VT.isVector() && "Expected bitcast to vector"); 3615 3616 unsigned Scale = SrcBits / VTBits; 3617 APInt SrcDemandedElts(NumElts / Scale, 0); 3618 for (unsigned i = 0; i != NumElts; ++i) 3619 if (DemandedElts[i]) 3620 SrcDemandedElts.setBit(i / Scale); 3621 3622 // Fast case - sign splat can be simply split across the small elements. 3623 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1); 3624 if (Tmp == SrcBits) 3625 return VTBits; 3626 3627 // Slow case - determine how far the sign extends into each sub-element. 3628 Tmp2 = VTBits; 3629 for (unsigned i = 0; i != NumElts; ++i) 3630 if (DemandedElts[i]) { 3631 unsigned SubOffset = i % Scale; 3632 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); 3633 SubOffset = SubOffset * VTBits; 3634 if (Tmp <= SubOffset) 3635 return 1; 3636 Tmp2 = std::min(Tmp2, Tmp - SubOffset); 3637 } 3638 return Tmp2; 3639 } 3640 break; 3641 } 3642 3643 case ISD::SIGN_EXTEND: 3644 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3645 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3646 case ISD::SIGN_EXTEND_INREG: 3647 // Max of the input and what this extends. 3648 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3649 Tmp = VTBits-Tmp+1; 3650 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3651 return std::max(Tmp, Tmp2); 3652 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3653 SDValue Src = Op.getOperand(0); 3654 EVT SrcVT = Src.getValueType(); 3655 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements()); 3656 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3657 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3658 } 3659 case ISD::SRA: 3660 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3661 // SRA X, C -> adds C sign bits. 
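    // For example, (sra i32 X, 24) is known to have at least 25 sign bits.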
3662 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) 3663 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits); 3664 else if (const APInt *ShAmt = 3665 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 3666 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits); 3667 return Tmp; 3668 case ISD::SHL: 3669 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 3670 // shl destroys sign bits, ensure it doesn't shift out all sign bits. 3671 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3672 if (ShAmt->ult(Tmp)) 3673 return Tmp - ShAmt->getZExtValue(); 3674 } else if (const APInt *ShAmt = 3675 getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 3676 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3677 if (ShAmt->ult(Tmp)) 3678 return Tmp - ShAmt->getZExtValue(); 3679 } 3680 break; 3681 case ISD::AND: 3682 case ISD::OR: 3683 case ISD::XOR: // NOT is handled here. 3684 // Logical binary ops preserve the number of sign bits at the worst. 3685 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3686 if (Tmp != 1) { 3687 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3688 FirstAnswer = std::min(Tmp, Tmp2); 3689 // We computed what we know about the sign bits as our first 3690 // answer. Now proceed to the generic code that uses 3691 // computeKnownBits, and pick whichever answer is better. 3692 } 3693 break; 3694 3695 case ISD::SELECT: 3696 case ISD::VSELECT: 3697 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3698 if (Tmp == 1) return 1; // Early out. 3699 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3700 return std::min(Tmp, Tmp2); 3701 case ISD::SELECT_CC: 3702 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3703 if (Tmp == 1) return 1; // Early out. 3704 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3705 return std::min(Tmp, Tmp2); 3706 3707 case ISD::SMIN: 3708 case ISD::SMAX: { 3709 // If we have a clamp pattern, we know that the number of sign bits will be 3710 // the minimum of the clamp min/max range. 3711 bool IsMax = (Opcode == ISD::SMAX); 3712 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3713 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3714 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3715 CstHigh = 3716 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3717 if (CstLow && CstHigh) { 3718 if (!IsMax) 3719 std::swap(CstLow, CstHigh); 3720 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { 3721 Tmp = CstLow->getAPIntValue().getNumSignBits(); 3722 Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); 3723 return std::min(Tmp, Tmp2); 3724 } 3725 } 3726 3727 // Fallback - just get the minimum number of sign bits of the operands. 3728 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3729 if (Tmp == 1) 3730 return 1; // Early out. 3731 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3732 return std::min(Tmp, Tmp2); 3733 } 3734 case ISD::UMIN: 3735 case ISD::UMAX: 3736 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3737 if (Tmp == 1) 3738 return 1; // Early out. 
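    // Unsigned min/max returns one of its operands, so the result has at
    // least the smaller of the two operands' sign-bit counts.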
3739 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3740 return std::min(Tmp, Tmp2); 3741 case ISD::SADDO: 3742 case ISD::UADDO: 3743 case ISD::SSUBO: 3744 case ISD::USUBO: 3745 case ISD::SMULO: 3746 case ISD::UMULO: 3747 if (Op.getResNo() != 1) 3748 break; 3749 // The boolean result conforms to getBooleanContents. Fall through. 3750 // If setcc returns 0/-1, all bits are sign bits. 3751 // We know that we have an integer-based boolean since these operations 3752 // are only available for integer. 3753 if (TLI->getBooleanContents(VT.isVector(), false) == 3754 TargetLowering::ZeroOrNegativeOneBooleanContent) 3755 return VTBits; 3756 break; 3757 case ISD::SETCC: 3758 case ISD::STRICT_FSETCC: 3759 case ISD::STRICT_FSETCCS: { 3760 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 3761 // If setcc returns 0/-1, all bits are sign bits. 3762 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 3763 TargetLowering::ZeroOrNegativeOneBooleanContent) 3764 return VTBits; 3765 break; 3766 } 3767 case ISD::ROTL: 3768 case ISD::ROTR: 3769 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3770 3771 // If we're rotating an 0/-1 value, then it stays an 0/-1 value. 3772 if (Tmp == VTBits) 3773 return VTBits; 3774 3775 if (ConstantSDNode *C = 3776 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) { 3777 unsigned RotAmt = C->getAPIntValue().urem(VTBits); 3778 3779 // Handle rotate right by N like a rotate left by 32-N. 3780 if (Opcode == ISD::ROTR) 3781 RotAmt = (VTBits - RotAmt) % VTBits; 3782 3783 // If we aren't rotating out all of the known-in sign bits, return the 3784 // number that are left. This handles rotl(sext(x), 1) for example. 3785 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt); 3786 } 3787 break; 3788 case ISD::ADD: 3789 case ISD::ADDC: 3790 // Add can have at most one carry bit. Thus we know that the output 3791 // is, at worst, one more bit than the inputs. 3792 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3793 if (Tmp == 1) return 1; // Early out. 3794 3795 // Special case decrementing a value (ADD X, -1): 3796 if (ConstantSDNode *CRHS = 3797 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) 3798 if (CRHS->isAllOnesValue()) { 3799 KnownBits Known = 3800 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3801 3802 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3803 // sign bits set. 3804 if ((Known.Zero | 1).isAllOnesValue()) 3805 return VTBits; 3806 3807 // If we are subtracting one from a positive number, there is no carry 3808 // out of the result. 3809 if (Known.isNonNegative()) 3810 return Tmp; 3811 } 3812 3813 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3814 if (Tmp2 == 1) return 1; // Early out. 3815 return std::min(Tmp, Tmp2) - 1; 3816 case ISD::SUB: 3817 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3818 if (Tmp2 == 1) return 1; // Early out. 3819 3820 // Handle NEG. 3821 if (ConstantSDNode *CLHS = 3822 isConstOrConstSplat(Op.getOperand(0), DemandedElts)) 3823 if (CLHS->isNullValue()) { 3824 KnownBits Known = 3825 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3826 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3827 // sign bits set. 3828 if ((Known.Zero | 1).isAllOnesValue()) 3829 return VTBits; 3830 3831 // If the input is known to be positive (the sign bit is known clear), 3832 // the output of the NEG has the same number of sign bits as the input. 
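    // For example, an i32 X known non-negative with 25 sign bits lies in
    // [0, 127], so 0 - X lies in [-127, 0] and still has 25 sign bits.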
3833 if (Known.isNonNegative()) 3834 return Tmp2; 3835 3836 // Otherwise, we treat this like a SUB. 3837 } 3838 3839 // Sub can have at most one carry bit. Thus we know that the output 3840 // is, at worst, one more bit than the inputs. 3841 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3842 if (Tmp == 1) return 1; // Early out. 3843 return std::min(Tmp, Tmp2) - 1; 3844 case ISD::MUL: { 3845 // The output of the Mul can be at most twice the valid bits in the inputs. 3846 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3847 if (SignBitsOp0 == 1) 3848 break; 3849 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3850 if (SignBitsOp1 == 1) 3851 break; 3852 unsigned OutValidBits = 3853 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1); 3854 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1; 3855 } 3856 case ISD::TRUNCATE: { 3857 // Check if the sign bits of source go down as far as the truncated value. 3858 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); 3859 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3860 if (NumSrcSignBits > (NumSrcBits - VTBits)) 3861 return NumSrcSignBits - (NumSrcBits - VTBits); 3862 break; 3863 } 3864 case ISD::EXTRACT_ELEMENT: { 3865 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3866 const int BitWidth = Op.getValueSizeInBits(); 3867 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; 3868 3869 // Get reverse index (starting from 1), Op1 value indexes elements from 3870 // little end. Sign starts at big end. 3871 const int rIndex = Items - 1 - Op.getConstantOperandVal(1); 3872 3873 // If the sign portion ends in our element the subtraction gives correct 3874 // result. Otherwise it gives either negative or > bitwidth result 3875 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); 3876 } 3877 case ISD::INSERT_VECTOR_ELT: { 3878 // If we know the element index, split the demand between the 3879 // source vector and the inserted element, otherwise assume we need 3880 // the original demanded vector elements and the value. 3881 SDValue InVec = Op.getOperand(0); 3882 SDValue InVal = Op.getOperand(1); 3883 SDValue EltNo = Op.getOperand(2); 3884 bool DemandedVal = true; 3885 APInt DemandedVecElts = DemandedElts; 3886 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3887 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3888 unsigned EltIdx = CEltNo->getZExtValue(); 3889 DemandedVal = !!DemandedElts[EltIdx]; 3890 DemandedVecElts.clearBit(EltIdx); 3891 } 3892 Tmp = std::numeric_limits<unsigned>::max(); 3893 if (DemandedVal) { 3894 // TODO - handle implicit truncation of inserted elements. 
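    // Until then, bail out when the inserted scalar's width differs from the
    // vector element width and let the generic fallback below handle it.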
3895 if (InVal.getScalarValueSizeInBits() != VTBits) 3896 break; 3897 Tmp2 = ComputeNumSignBits(InVal, Depth + 1); 3898 Tmp = std::min(Tmp, Tmp2); 3899 } 3900 if (!!DemandedVecElts) { 3901 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1); 3902 Tmp = std::min(Tmp, Tmp2); 3903 } 3904 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3905 return Tmp; 3906 } 3907 case ISD::EXTRACT_VECTOR_ELT: { 3908 SDValue InVec = Op.getOperand(0); 3909 SDValue EltNo = Op.getOperand(1); 3910 EVT VecVT = InVec.getValueType(); 3911 const unsigned BitWidth = Op.getValueSizeInBits(); 3912 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); 3913 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3914 3915 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know 3916 // anything about sign bits. But if the sizes match we can derive knowledge 3917 // about sign bits from the vector operand. 3918 if (BitWidth != EltBitWidth) 3919 break; 3920 3921 // If we know the element index, just demand that vector element, else for 3922 // an unknown element index, ignore DemandedElts and demand them all. 3923 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 3924 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3925 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) 3926 DemandedSrcElts = 3927 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); 3928 3929 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1); 3930 } 3931 case ISD::EXTRACT_SUBVECTOR: { 3932 // Offset the demanded elts by the subvector index. 3933 SDValue Src = Op.getOperand(0); 3934 uint64_t Idx = Op.getConstantOperandVal(1); 3935 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3936 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 3937 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1); 3938 } 3939 case ISD::CONCAT_VECTORS: { 3940 // Determine the minimum number of sign bits across all demanded 3941 // elts of the input vectors. Early out if the result is already 1. 3942 Tmp = std::numeric_limits<unsigned>::max(); 3943 EVT SubVectorVT = Op.getOperand(0).getValueType(); 3944 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 3945 unsigned NumSubVectors = Op.getNumOperands(); 3946 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) { 3947 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 3948 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 3949 if (!DemandedSub) 3950 continue; 3951 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1); 3952 Tmp = std::min(Tmp, Tmp2); 3953 } 3954 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3955 return Tmp; 3956 } 3957 case ISD::INSERT_SUBVECTOR: { 3958 // Demand any elements from the subvector and the remainder from the src its 3959 // inserted into. 
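    // The demanded elements are split at the insertion index: lanes covered by
    // the inserted subvector are queried on Sub, the rest on Src, and the
    // smaller of the two answers is returned.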
3960 SDValue Src = Op.getOperand(0); 3961 SDValue Sub = Op.getOperand(1); 3962 uint64_t Idx = Op.getConstantOperandVal(2); 3963 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 3964 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 3965 APInt DemandedSrcElts = DemandedElts; 3966 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx); 3967 3968 Tmp = std::numeric_limits<unsigned>::max(); 3969 if (!!DemandedSubElts) { 3970 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1); 3971 if (Tmp == 1) 3972 return 1; // early-out 3973 } 3974 if (!!DemandedSrcElts) { 3975 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1); 3976 Tmp = std::min(Tmp, Tmp2); 3977 } 3978 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3979 return Tmp; 3980 } 3981 } 3982 3983 // If we are looking at the loaded value of the SDNode. 3984 if (Op.getResNo() == 0) { 3985 // Handle LOADX separately here. EXTLOAD case will fallthrough. 3986 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 3987 unsigned ExtType = LD->getExtensionType(); 3988 switch (ExtType) { 3989 default: break; 3990 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known. 3991 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3992 return VTBits - Tmp + 1; 3993 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known. 3994 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3995 return VTBits - Tmp; 3996 case ISD::NON_EXTLOAD: 3997 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) { 3998 // We only need to handle vectors - computeKnownBits should handle 3999 // scalar cases. 4000 Type *CstTy = Cst->getType(); 4001 if (CstTy->isVectorTy() && 4002 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) { 4003 Tmp = VTBits; 4004 for (unsigned i = 0; i != NumElts; ++i) { 4005 if (!DemandedElts[i]) 4006 continue; 4007 if (Constant *Elt = Cst->getAggregateElement(i)) { 4008 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 4009 const APInt &Value = CInt->getValue(); 4010 Tmp = std::min(Tmp, Value.getNumSignBits()); 4011 continue; 4012 } 4013 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 4014 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 4015 Tmp = std::min(Tmp, Value.getNumSignBits()); 4016 continue; 4017 } 4018 } 4019 // Unknown type. Conservatively assume no bits match sign bit. 4020 return 1; 4021 } 4022 return Tmp; 4023 } 4024 } 4025 break; 4026 } 4027 } 4028 } 4029 4030 // Allow the target to implement this method for its nodes. 4031 if (Opcode >= ISD::BUILTIN_OP_END || 4032 Opcode == ISD::INTRINSIC_WO_CHAIN || 4033 Opcode == ISD::INTRINSIC_W_CHAIN || 4034 Opcode == ISD::INTRINSIC_VOID) { 4035 unsigned NumBits = 4036 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 4037 if (NumBits > 1) 4038 FirstAnswer = std::max(FirstAnswer, NumBits); 4039 } 4040 4041 // Finally, if we can prove that the top bits of the result are 0's or 1's, 4042 // use this information. 4043 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); 4044 4045 APInt Mask; 4046 if (Known.isNonNegative()) { // sign bit is 0 4047 Mask = Known.Zero; 4048 } else if (Known.isNegative()) { // sign bit is 1; 4049 Mask = Known.One; 4050 } else { 4051 // Nothing known. 4052 return FirstAnswer; 4053 } 4054 4055 // Okay, we know that the sign bit in Mask is set. Use CLO to determine 4056 // the number of identical bits in the top of the input value. 
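    // After lining the mask up with the value's top bit, countLeadingOnes()
    // gives the length of the run of bits known to match the sign bit.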
4057 Mask <<= Mask.getBitWidth()-VTBits; 4058 return std::max(FirstAnswer, Mask.countLeadingOnes()); 4059 } 4060 4061 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 4062 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 4063 !isa<ConstantSDNode>(Op.getOperand(1))) 4064 return false; 4065 4066 if (Op.getOpcode() == ISD::OR && 4067 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) 4068 return false; 4069 4070 return true; 4071 } 4072 4073 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { 4074 // If we're told that NaNs won't happen, assume they won't. 4075 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) 4076 return true; 4077 4078 if (Depth >= MaxRecursionDepth) 4079 return false; // Limit search depth. 4080 4081 // TODO: Handle vectors. 4082 // If the value is a constant, we can obviously see if it is a NaN or not. 4083 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) { 4084 return !C->getValueAPF().isNaN() || 4085 (SNaN && !C->getValueAPF().isSignaling()); 4086 } 4087 4088 unsigned Opcode = Op.getOpcode(); 4089 switch (Opcode) { 4090 case ISD::FADD: 4091 case ISD::FSUB: 4092 case ISD::FMUL: 4093 case ISD::FDIV: 4094 case ISD::FREM: 4095 case ISD::FSIN: 4096 case ISD::FCOS: { 4097 if (SNaN) 4098 return true; 4099 // TODO: Need isKnownNeverInfinity 4100 return false; 4101 } 4102 case ISD::FCANONICALIZE: 4103 case ISD::FEXP: 4104 case ISD::FEXP2: 4105 case ISD::FTRUNC: 4106 case ISD::FFLOOR: 4107 case ISD::FCEIL: 4108 case ISD::FROUND: 4109 case ISD::FRINT: 4110 case ISD::FNEARBYINT: { 4111 if (SNaN) 4112 return true; 4113 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 4114 } 4115 case ISD::FABS: 4116 case ISD::FNEG: 4117 case ISD::FCOPYSIGN: { 4118 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 4119 } 4120 case ISD::SELECT: 4121 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && 4122 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); 4123 case ISD::FP_EXTEND: 4124 case ISD::FP_ROUND: { 4125 if (SNaN) 4126 return true; 4127 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 4128 } 4129 case ISD::SINT_TO_FP: 4130 case ISD::UINT_TO_FP: 4131 return true; 4132 case ISD::FMA: 4133 case ISD::FMAD: { 4134 if (SNaN) 4135 return true; 4136 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && 4137 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && 4138 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); 4139 } 4140 case ISD::FSQRT: // Need is known positive 4141 case ISD::FLOG: 4142 case ISD::FLOG2: 4143 case ISD::FLOG10: 4144 case ISD::FPOWI: 4145 case ISD::FPOW: { 4146 if (SNaN) 4147 return true; 4148 // TODO: Refine on operand 4149 return false; 4150 } 4151 case ISD::FMINNUM: 4152 case ISD::FMAXNUM: { 4153 // Only one needs to be known not-nan, since it will be returned if the 4154 // other ends up being one. 4155 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) || 4156 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); 4157 } 4158 case ISD::FMINNUM_IEEE: 4159 case ISD::FMAXNUM_IEEE: { 4160 if (SNaN) 4161 return true; 4162 // This can return a NaN if either operand is an sNaN, or if both operands 4163 // are NaN. 
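    // It is therefore enough that one operand is never any NaN and the other
    // is never a signaling NaN, in either order.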
4164 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) && 4165 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) || 4166 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) && 4167 isKnownNeverSNaN(Op.getOperand(0), Depth + 1)); 4168 } 4169 case ISD::FMINIMUM: 4170 case ISD::FMAXIMUM: { 4171 // TODO: Does this quiet or return the origina NaN as-is? 4172 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && 4173 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); 4174 } 4175 case ISD::EXTRACT_VECTOR_ELT: { 4176 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); 4177 } 4178 default: 4179 if (Opcode >= ISD::BUILTIN_OP_END || 4180 Opcode == ISD::INTRINSIC_WO_CHAIN || 4181 Opcode == ISD::INTRINSIC_W_CHAIN || 4182 Opcode == ISD::INTRINSIC_VOID) { 4183 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth); 4184 } 4185 4186 return false; 4187 } 4188 } 4189 4190 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const { 4191 assert(Op.getValueType().isFloatingPoint() && 4192 "Floating point type expected"); 4193 4194 // If the value is a constant, we can obviously see if it is a zero or not. 4195 // TODO: Add BuildVector support. 4196 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) 4197 return !C->isZero(); 4198 return false; 4199 } 4200 4201 bool SelectionDAG::isKnownNeverZero(SDValue Op) const { 4202 assert(!Op.getValueType().isFloatingPoint() && 4203 "Floating point types unsupported - use isKnownNeverZeroFloat"); 4204 4205 // If the value is a constant, we can obviously see if it is a zero or not. 4206 if (ISD::matchUnaryPredicate( 4207 Op, [](ConstantSDNode *C) { return !C->isNullValue(); })) 4208 return true; 4209 4210 // TODO: Recognize more cases here. 4211 switch (Op.getOpcode()) { 4212 default: break; 4213 case ISD::OR: 4214 if (isKnownNeverZero(Op.getOperand(1)) || 4215 isKnownNeverZero(Op.getOperand(0))) 4216 return true; 4217 break; 4218 } 4219 4220 return false; 4221 } 4222 4223 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const { 4224 // Check the obvious case. 4225 if (A == B) return true; 4226 4227 // For for negative and positive zero. 4228 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) 4229 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) 4230 if (CA->isZero() && CB->isZero()) return true; 4231 4232 // Otherwise they may not be equal. 4233 return false; 4234 } 4235 4236 // FIXME: unify with llvm::haveNoCommonBitsSet. 4237 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M) 4238 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const { 4239 assert(A.getValueType() == B.getValueType() && 4240 "Values must have the same type"); 4241 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue(); 4242 } 4243 4244 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, 4245 ArrayRef<SDValue> Ops, 4246 SelectionDAG &DAG) { 4247 int NumOps = Ops.size(); 4248 assert(NumOps != 0 && "Can't build an empty vector!"); 4249 assert(!VT.isScalableVector() && 4250 "BUILD_VECTOR cannot be used with scalable types"); 4251 assert(VT.getVectorNumElements() == (unsigned)NumOps && 4252 "Incorrect element count in BUILD_VECTOR!"); 4253 4254 // BUILD_VECTOR of UNDEFs is UNDEF. 4255 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 4256 return DAG.getUNDEF(VT); 4257 4258 // BUILD_VECTOR of seq extract/insert from the same vector + type is Identity. 
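    // i.e. (build_vector (extract_elt V, 0), (extract_elt V, 1), ...) taken
    // from a single source V of the result type folds to V itself.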
4259 SDValue IdentitySrc; 4260 bool IsIdentity = true; 4261 for (int i = 0; i != NumOps; ++i) { 4262 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT || 4263 Ops[i].getOperand(0).getValueType() != VT || 4264 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) || 4265 !isa<ConstantSDNode>(Ops[i].getOperand(1)) || 4266 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) { 4267 IsIdentity = false; 4268 break; 4269 } 4270 IdentitySrc = Ops[i].getOperand(0); 4271 } 4272 if (IsIdentity) 4273 return IdentitySrc; 4274 4275 return SDValue(); 4276 } 4277 4278 /// Try to simplify vector concatenation to an input value, undef, or build 4279 /// vector. 4280 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, 4281 ArrayRef<SDValue> Ops, 4282 SelectionDAG &DAG) { 4283 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!"); 4284 assert(llvm::all_of(Ops, 4285 [Ops](SDValue Op) { 4286 return Ops[0].getValueType() == Op.getValueType(); 4287 }) && 4288 "Concatenation of vectors with inconsistent value types!"); 4289 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) == 4290 VT.getVectorNumElements() && 4291 "Incorrect element count in vector concatenation!"); 4292 4293 if (Ops.size() == 1) 4294 return Ops[0]; 4295 4296 // Concat of UNDEFs is UNDEF. 4297 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 4298 return DAG.getUNDEF(VT); 4299 4300 // Scan the operands and look for extract operations from a single source 4301 // that correspond to insertion at the same location via this concatenation: 4302 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ... 4303 SDValue IdentitySrc; 4304 bool IsIdentity = true; 4305 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 4306 SDValue Op = Ops[i]; 4307 unsigned IdentityIndex = i * Op.getValueType().getVectorNumElements(); 4308 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR || 4309 Op.getOperand(0).getValueType() != VT || 4310 (IdentitySrc && Op.getOperand(0) != IdentitySrc) || 4311 Op.getConstantOperandVal(1) != IdentityIndex) { 4312 IsIdentity = false; 4313 break; 4314 } 4315 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) && 4316 "Unexpected identity source vector for concat of extracts"); 4317 IdentitySrc = Op.getOperand(0); 4318 } 4319 if (IsIdentity) { 4320 assert(IdentitySrc && "Failed to set source vector of extracts"); 4321 return IdentitySrc; 4322 } 4323 4324 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be 4325 // simplified to one big BUILD_VECTOR. 4326 // FIXME: Add support for SCALAR_TO_VECTOR as well. 4327 EVT SVT = VT.getScalarType(); 4328 SmallVector<SDValue, 16> Elts; 4329 for (SDValue Op : Ops) { 4330 EVT OpVT = Op.getValueType(); 4331 if (Op.isUndef()) 4332 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); 4333 else if (Op.getOpcode() == ISD::BUILD_VECTOR) 4334 Elts.append(Op->op_begin(), Op->op_end()); 4335 else 4336 return SDValue(); 4337 } 4338 4339 // BUILD_VECTOR requires all inputs to be of the same type, find the 4340 // maximum type and extend them all. 4341 for (SDValue Op : Elts) 4342 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 4343 4344 if (SVT.bitsGT(VT.getScalarType())) 4345 for (SDValue &Op : Elts) 4346 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) 4347 ? 
DAG.getZExtOrTrunc(Op, DL, SVT) 4348 : DAG.getSExtOrTrunc(Op, DL, SVT); 4349 4350 SDValue V = DAG.getBuildVector(VT, DL, Elts); 4351 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG); 4352 return V; 4353 } 4354 4355 /// Gets or creates the specified node. 4356 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { 4357 FoldingSetNodeID ID; 4358 AddNodeIDNode(ID, Opcode, getVTList(VT), None); 4359 void *IP = nullptr; 4360 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4361 return SDValue(E, 0); 4362 4363 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4364 getVTList(VT)); 4365 CSEMap.InsertNode(N, IP); 4366 4367 InsertNode(N); 4368 SDValue V = SDValue(N, 0); 4369 NewSDValueDbgMsg(V, "Creating new node: ", this); 4370 return V; 4371 } 4372 4373 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4374 SDValue Operand, const SDNodeFlags Flags) { 4375 // Constant fold unary operations with an integer constant operand. Even 4376 // opaque constant will be folded, because the folding of unary operations 4377 // doesn't create new constants with different values. Nevertheless, the 4378 // opaque flag is preserved during folding to prevent future folding with 4379 // other constants. 4380 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 4381 const APInt &Val = C->getAPIntValue(); 4382 switch (Opcode) { 4383 default: break; 4384 case ISD::SIGN_EXTEND: 4385 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 4386 C->isTargetOpcode(), C->isOpaque()); 4387 case ISD::TRUNCATE: 4388 if (C->isOpaque()) 4389 break; 4390 LLVM_FALLTHROUGH; 4391 case ISD::ANY_EXTEND: 4392 case ISD::ZERO_EXTEND: 4393 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 4394 C->isTargetOpcode(), C->isOpaque()); 4395 case ISD::UINT_TO_FP: 4396 case ISD::SINT_TO_FP: { 4397 APFloat apf(EVTToAPFloatSemantics(VT), 4398 APInt::getNullValue(VT.getSizeInBits())); 4399 (void)apf.convertFromAPInt(Val, 4400 Opcode==ISD::SINT_TO_FP, 4401 APFloat::rmNearestTiesToEven); 4402 return getConstantFP(apf, DL, VT); 4403 } 4404 case ISD::BITCAST: 4405 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 4406 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 4407 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 4408 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 4409 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 4410 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 4411 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 4412 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 4413 break; 4414 case ISD::ABS: 4415 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 4416 C->isOpaque()); 4417 case ISD::BITREVERSE: 4418 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 4419 C->isOpaque()); 4420 case ISD::BSWAP: 4421 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 4422 C->isOpaque()); 4423 case ISD::CTPOP: 4424 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 4425 C->isOpaque()); 4426 case ISD::CTLZ: 4427 case ISD::CTLZ_ZERO_UNDEF: 4428 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 4429 C->isOpaque()); 4430 case ISD::CTTZ: 4431 case ISD::CTTZ_ZERO_UNDEF: 4432 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 4433 C->isOpaque()); 4434 case ISD::FP16_TO_FP: { 4435 bool Ignored; 4436 APFloat FPV(APFloat::IEEEhalf(), 4437 (Val.getBitWidth() == 16) ? 
Val : Val.trunc(16)); 4438 4439 // This can return overflow, underflow, or inexact; we don't care. 4440 // FIXME need to be more flexible about rounding mode. 4441 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4442 APFloat::rmNearestTiesToEven, &Ignored); 4443 return getConstantFP(FPV, DL, VT); 4444 } 4445 } 4446 } 4447 4448 // Constant fold unary operations with a floating point constant operand. 4449 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4450 APFloat V = C->getValueAPF(); // make copy 4451 switch (Opcode) { 4452 case ISD::FNEG: 4453 V.changeSign(); 4454 return getConstantFP(V, DL, VT); 4455 case ISD::FABS: 4456 V.clearSign(); 4457 return getConstantFP(V, DL, VT); 4458 case ISD::FCEIL: { 4459 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4460 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4461 return getConstantFP(V, DL, VT); 4462 break; 4463 } 4464 case ISD::FTRUNC: { 4465 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4466 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4467 return getConstantFP(V, DL, VT); 4468 break; 4469 } 4470 case ISD::FFLOOR: { 4471 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4472 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4473 return getConstantFP(V, DL, VT); 4474 break; 4475 } 4476 case ISD::FP_EXTEND: { 4477 bool ignored; 4478 // This can return overflow, underflow, or inexact; we don't care. 4479 // FIXME need to be more flexible about rounding mode. 4480 (void)V.convert(EVTToAPFloatSemantics(VT), 4481 APFloat::rmNearestTiesToEven, &ignored); 4482 return getConstantFP(V, DL, VT); 4483 } 4484 case ISD::FP_TO_SINT: 4485 case ISD::FP_TO_UINT: { 4486 bool ignored; 4487 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4488 // FIXME need to be more flexible about rounding mode. 4489 APFloat::opStatus s = 4490 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4491 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4492 break; 4493 return getConstant(IntVal, DL, VT); 4494 } 4495 case ISD::BITCAST: 4496 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4497 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4498 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4499 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4500 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4501 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4502 break; 4503 case ISD::FP_TO_FP16: { 4504 bool Ignored; 4505 // This can return overflow, underflow, or inexact; we don't care. 4506 // FIXME need to be more flexible about rounding mode. 4507 (void)V.convert(APFloat::IEEEhalf(), 4508 APFloat::rmNearestTiesToEven, &Ignored); 4509 return getConstant(V.bitcastToAPInt(), DL, VT); 4510 } 4511 } 4512 } 4513 4514 // Constant fold unary operations with a vector integer or float operand. 4515 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4516 if (BV->isConstant()) { 4517 switch (Opcode) { 4518 default: 4519 // FIXME: Entirely reasonable to perform folding of other unary 4520 // operations here as the need arises. 
4521 break; 4522 case ISD::FNEG: 4523 case ISD::FABS: 4524 case ISD::FCEIL: 4525 case ISD::FTRUNC: 4526 case ISD::FFLOOR: 4527 case ISD::FP_EXTEND: 4528 case ISD::FP_TO_SINT: 4529 case ISD::FP_TO_UINT: 4530 case ISD::TRUNCATE: 4531 case ISD::ANY_EXTEND: 4532 case ISD::ZERO_EXTEND: 4533 case ISD::SIGN_EXTEND: 4534 case ISD::UINT_TO_FP: 4535 case ISD::SINT_TO_FP: 4536 case ISD::ABS: 4537 case ISD::BITREVERSE: 4538 case ISD::BSWAP: 4539 case ISD::CTLZ: 4540 case ISD::CTLZ_ZERO_UNDEF: 4541 case ISD::CTTZ: 4542 case ISD::CTTZ_ZERO_UNDEF: 4543 case ISD::CTPOP: { 4544 SDValue Ops = { Operand }; 4545 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 4546 return Fold; 4547 } 4548 } 4549 } 4550 } 4551 4552 unsigned OpOpcode = Operand.getNode()->getOpcode(); 4553 switch (Opcode) { 4554 case ISD::FREEZE: 4555 assert(VT == Operand.getValueType() && "Unexpected VT!"); 4556 break; 4557 case ISD::TokenFactor: 4558 case ISD::MERGE_VALUES: 4559 case ISD::CONCAT_VECTORS: 4560 return Operand; // Factor, merge or concat of one node? No need. 4561 case ISD::BUILD_VECTOR: { 4562 // Attempt to simplify BUILD_VECTOR. 4563 SDValue Ops[] = {Operand}; 4564 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 4565 return V; 4566 break; 4567 } 4568 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 4569 case ISD::FP_EXTEND: 4570 assert(VT.isFloatingPoint() && 4571 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 4572 if (Operand.getValueType() == VT) return Operand; // noop conversion. 4573 assert((!VT.isVector() || 4574 VT.getVectorNumElements() == 4575 Operand.getValueType().getVectorNumElements()) && 4576 "Vector element count mismatch!"); 4577 assert(Operand.getValueType().bitsLT(VT) && 4578 "Invalid fpext node, dst < src!"); 4579 if (Operand.isUndef()) 4580 return getUNDEF(VT); 4581 break; 4582 case ISD::FP_TO_SINT: 4583 case ISD::FP_TO_UINT: 4584 if (Operand.isUndef()) 4585 return getUNDEF(VT); 4586 break; 4587 case ISD::SINT_TO_FP: 4588 case ISD::UINT_TO_FP: 4589 // [us]itofp(undef) = 0, because the result value is bounded. 4590 if (Operand.isUndef()) 4591 return getConstantFP(0.0, DL, VT); 4592 break; 4593 case ISD::SIGN_EXTEND: 4594 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4595 "Invalid SIGN_EXTEND!"); 4596 assert(VT.isVector() == Operand.getValueType().isVector() && 4597 "SIGN_EXTEND result type type should be vector iff the operand " 4598 "type is vector!"); 4599 if (Operand.getValueType() == VT) return Operand; // noop extension 4600 assert((!VT.isVector() || 4601 VT.getVectorElementCount() == 4602 Operand.getValueType().getVectorElementCount()) && 4603 "Vector element count mismatch!"); 4604 assert(Operand.getValueType().bitsLT(VT) && 4605 "Invalid sext node, dst < src!"); 4606 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 4607 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4608 else if (OpOpcode == ISD::UNDEF) 4609 // sext(undef) = 0, because the top bits will all be the same. 
4610 return getConstant(0, DL, VT); 4611 break; 4612 case ISD::ZERO_EXTEND: 4613 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4614 "Invalid ZERO_EXTEND!"); 4615 assert(VT.isVector() == Operand.getValueType().isVector() && 4616 "ZERO_EXTEND result type type should be vector iff the operand " 4617 "type is vector!"); 4618 if (Operand.getValueType() == VT) return Operand; // noop extension 4619 assert((!VT.isVector() || 4620 VT.getVectorElementCount() == 4621 Operand.getValueType().getVectorElementCount()) && 4622 "Vector element count mismatch!"); 4623 assert(Operand.getValueType().bitsLT(VT) && 4624 "Invalid zext node, dst < src!"); 4625 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 4626 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0)); 4627 else if (OpOpcode == ISD::UNDEF) 4628 // zext(undef) = 0, because the top bits will be zero. 4629 return getConstant(0, DL, VT); 4630 break; 4631 case ISD::ANY_EXTEND: 4632 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4633 "Invalid ANY_EXTEND!"); 4634 assert(VT.isVector() == Operand.getValueType().isVector() && 4635 "ANY_EXTEND result type type should be vector iff the operand " 4636 "type is vector!"); 4637 if (Operand.getValueType() == VT) return Operand; // noop extension 4638 assert((!VT.isVector() || 4639 VT.getVectorElementCount() == 4640 Operand.getValueType().getVectorElementCount()) && 4641 "Vector element count mismatch!"); 4642 assert(Operand.getValueType().bitsLT(VT) && 4643 "Invalid anyext node, dst < src!"); 4644 4645 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || 4646 OpOpcode == ISD::ANY_EXTEND) 4647 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x) 4648 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4649 else if (OpOpcode == ISD::UNDEF) 4650 return getUNDEF(VT); 4651 4652 // (ext (trunc x)) -> x 4653 if (OpOpcode == ISD::TRUNCATE) { 4654 SDValue OpOp = Operand.getOperand(0); 4655 if (OpOp.getValueType() == VT) { 4656 transferDbgValues(Operand, OpOp); 4657 return OpOp; 4658 } 4659 } 4660 break; 4661 case ISD::TRUNCATE: 4662 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4663 "Invalid TRUNCATE!"); 4664 assert(VT.isVector() == Operand.getValueType().isVector() && 4665 "TRUNCATE result type type should be vector iff the operand " 4666 "type is vector!"); 4667 if (Operand.getValueType() == VT) return Operand; // noop truncate 4668 assert((!VT.isVector() || 4669 VT.getVectorElementCount() == 4670 Operand.getValueType().getVectorElementCount()) && 4671 "Vector element count mismatch!"); 4672 assert(Operand.getValueType().bitsGT(VT) && 4673 "Invalid truncate node, src < dst!"); 4674 if (OpOpcode == ISD::TRUNCATE) 4675 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4676 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || 4677 OpOpcode == ISD::ANY_EXTEND) { 4678 // If the source is smaller than the dest, we still need an extend. 
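    // e.g. (trunc:i32 (sext:i64 X:i8)) -> (sext:i32 X:i8). If the inner
    // source is instead wider than the result we truncate it, and if the
    // widths match we can return it directly.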
4679 if (Operand.getOperand(0).getValueType().getScalarType() 4680 .bitsLT(VT.getScalarType())) 4681 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4682 if (Operand.getOperand(0).getValueType().bitsGT(VT)) 4683 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4684 return Operand.getOperand(0); 4685 } 4686 if (OpOpcode == ISD::UNDEF) 4687 return getUNDEF(VT); 4688 break; 4689 case ISD::ANY_EXTEND_VECTOR_INREG: 4690 case ISD::ZERO_EXTEND_VECTOR_INREG: 4691 case ISD::SIGN_EXTEND_VECTOR_INREG: 4692 assert(VT.isVector() && "This DAG node is restricted to vector types."); 4693 assert(Operand.getValueType().bitsLE(VT) && 4694 "The input must be the same size or smaller than the result."); 4695 assert(VT.getVectorNumElements() < 4696 Operand.getValueType().getVectorNumElements() && 4697 "The destination vector type must have fewer lanes than the input."); 4698 break; 4699 case ISD::ABS: 4700 assert(VT.isInteger() && VT == Operand.getValueType() && 4701 "Invalid ABS!"); 4702 if (OpOpcode == ISD::UNDEF) 4703 return getUNDEF(VT); 4704 break; 4705 case ISD::BSWAP: 4706 assert(VT.isInteger() && VT == Operand.getValueType() && 4707 "Invalid BSWAP!"); 4708 assert((VT.getScalarSizeInBits() % 16 == 0) && 4709 "BSWAP types must be a multiple of 16 bits!"); 4710 if (OpOpcode == ISD::UNDEF) 4711 return getUNDEF(VT); 4712 break; 4713 case ISD::BITREVERSE: 4714 assert(VT.isInteger() && VT == Operand.getValueType() && 4715 "Invalid BITREVERSE!"); 4716 if (OpOpcode == ISD::UNDEF) 4717 return getUNDEF(VT); 4718 break; 4719 case ISD::BITCAST: 4720 // Basic sanity checking. 4721 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 4722 "Cannot BITCAST between types of different sizes!"); 4723 if (VT == Operand.getValueType()) return Operand; // noop conversion. 4724 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4725 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4726 if (OpOpcode == ISD::UNDEF) 4727 return getUNDEF(VT); 4728 break; 4729 case ISD::SCALAR_TO_VECTOR: 4730 assert(VT.isVector() && !Operand.getValueType().isVector() && 4731 (VT.getVectorElementType() == Operand.getValueType() || 4732 (VT.getVectorElementType().isInteger() && 4733 Operand.getValueType().isInteger() && 4734 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4735 "Illegal SCALAR_TO_VECTOR node!"); 4736 if (OpOpcode == ISD::UNDEF) 4737 return getUNDEF(VT); 4738 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4739 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4740 isa<ConstantSDNode>(Operand.getOperand(1)) && 4741 Operand.getConstantOperandVal(1) == 0 && 4742 Operand.getOperand(0).getValueType() == VT) 4743 return Operand.getOperand(0); 4744 break; 4745 case ISD::FNEG: 4746 // Negation of an unknown bag of bits is still completely undefined. 
4747 if (OpOpcode == ISD::UNDEF) 4748 return getUNDEF(VT); 4749 4750 if (OpOpcode == ISD::FNEG) // --X -> X 4751 return Operand.getOperand(0); 4752 break; 4753 case ISD::FABS: 4754 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4755 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4756 break; 4757 } 4758 4759 SDNode *N; 4760 SDVTList VTs = getVTList(VT); 4761 SDValue Ops[] = {Operand}; 4762 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4763 FoldingSetNodeID ID; 4764 AddNodeIDNode(ID, Opcode, VTs, Ops); 4765 void *IP = nullptr; 4766 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4767 E->intersectFlagsWith(Flags); 4768 return SDValue(E, 0); 4769 } 4770 4771 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4772 N->setFlags(Flags); 4773 createOperands(N, Ops); 4774 CSEMap.InsertNode(N, IP); 4775 } else { 4776 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4777 createOperands(N, Ops); 4778 } 4779 4780 InsertNode(N); 4781 SDValue V = SDValue(N, 0); 4782 NewSDValueDbgMsg(V, "Creating new node: ", this); 4783 return V; 4784 } 4785 4786 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, 4787 const APInt &C2) { 4788 switch (Opcode) { 4789 case ISD::ADD: return C1 + C2; 4790 case ISD::SUB: return C1 - C2; 4791 case ISD::MUL: return C1 * C2; 4792 case ISD::AND: return C1 & C2; 4793 case ISD::OR: return C1 | C2; 4794 case ISD::XOR: return C1 ^ C2; 4795 case ISD::SHL: return C1 << C2; 4796 case ISD::SRL: return C1.lshr(C2); 4797 case ISD::SRA: return C1.ashr(C2); 4798 case ISD::ROTL: return C1.rotl(C2); 4799 case ISD::ROTR: return C1.rotr(C2); 4800 case ISD::SMIN: return C1.sle(C2) ? C1 : C2; 4801 case ISD::SMAX: return C1.sge(C2) ? C1 : C2; 4802 case ISD::UMIN: return C1.ule(C2) ? C1 : C2; 4803 case ISD::UMAX: return C1.uge(C2) ? C1 : C2; 4804 case ISD::SADDSAT: return C1.sadd_sat(C2); 4805 case ISD::UADDSAT: return C1.uadd_sat(C2); 4806 case ISD::SSUBSAT: return C1.ssub_sat(C2); 4807 case ISD::USUBSAT: return C1.usub_sat(C2); 4808 case ISD::UDIV: 4809 if (!C2.getBoolValue()) 4810 break; 4811 return C1.udiv(C2); 4812 case ISD::UREM: 4813 if (!C2.getBoolValue()) 4814 break; 4815 return C1.urem(C2); 4816 case ISD::SDIV: 4817 if (!C2.getBoolValue()) 4818 break; 4819 return C1.sdiv(C2); 4820 case ISD::SREM: 4821 if (!C2.getBoolValue()) 4822 break; 4823 return C1.srem(C2); 4824 } 4825 return llvm::None; 4826 } 4827 4828 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4829 const GlobalAddressSDNode *GA, 4830 const SDNode *N2) { 4831 if (GA->getOpcode() != ISD::GlobalAddress) 4832 return SDValue(); 4833 if (!TLI->isOffsetFoldingLegal(GA)) 4834 return SDValue(); 4835 auto *C2 = dyn_cast<ConstantSDNode>(N2); 4836 if (!C2) 4837 return SDValue(); 4838 int64_t Offset = C2->getSExtValue(); 4839 switch (Opcode) { 4840 case ISD::ADD: break; 4841 case ISD::SUB: Offset = -uint64_t(Offset); break; 4842 default: return SDValue(); 4843 } 4844 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, 4845 GA->getOffset() + uint64_t(Offset)); 4846 } 4847 4848 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4849 switch (Opcode) { 4850 case ISD::SDIV: 4851 case ISD::UDIV: 4852 case ISD::SREM: 4853 case ISD::UREM: { 4854 // If a divisor is zero/undef or any element of a divisor vector is 4855 // zero/undef, the whole op is undef. 
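// For example, (udiv X, 0), (srem X, undef), and a udiv whose constant
// vector divisor contains a zero element (e.g. <1, 0, 3>) all report true
// here, and the callers below fold the whole operation to UNDEF.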
4856 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4857 SDValue Divisor = Ops[1]; 4858 if (Divisor.isUndef() || isNullConstant(Divisor)) 4859 return true; 4860 4861 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4862 llvm::any_of(Divisor->op_values(), 4863 [](SDValue V) { return V.isUndef() || 4864 isNullConstant(V); }); 4865 // TODO: Handle signed overflow. 4866 } 4867 // TODO: Handle oversized shifts. 4868 default: 4869 return false; 4870 } 4871 } 4872 4873 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4874 EVT VT, ArrayRef<SDValue> Ops) { 4875 // If the opcode is a target-specific ISD node, there's nothing we can 4876 // do here and the operand rules may not line up with the below, so 4877 // bail early. 4878 if (Opcode >= ISD::BUILTIN_OP_END) 4879 return SDValue(); 4880 4881 // For now, the array Ops should only contain two values. 4882 // This enforcement will be removed once this function is merged with 4883 // FoldConstantVectorArithmetic 4884 if (Ops.size() != 2) 4885 return SDValue(); 4886 4887 if (isUndef(Opcode, Ops)) 4888 return getUNDEF(VT); 4889 4890 SDNode *N1 = Ops[0].getNode(); 4891 SDNode *N2 = Ops[1].getNode(); 4892 4893 // Handle the case of two scalars. 4894 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) { 4895 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) { 4896 if (C1->isOpaque() || C2->isOpaque()) 4897 return SDValue(); 4898 4899 Optional<APInt> FoldAttempt = 4900 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()); 4901 if (!FoldAttempt) 4902 return SDValue(); 4903 4904 SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT); 4905 assert((!Folded || !VT.isVector()) && 4906 "Can't fold vectors ops with scalar operands"); 4907 return Folded; 4908 } 4909 } 4910 4911 // fold (add Sym, c) -> Sym+c 4912 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1)) 4913 return FoldSymbolOffset(Opcode, VT, GA, N2); 4914 if (TLI->isCommutativeBinOp(Opcode)) 4915 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2)) 4916 return FoldSymbolOffset(Opcode, VT, GA, N1); 4917 4918 // TODO: All the folds below are performed lane-by-lane and assume a fixed 4919 // vector width, however we should be able to do constant folds involving 4920 // splat vector nodes too. 4921 if (VT.isScalableVector()) 4922 return SDValue(); 4923 4924 // For fixed width vectors, extract each constant element and fold them 4925 // individually. Either input may be an undef value. 4926 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1); 4927 if (!BV1 && !N1->isUndef()) 4928 return SDValue(); 4929 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2); 4930 if (!BV2 && !N2->isUndef()) 4931 return SDValue(); 4932 // If both operands are undef, that's handled the same way as scalars. 4933 if (!BV1 && !BV2) 4934 return SDValue(); 4935 4936 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) && 4937 "Vector binop with different number of elements in operands?"); 4938 4939 EVT SVT = VT.getScalarType(); 4940 EVT LegalSVT = SVT; 4941 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4942 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4943 if (LegalSVT.bitsLT(SVT)) 4944 return SDValue(); 4945 } 4946 SmallVector<SDValue, 4> Outputs; 4947 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands(); 4948 for (unsigned I = 0; I != NumOps; ++I) { 4949 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT); 4950 SDValue V2 = BV2 ? 
BV2->getOperand(I) : getUNDEF(SVT); 4951 if (SVT.isInteger()) { 4952 if (V1->getValueType(0).bitsGT(SVT)) 4953 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1); 4954 if (V2->getValueType(0).bitsGT(SVT)) 4955 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2); 4956 } 4957 4958 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT) 4959 return SDValue(); 4960 4961 // Fold one vector element. 4962 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2); 4963 if (LegalSVT != SVT) 4964 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4965 4966 // Scalar folding only succeeded if the result is a constant or UNDEF. 4967 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4968 ScalarResult.getOpcode() != ISD::ConstantFP) 4969 return SDValue(); 4970 Outputs.push_back(ScalarResult); 4971 } 4972 4973 assert(VT.getVectorNumElements() == Outputs.size() && 4974 "Vector size mismatch!"); 4975 4976 // We may have a vector type but a scalar result. Create a splat. 4977 Outputs.resize(VT.getVectorNumElements(), Outputs.back()); 4978 4979 // Build a big vector out of the scalar elements we generated. 4980 return getBuildVector(VT, SDLoc(), Outputs); 4981 } 4982 4983 // TODO: Merge with FoldConstantArithmetic 4984 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, 4985 const SDLoc &DL, EVT VT, 4986 ArrayRef<SDValue> Ops, 4987 const SDNodeFlags Flags) { 4988 // If the opcode is a target-specific ISD node, there's nothing we can 4989 // do here and the operand rules may not line up with the below, so 4990 // bail early. 4991 if (Opcode >= ISD::BUILTIN_OP_END) 4992 return SDValue(); 4993 4994 if (isUndef(Opcode, Ops)) 4995 return getUNDEF(VT); 4996 4997 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday? 4998 if (!VT.isVector()) 4999 return SDValue(); 5000 5001 // TODO: All the folds below are performed lane-by-lane and assume a fixed 5002 // vector width, however we should be able to do constant folds involving 5003 // splat vector nodes too. 5004 if (VT.isScalableVector()) 5005 return SDValue(); 5006 5007 // From this point onwards all vectors are assumed to be fixed width. 5008 unsigned NumElts = VT.getVectorNumElements(); 5009 5010 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) { 5011 return !Op.getValueType().isVector() || 5012 Op.getValueType().getVectorNumElements() == NumElts; 5013 }; 5014 5015 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { 5016 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); 5017 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || 5018 (BV && BV->isConstant()); 5019 }; 5020 5021 // All operands must be vector types with the same number of elements as 5022 // the result type and must be either UNDEF or a build vector of constant 5023 // or UNDEF scalars. 5024 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) || 5025 !llvm::all_of(Ops, IsScalarOrSameVectorSize)) 5026 return SDValue(); 5027 5028 // If we are comparing vectors, then the result needs to be a i1 boolean 5029 // that is then sign-extended back to the legal result type. 5030 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); 5031 5032 // Find legal integer scalar type for constant promotion and 5033 // ensure that its scalar size is at least as large as source. 
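// For example, on a target that promotes i8 to i32, each lane is folded at
// the original i8 width and the resulting constant is sign-extended to the
// legal i32 type below before the result vector is rebuilt.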
5034 EVT LegalSVT = VT.getScalarType(); 5035 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 5036 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 5037 if (LegalSVT.bitsLT(VT.getScalarType())) 5038 return SDValue(); 5039 } 5040 5041 // Constant fold each scalar lane separately. 5042 SmallVector<SDValue, 4> ScalarResults; 5043 for (unsigned i = 0; i != NumElts; i++) { 5044 SmallVector<SDValue, 4> ScalarOps; 5045 for (SDValue Op : Ops) { 5046 EVT InSVT = Op.getValueType().getScalarType(); 5047 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op); 5048 if (!InBV) { 5049 // We've checked that this is UNDEF or a constant of some kind. 5050 if (Op.isUndef()) 5051 ScalarOps.push_back(getUNDEF(InSVT)); 5052 else 5053 ScalarOps.push_back(Op); 5054 continue; 5055 } 5056 5057 SDValue ScalarOp = InBV->getOperand(i); 5058 EVT ScalarVT = ScalarOp.getValueType(); 5059 5060 // Build vector (integer) scalar operands may need implicit 5061 // truncation - do this before constant folding. 5062 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 5063 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 5064 5065 ScalarOps.push_back(ScalarOp); 5066 } 5067 5068 // Constant fold the scalar operands. 5069 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 5070 5071 // Legalize the (integer) scalar constant if necessary. 5072 if (LegalSVT != SVT) 5073 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 5074 5075 // Scalar folding only succeeded if the result is a constant or UNDEF. 5076 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 5077 ScalarResult.getOpcode() != ISD::ConstantFP) 5078 return SDValue(); 5079 ScalarResults.push_back(ScalarResult); 5080 } 5081 5082 SDValue V = getBuildVector(VT, DL, ScalarResults); 5083 NewSDValueDbgMsg(V, "New node fold constant vector: ", this); 5084 return V; 5085 } 5086 5087 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL, 5088 EVT VT, SDValue N1, SDValue N2) { 5089 // TODO: We don't do any constant folding for strict FP opcodes here, but we 5090 // should. That will require dealing with a potentially non-default 5091 // rounding mode, checking the "opStatus" return value from the APFloat 5092 // math calculations, and possibly other variations. 5093 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 5094 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 5095 if (N1CFP && N2CFP) { 5096 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF(); 5097 switch (Opcode) { 5098 case ISD::FADD: 5099 C1.add(C2, APFloat::rmNearestTiesToEven); 5100 return getConstantFP(C1, DL, VT); 5101 case ISD::FSUB: 5102 C1.subtract(C2, APFloat::rmNearestTiesToEven); 5103 return getConstantFP(C1, DL, VT); 5104 case ISD::FMUL: 5105 C1.multiply(C2, APFloat::rmNearestTiesToEven); 5106 return getConstantFP(C1, DL, VT); 5107 case ISD::FDIV: 5108 C1.divide(C2, APFloat::rmNearestTiesToEven); 5109 return getConstantFP(C1, DL, VT); 5110 case ISD::FREM: 5111 C1.mod(C2); 5112 return getConstantFP(C1, DL, VT); 5113 case ISD::FCOPYSIGN: 5114 C1.copySign(C2); 5115 return getConstantFP(C1, DL, VT); 5116 default: break; 5117 } 5118 } 5119 if (N1CFP && Opcode == ISD::FP_ROUND) { 5120 APFloat C1 = N1CFP->getValueAPF(); // make copy 5121 bool Unused; 5122 // This can return overflow, underflow, or inexact; we don't care. 5123 // FIXME need to be more flexible about rounding mode. 
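// For example, (fp_round f64 1.5 to f32) folds to the f32 constant 1.5;
// values that are not exactly representable in VT are rounded to
// nearest-even here.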
5124 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, 5125 &Unused); 5126 return getConstantFP(C1, DL, VT); 5127 } 5128 5129 switch (Opcode) { 5130 case ISD::FSUB: 5131 // -0.0 - undef --> undef (consistent with "fneg undef") 5132 if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef()) 5133 return getUNDEF(VT); 5134 LLVM_FALLTHROUGH; 5135 5136 case ISD::FADD: 5137 case ISD::FMUL: 5138 case ISD::FDIV: 5139 case ISD::FREM: 5140 // If both operands are undef, the result is undef. If 1 operand is undef, 5141 // the result is NaN. This should match the behavior of the IR optimizer. 5142 if (N1.isUndef() && N2.isUndef()) 5143 return getUNDEF(VT); 5144 if (N1.isUndef() || N2.isUndef()) 5145 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); 5146 } 5147 return SDValue(); 5148 } 5149 5150 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5151 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 5152 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5153 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 5154 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5155 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5156 5157 // Canonicalize constant to RHS if commutative. 5158 if (TLI->isCommutativeBinOp(Opcode)) { 5159 if (N1C && !N2C) { 5160 std::swap(N1C, N2C); 5161 std::swap(N1, N2); 5162 } else if (N1CFP && !N2CFP) { 5163 std::swap(N1CFP, N2CFP); 5164 std::swap(N1, N2); 5165 } 5166 } 5167 5168 switch (Opcode) { 5169 default: break; 5170 case ISD::TokenFactor: 5171 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 5172 N2.getValueType() == MVT::Other && "Invalid token factor!"); 5173 // Fold trivial token factors. 5174 if (N1.getOpcode() == ISD::EntryToken) return N2; 5175 if (N2.getOpcode() == ISD::EntryToken) return N1; 5176 if (N1 == N2) return N1; 5177 break; 5178 case ISD::BUILD_VECTOR: { 5179 // Attempt to simplify BUILD_VECTOR. 5180 SDValue Ops[] = {N1, N2}; 5181 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5182 return V; 5183 break; 5184 } 5185 case ISD::CONCAT_VECTORS: { 5186 SDValue Ops[] = {N1, N2}; 5187 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5188 return V; 5189 break; 5190 } 5191 case ISD::AND: 5192 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5193 assert(N1.getValueType() == N2.getValueType() && 5194 N1.getValueType() == VT && "Binary operator types must match!"); 5195 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 5196 // worth handling here. 5197 if (N2C && N2C->isNullValue()) 5198 return N2; 5199 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 5200 return N1; 5201 break; 5202 case ISD::OR: 5203 case ISD::XOR: 5204 case ISD::ADD: 5205 case ISD::SUB: 5206 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5207 assert(N1.getValueType() == N2.getValueType() && 5208 N1.getValueType() == VT && "Binary operator types must match!"); 5209 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 5210 // it's worth handling here. 
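// For example, (add i64 X, 0), (sub i64 X, 0), (or i64 X, 0) and
// (xor i64 X, 0) all fold to X here.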
5211 if (N2C && N2C->isNullValue()) 5212 return N1; 5213 break; 5214 case ISD::MUL: 5215 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5216 assert(N1.getValueType() == N2.getValueType() && 5217 N1.getValueType() == VT && "Binary operator types must match!"); 5218 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { 5219 APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue(); 5220 APInt N2CImm = N2C->getAPIntValue(); 5221 return getVScale(DL, VT, MulImm * N2CImm); 5222 } 5223 break; 5224 case ISD::UDIV: 5225 case ISD::UREM: 5226 case ISD::MULHU: 5227 case ISD::MULHS: 5228 case ISD::SDIV: 5229 case ISD::SREM: 5230 case ISD::SMIN: 5231 case ISD::SMAX: 5232 case ISD::UMIN: 5233 case ISD::UMAX: 5234 case ISD::SADDSAT: 5235 case ISD::SSUBSAT: 5236 case ISD::UADDSAT: 5237 case ISD::USUBSAT: 5238 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5239 assert(N1.getValueType() == N2.getValueType() && 5240 N1.getValueType() == VT && "Binary operator types must match!"); 5241 break; 5242 case ISD::FADD: 5243 case ISD::FSUB: 5244 case ISD::FMUL: 5245 case ISD::FDIV: 5246 case ISD::FREM: 5247 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5248 assert(N1.getValueType() == N2.getValueType() && 5249 N1.getValueType() == VT && "Binary operator types must match!"); 5250 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags)) 5251 return V; 5252 break; 5253 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 5254 assert(N1.getValueType() == VT && 5255 N1.getValueType().isFloatingPoint() && 5256 N2.getValueType().isFloatingPoint() && 5257 "Invalid FCOPYSIGN!"); 5258 break; 5259 case ISD::SHL: 5260 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { 5261 APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue(); 5262 APInt ShiftImm = N2C->getAPIntValue(); 5263 return getVScale(DL, VT, MulImm << ShiftImm); 5264 } 5265 LLVM_FALLTHROUGH; 5266 case ISD::SRA: 5267 case ISD::SRL: 5268 if (SDValue V = simplifyShift(N1, N2)) 5269 return V; 5270 LLVM_FALLTHROUGH; 5271 case ISD::ROTL: 5272 case ISD::ROTR: 5273 assert(VT == N1.getValueType() && 5274 "Shift operators' return type must be the same as their first arg"); 5275 assert(VT.isInteger() && N2.getValueType().isInteger() && 5276 "Shifts only work on integers"); 5277 assert((!VT.isVector() || VT == N2.getValueType()) && 5278 "Vector shift amounts must have the same type as their first arg"); 5279 // Verify that the shift amount VT is big enough to hold valid shift 5280 // amounts. This catches things like trying to shift an i1024 value by an 5281 // i8, which is easy to fall into in generic code that uses 5282 // TLI.getShiftAmount(). 5283 assert(N2.getValueType().getScalarSizeInBits().getFixedSize() >= 5284 Log2_32_Ceil(VT.getScalarSizeInBits().getFixedSize()) && 5285 "Invalid use of small shift amount with oversized value!"); 5286 5287 // Always fold shifts of i1 values so the code generator doesn't need to 5288 // handle them. Since we know the size of the shift has to be less than the 5289 // size of the value, the shift/rotate count is guaranteed to be zero.
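// For example, (shl i1 %x, %c) is only well defined for %c == 0, so it is
// simply %x.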
5290 if (VT == MVT::i1) 5291 return N1; 5292 if (N2C && N2C->isNullValue()) 5293 return N1; 5294 break; 5295 case ISD::FP_ROUND: 5296 assert(VT.isFloatingPoint() && 5297 N1.getValueType().isFloatingPoint() && 5298 VT.bitsLE(N1.getValueType()) && 5299 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && 5300 "Invalid FP_ROUND!"); 5301 if (N1.getValueType() == VT) return N1; // noop conversion. 5302 break; 5303 case ISD::AssertSext: 5304 case ISD::AssertZext: { 5305 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5306 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5307 assert(VT.isInteger() && EVT.isInteger() && 5308 "Cannot *_EXTEND_INREG FP types"); 5309 assert(!EVT.isVector() && 5310 "AssertSExt/AssertZExt type should be the vector element type " 5311 "rather than the vector type!"); 5312 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!"); 5313 if (VT.getScalarType() == EVT) return N1; // noop assertion. 5314 break; 5315 } 5316 case ISD::SIGN_EXTEND_INREG: { 5317 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5318 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5319 assert(VT.isInteger() && EVT.isInteger() && 5320 "Cannot *_EXTEND_INREG FP types"); 5321 assert(EVT.isVector() == VT.isVector() && 5322 "SIGN_EXTEND_INREG type should be vector iff the operand " 5323 "type is vector!"); 5324 assert((!EVT.isVector() || 5325 EVT.getVectorElementCount() == VT.getVectorElementCount()) && 5326 "Vector element counts must match in SIGN_EXTEND_INREG"); 5327 assert(EVT.bitsLE(VT) && "Not extending!"); 5328 if (EVT == VT) return N1; // Not actually extending 5329 5330 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 5331 unsigned FromBits = EVT.getScalarSizeInBits(); 5332 Val <<= Val.getBitWidth() - FromBits; 5333 Val.ashrInPlace(Val.getBitWidth() - FromBits); 5334 return getConstant(Val, DL, ConstantVT); 5335 }; 5336 5337 if (N1C) { 5338 const APInt &Val = N1C->getAPIntValue(); 5339 return SignExtendInReg(Val, VT); 5340 } 5341 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 5342 SmallVector<SDValue, 8> Ops; 5343 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 5344 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5345 SDValue Op = N1.getOperand(i); 5346 if (Op.isUndef()) { 5347 Ops.push_back(getUNDEF(OpVT)); 5348 continue; 5349 } 5350 ConstantSDNode *C = cast<ConstantSDNode>(Op); 5351 APInt Val = C->getAPIntValue(); 5352 Ops.push_back(SignExtendInReg(Val, OpVT)); 5353 } 5354 return getBuildVector(VT, DL, Ops); 5355 } 5356 break; 5357 } 5358 case ISD::EXTRACT_VECTOR_ELT: 5359 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() && 5360 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \ 5361 element type of the vector."); 5362 5363 // Extract from an undefined value or using an undefined index is undefined. 5364 if (N1.isUndef() || N2.isUndef()) 5365 return getUNDEF(VT); 5366 5367 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 5368 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) 5369 return getUNDEF(VT); 5370 5371 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 5372 // expanding copies of large vectors from registers. 
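// For example, extracting lane 5 from (concat_vectors v4i32 A, v4i32 B)
// becomes an extract of lane 1 from B: operand index 5 / 4 == 1 selects B,
// and 5 % 4 == 1 selects the lane within it.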
5373 if (N2C && 5374 N1.getOpcode() == ISD::CONCAT_VECTORS && 5375 N1.getNumOperands() > 0) { 5376 unsigned Factor = 5377 N1.getOperand(0).getValueType().getVectorNumElements(); 5378 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 5379 N1.getOperand(N2C->getZExtValue() / Factor), 5380 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL)); 5381 } 5382 5383 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 5384 // expanding large vector constants. 5385 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { 5386 SDValue Elt = N1.getOperand(N2C->getZExtValue()); 5387 5388 if (VT != Elt.getValueType()) 5389 // If the vector element type is not legal, the BUILD_VECTOR operands 5390 // are promoted and implicitly truncated, and the result implicitly 5391 // extended. Make that explicit here. 5392 Elt = getAnyExtOrTrunc(Elt, DL, VT); 5393 5394 return Elt; 5395 } 5396 5397 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 5398 // operations are lowered to scalars. 5399 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 5400 // If the indices are the same, return the inserted element else 5401 // if the indices are known different, extract the element from 5402 // the original vector. 5403 SDValue N1Op2 = N1.getOperand(2); 5404 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); 5405 5406 if (N1Op2C && N2C) { 5407 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 5408 if (VT == N1.getOperand(1).getValueType()) 5409 return N1.getOperand(1); 5410 else 5411 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 5412 } 5413 5414 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 5415 } 5416 } 5417 5418 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed 5419 // when vector types are scalarized and v1iX is legal. 5420 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx) 5421 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5422 N1.getValueType().getVectorNumElements() == 1) { 5423 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), 5424 N1.getOperand(1)); 5425 } 5426 break; 5427 case ISD::EXTRACT_ELEMENT: 5428 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 5429 assert(!N1.getValueType().isVector() && !VT.isVector() && 5430 (N1.getValueType().isInteger() == VT.isInteger()) && 5431 N1.getValueType() != VT && 5432 "Wrong types for EXTRACT_ELEMENT!"); 5433 5434 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 5435 // 64-bit integers into 32-bit parts. Instead of building the extract of 5436 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 5437 if (N1.getOpcode() == ISD::BUILD_PAIR) 5438 return N1.getOperand(N2C->getZExtValue()); 5439 5440 // EXTRACT_ELEMENT of a constant int is also very common. 
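// For example, extracting element 1 (the high half) of the i64 constant
// 0x00000001FFFFFFFF as an i32 shifts right by 32 bits and truncates,
// yielding the i32 constant 1.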
5441 if (N1C) { 5442 unsigned ElementSize = VT.getSizeInBits(); 5443 unsigned Shift = ElementSize * N2C->getZExtValue(); 5444 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift); 5445 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT); 5446 } 5447 break; 5448 case ISD::EXTRACT_SUBVECTOR: 5449 assert(VT.isVector() && N1.getValueType().isVector() && 5450 "Extract subvector VTs must be a vectors!"); 5451 assert(VT.getVectorElementType() == 5452 N1.getValueType().getVectorElementType() && 5453 "Extract subvector VTs must have the same element type!"); 5454 assert(VT.getVectorNumElements() <= 5455 N1.getValueType().getVectorNumElements() && 5456 "Extract subvector must be from larger vector to smaller vector!"); 5457 assert(N2C && "Extract subvector index must be a constant"); 5458 assert(VT.getVectorNumElements() + N2C->getZExtValue() <= 5459 N1.getValueType().getVectorNumElements() && 5460 "Extract subvector overflow!"); 5461 5462 // Trivial extraction. 5463 if (VT == N1.getValueType()) 5464 return N1; 5465 5466 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. 5467 if (N1.isUndef()) 5468 return getUNDEF(VT); 5469 5470 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of 5471 // the concat have the same type as the extract. 5472 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && 5473 N1.getNumOperands() > 0 && VT == N1.getOperand(0).getValueType()) { 5474 unsigned Factor = VT.getVectorNumElements(); 5475 return N1.getOperand(N2C->getZExtValue() / Factor); 5476 } 5477 5478 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created 5479 // during shuffle legalization. 5480 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 5481 VT == N1.getOperand(1).getValueType()) 5482 return N1.getOperand(1); 5483 break; 5484 } 5485 5486 // Perform trivial constant folding. 5487 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2})) 5488 return SV; 5489 5490 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2)) 5491 return V; 5492 5493 // Canonicalize an UNDEF to the RHS, even over a constant. 5494 if (N1.isUndef()) { 5495 if (TLI->isCommutativeBinOp(Opcode)) { 5496 std::swap(N1, N2); 5497 } else { 5498 switch (Opcode) { 5499 case ISD::SIGN_EXTEND_INREG: 5500 case ISD::SUB: 5501 return getUNDEF(VT); // fold op(undef, arg2) -> undef 5502 case ISD::UDIV: 5503 case ISD::SDIV: 5504 case ISD::UREM: 5505 case ISD::SREM: 5506 case ISD::SSUBSAT: 5507 case ISD::USUBSAT: 5508 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 5509 } 5510 } 5511 } 5512 5513 // Fold a bunch of operators when the RHS is undef. 5514 if (N2.isUndef()) { 5515 switch (Opcode) { 5516 case ISD::XOR: 5517 if (N1.isUndef()) 5518 // Handle undef ^ undef -> 0 special case. This is a common 5519 // idiom (misuse). 5520 return getConstant(0, DL, VT); 5521 LLVM_FALLTHROUGH; 5522 case ISD::ADD: 5523 case ISD::SUB: 5524 case ISD::UDIV: 5525 case ISD::SDIV: 5526 case ISD::UREM: 5527 case ISD::SREM: 5528 return getUNDEF(VT); // fold op(arg1, undef) -> undef 5529 case ISD::MUL: 5530 case ISD::AND: 5531 case ISD::SSUBSAT: 5532 case ISD::USUBSAT: 5533 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 5534 case ISD::OR: 5535 case ISD::SADDSAT: 5536 case ISD::UADDSAT: 5537 return getAllOnesConstant(DL, VT); 5538 } 5539 } 5540 5541 // Memoize this node if possible. 
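// Nodes that produce MVT::Glue are never CSE'd; everything else is looked up
// in the CSEMap so that structurally identical nodes are shared. A minimal
// caller-side sketch (the value names are illustrative, not from this file):
//   SDValue A = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
//   SDValue B = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
//   // A and B refer to the same SDNode; the second call hits the CSEMap.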
5542 SDNode *N; 5543 SDVTList VTs = getVTList(VT); 5544 SDValue Ops[] = {N1, N2}; 5545 if (VT != MVT::Glue) { 5546 FoldingSetNodeID ID; 5547 AddNodeIDNode(ID, Opcode, VTs, Ops); 5548 void *IP = nullptr; 5549 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5550 E->intersectFlagsWith(Flags); 5551 return SDValue(E, 0); 5552 } 5553 5554 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5555 N->setFlags(Flags); 5556 createOperands(N, Ops); 5557 CSEMap.InsertNode(N, IP); 5558 } else { 5559 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5560 createOperands(N, Ops); 5561 } 5562 5563 InsertNode(N); 5564 SDValue V = SDValue(N, 0); 5565 NewSDValueDbgMsg(V, "Creating new node: ", this); 5566 return V; 5567 } 5568 5569 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5570 SDValue N1, SDValue N2, SDValue N3, 5571 const SDNodeFlags Flags) { 5572 // Perform various simplifications. 5573 switch (Opcode) { 5574 case ISD::FMA: { 5575 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5576 assert(N1.getValueType() == VT && N2.getValueType() == VT && 5577 N3.getValueType() == VT && "FMA types must match!"); 5578 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5579 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5580 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 5581 if (N1CFP && N2CFP && N3CFP) { 5582 APFloat V1 = N1CFP->getValueAPF(); 5583 const APFloat &V2 = N2CFP->getValueAPF(); 5584 const APFloat &V3 = N3CFP->getValueAPF(); 5585 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 5586 return getConstantFP(V1, DL, VT); 5587 } 5588 break; 5589 } 5590 case ISD::BUILD_VECTOR: { 5591 // Attempt to simplify BUILD_VECTOR. 5592 SDValue Ops[] = {N1, N2, N3}; 5593 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5594 return V; 5595 break; 5596 } 5597 case ISD::CONCAT_VECTORS: { 5598 SDValue Ops[] = {N1, N2, N3}; 5599 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5600 return V; 5601 break; 5602 } 5603 case ISD::SETCC: { 5604 assert(VT.isInteger() && "SETCC result type must be an integer!"); 5605 assert(N1.getValueType() == N2.getValueType() && 5606 "SETCC operands must have the same type!"); 5607 assert(VT.isVector() == N1.getValueType().isVector() && 5608 "SETCC type should be vector iff the operand type is vector!"); 5609 assert((!VT.isVector() || VT.getVectorElementCount() == 5610 N1.getValueType().getVectorElementCount()) && 5611 "SETCC vector element counts must match!"); 5612 // Use FoldSetCC to simplify SETCC's. 5613 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 5614 return V; 5615 // Vector constant folding. 5616 SDValue Ops[] = {N1, N2, N3}; 5617 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 5618 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 5619 return V; 5620 } 5621 break; 5622 } 5623 case ISD::SELECT: 5624 case ISD::VSELECT: 5625 if (SDValue V = simplifySelect(N1, N2, N3)) 5626 return V; 5627 break; 5628 case ISD::VECTOR_SHUFFLE: 5629 llvm_unreachable("should use getVectorShuffle constructor!"); 5630 case ISD::INSERT_VECTOR_ELT: { 5631 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 5632 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except 5633 // for scalable vectors where we will generate appropriate code to 5634 // deal with out-of-bounds cases correctly. 
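// For example, (insert_vector_elt v4i32 V, X, 7) becomes UNDEF for a
// fixed-length vector, since lane 7 does not exist.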
5635 if (N3C && N1.getValueType().isFixedLengthVector() && 5636 N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 5637 return getUNDEF(VT); 5638 5639 // Undefined index can be assumed out-of-bounds, so that's UNDEF too. 5640 if (N3.isUndef()) 5641 return getUNDEF(VT); 5642 5643 // If the inserted element is an UNDEF, just use the input vector. 5644 if (N2.isUndef()) 5645 return N1; 5646 5647 break; 5648 } 5649 case ISD::INSERT_SUBVECTOR: { 5650 // Inserting undef into undef is still undef. 5651 if (N1.isUndef() && N2.isUndef()) 5652 return getUNDEF(VT); 5653 assert(VT.isVector() && N1.getValueType().isVector() && 5654 N2.getValueType().isVector() && 5655 "Insert subvector VTs must be a vectors"); 5656 assert(VT == N1.getValueType() && 5657 "Dest and insert subvector source types must match!"); 5658 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() && 5659 "Insert subvector must be from smaller vector to larger vector!"); 5660 assert(isa<ConstantSDNode>(N3) && 5661 "Insert subvector index must be constant"); 5662 assert(N2.getValueType().getVectorNumElements() + 5663 cast<ConstantSDNode>(N3)->getZExtValue() <= 5664 VT.getVectorNumElements() && 5665 "Insert subvector overflow!"); 5666 5667 // Trivial insertion. 5668 if (VT == N2.getValueType()) 5669 return N2; 5670 5671 // If this is an insert of an extracted vector into an undef vector, we 5672 // can just use the input to the extract. 5673 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5674 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT) 5675 return N2.getOperand(0); 5676 break; 5677 } 5678 case ISD::BITCAST: 5679 // Fold bit_convert nodes from a type to themselves. 5680 if (N1.getValueType() == VT) 5681 return N1; 5682 break; 5683 } 5684 5685 // Memoize node if it doesn't produce a flag. 5686 SDNode *N; 5687 SDVTList VTs = getVTList(VT); 5688 SDValue Ops[] = {N1, N2, N3}; 5689 if (VT != MVT::Glue) { 5690 FoldingSetNodeID ID; 5691 AddNodeIDNode(ID, Opcode, VTs, Ops); 5692 void *IP = nullptr; 5693 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5694 E->intersectFlagsWith(Flags); 5695 return SDValue(E, 0); 5696 } 5697 5698 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5699 N->setFlags(Flags); 5700 createOperands(N, Ops); 5701 CSEMap.InsertNode(N, IP); 5702 } else { 5703 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5704 createOperands(N, Ops); 5705 } 5706 5707 InsertNode(N); 5708 SDValue V = SDValue(N, 0); 5709 NewSDValueDbgMsg(V, "Creating new node: ", this); 5710 return V; 5711 } 5712 5713 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5714 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 5715 SDValue Ops[] = { N1, N2, N3, N4 }; 5716 return getNode(Opcode, DL, VT, Ops); 5717 } 5718 5719 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5720 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 5721 SDValue N5) { 5722 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5723 return getNode(Opcode, DL, VT, Ops); 5724 } 5725 5726 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 5727 /// the incoming stack arguments to be loaded from the stack. 5728 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 5729 SmallVector<SDValue, 8> ArgChains; 5730 5731 // Include the original chain at the beginning of the list. When this is 5732 // used by target LowerCall hooks, this helps legalize find the 5733 // CALLSEQ_BEGIN node. 
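// After the original chain, the loop below appends the chain result of every
// load whose address is a fixed (negative-index) frame object, i.e. every
// load of an incoming stack argument.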
5734 ArgChains.push_back(Chain); 5735 5736 // Add a chain value for each stack argument. 5737 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 5738 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 5739 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 5740 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 5741 if (FI->getIndex() < 0) 5742 ArgChains.push_back(SDValue(L, 1)); 5743 5744 // Build a tokenfactor for all the chains. 5745 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 5746 } 5747 5748 /// getMemsetValue - Vectorized representation of the memset value 5749 /// operand. 5750 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 5751 const SDLoc &dl) { 5752 assert(!Value.isUndef()); 5753 5754 unsigned NumBits = VT.getScalarSizeInBits(); 5755 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 5756 assert(C->getAPIntValue().getBitWidth() == 8); 5757 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 5758 if (VT.isInteger()) { 5759 bool IsOpaque = VT.getSizeInBits() > 64 || 5760 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); 5761 return DAG.getConstant(Val, dl, VT, false, IsOpaque); 5762 } 5763 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 5764 VT); 5765 } 5766 5767 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 5768 EVT IntVT = VT.getScalarType(); 5769 if (!IntVT.isInteger()) 5770 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 5771 5772 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 5773 if (NumBits > 8) { 5774 // Use a multiplication with 0x010101... to extend the input to the 5775 // required length. 5776 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 5777 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 5778 DAG.getConstant(Magic, dl, IntVT)); 5779 } 5780 5781 if (VT != Value.getValueType() && !VT.isInteger()) 5782 Value = DAG.getBitcast(VT.getScalarType(), Value); 5783 if (VT != Value.getValueType()) 5784 Value = DAG.getSplatBuildVector(VT, dl, Value); 5785 5786 return Value; 5787 } 5788 5789 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 5790 /// used when a memcpy is turned into a memset when the source is a constant 5791 /// string ptr. 5792 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 5793 const TargetLowering &TLI, 5794 const ConstantDataArraySlice &Slice) { 5795 // Handle vector with all elements zero. 5796 if (Slice.Array == nullptr) { 5797 if (VT.isInteger()) 5798 return DAG.getConstant(0, dl, VT); 5799 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 5800 return DAG.getConstantFP(0.0, dl, VT); 5801 else if (VT.isVector()) { 5802 unsigned NumElts = VT.getVectorNumElements(); 5803 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? 
MVT::i32 : MVT::i64; 5804 return DAG.getNode(ISD::BITCAST, dl, VT, 5805 DAG.getConstant(0, dl, 5806 EVT::getVectorVT(*DAG.getContext(), 5807 EltVT, NumElts))); 5808 } else 5809 llvm_unreachable("Expected type!"); 5810 } 5811 5812 assert(!VT.isVector() && "Can't handle vector type here!"); 5813 unsigned NumVTBits = VT.getSizeInBits(); 5814 unsigned NumVTBytes = NumVTBits / 8; 5815 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 5816 5817 APInt Val(NumVTBits, 0); 5818 if (DAG.getDataLayout().isLittleEndian()) { 5819 for (unsigned i = 0; i != NumBytes; ++i) 5820 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 5821 } else { 5822 for (unsigned i = 0; i != NumBytes; ++i) 5823 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 5824 } 5825 5826 // If the "cost" of materializing the integer immediate is less than the cost 5827 // of a load, then it is cost effective to turn the load into the immediate. 5828 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 5829 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 5830 return DAG.getConstant(Val, dl, VT); 5831 return SDValue(nullptr, 0); 5832 } 5833 5834 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, int64_t Offset, 5835 const SDLoc &DL, 5836 const SDNodeFlags Flags) { 5837 EVT VT = Base.getValueType(); 5838 return getMemBasePlusOffset(Base, getConstant(Offset, DL, VT), DL, Flags); 5839 } 5840 5841 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset, 5842 const SDLoc &DL, 5843 const SDNodeFlags Flags) { 5844 assert(Offset.getValueType().isInteger()); 5845 EVT BasePtrVT = Ptr.getValueType(); 5846 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags); 5847 } 5848 5849 /// Returns true if memcpy source is constant data. 5850 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 5851 uint64_t SrcDelta = 0; 5852 GlobalAddressSDNode *G = nullptr; 5853 if (Src.getOpcode() == ISD::GlobalAddress) 5854 G = cast<GlobalAddressSDNode>(Src); 5855 else if (Src.getOpcode() == ISD::ADD && 5856 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 5857 Src.getOperand(1).getOpcode() == ISD::Constant) { 5858 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 5859 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 5860 } 5861 if (!G) 5862 return false; 5863 5864 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 5865 SrcDelta + G->getOffset()); 5866 } 5867 5868 static bool shouldLowerMemFuncForSize(const MachineFunction &MF, 5869 SelectionDAG &DAG) { 5870 // On Darwin, -Os means optimize for size without hurting performance, so 5871 // only really optimize for size when -Oz (MinSize) is used. 5872 if (MF.getTarget().getTargetTriple().isOSDarwin()) 5873 return MF.getFunction().hasMinSize(); 5874 return DAG.shouldOptForSize(); 5875 } 5876 5877 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, 5878 SmallVector<SDValue, 32> &OutChains, unsigned From, 5879 unsigned To, SmallVector<SDValue, 16> &OutLoadChains, 5880 SmallVector<SDValue, 16> &OutStoreChains) { 5881 assert(OutLoadChains.size() && "Missing loads in memcpy inlining"); 5882 assert(OutStoreChains.size() && "Missing stores in memcpy inlining"); 5883 SmallVector<SDValue, 16> GluedLoadChains; 5884 for (unsigned i = From; i < To; ++i) { 5885 OutChains.push_back(OutLoadChains[i]); 5886 GluedLoadChains.push_back(OutLoadChains[i]); 5887 } 5888 5889 // Chain for all loads. 
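// The TokenFactor below merges the chains of this group of loads so that
// every store issued afterwards depends on all of them, keeping the loads
// and stores of this memcpy chunk ganged together.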
5890 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 5891 GluedLoadChains); 5892 5893 for (unsigned i = From; i < To; ++i) { 5894 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]); 5895 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(), 5896 ST->getBasePtr(), ST->getMemoryVT(), 5897 ST->getMemOperand()); 5898 OutChains.push_back(NewStore); 5899 } 5900 } 5901 5902 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5903 SDValue Chain, SDValue Dst, SDValue Src, 5904 uint64_t Size, Align Alignment, 5905 bool isVol, bool AlwaysInline, 5906 MachinePointerInfo DstPtrInfo, 5907 MachinePointerInfo SrcPtrInfo) { 5908 // Turn a memcpy of undef to nop. 5909 // FIXME: We need to honor volatile even is Src is undef. 5910 if (Src.isUndef()) 5911 return Chain; 5912 5913 // Expand memcpy to a series of load and store ops if the size operand falls 5914 // below a certain threshold. 5915 // TODO: In the AlwaysInline case, if the size is big then generate a loop 5916 // rather than maybe a humongous number of loads and stores. 5917 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5918 const DataLayout &DL = DAG.getDataLayout(); 5919 LLVMContext &C = *DAG.getContext(); 5920 std::vector<EVT> MemOps; 5921 bool DstAlignCanChange = false; 5922 MachineFunction &MF = DAG.getMachineFunction(); 5923 MachineFrameInfo &MFI = MF.getFrameInfo(); 5924 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 5925 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5926 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5927 DstAlignCanChange = true; 5928 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src); 5929 if (!SrcAlign || Alignment > *SrcAlign) 5930 SrcAlign = Alignment; 5931 assert(SrcAlign && "SrcAlign must be set"); 5932 ConstantDataArraySlice Slice; 5933 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); 5934 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 5935 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 5936 const MemOp Op = isZeroConstant 5937 ? MemOp::Set(Size, DstAlignCanChange, Alignment, 5938 /*IsZeroMemset*/ true, isVol) 5939 : MemOp::Copy(Size, DstAlignCanChange, Alignment, 5940 *SrcAlign, isVol, CopyFromConstant); 5941 if (!TLI.findOptimalMemOpLowering( 5942 MemOps, Limit, Op, DstPtrInfo.getAddrSpace(), 5943 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes())) 5944 return SDValue(); 5945 5946 if (DstAlignCanChange) { 5947 Type *Ty = MemOps[0].getTypeForEVT(C); 5948 Align NewAlign = DL.getABITypeAlign(Ty); 5949 5950 // Don't promote to an alignment that would require dynamic stack 5951 // realignment. 5952 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 5953 if (!TRI->needsStackRealignment(MF)) 5954 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) 5955 NewAlign = NewAlign / 2; 5956 5957 if (NewAlign > Alignment) { 5958 // Give the stack frame object a larger alignment if needed. 5959 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 5960 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5961 Alignment = NewAlign; 5962 } 5963 } 5964 5965 MachineMemOperand::Flags MMOFlags = 5966 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5967 SmallVector<SDValue, 16> OutLoadChains; 5968 SmallVector<SDValue, 16> OutStoreChains; 5969 SmallVector<SDValue, 32> OutChains; 5970 unsigned NumMemOps = MemOps.size(); 5971 uint64_t SrcOff = 0, DstOff = 0; 5972 for (unsigned i = 0; i != NumMemOps; ++i) { 5973 EVT VT = MemOps[i]; 5974 unsigned VTSize = VT.getSizeInBits() / 8; 5975 SDValue Value, Store; 5976 5977 if (VTSize > Size) { 5978 // Issuing an unaligned load / store pair that overlaps with the previous 5979 // pair. Adjust the offset accordingly. 5980 assert(i == NumMemOps-1 && i != 0); 5981 SrcOff -= VTSize - Size; 5982 DstOff -= VTSize - Size; 5983 } 5984 5985 if (CopyFromConstant && 5986 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 5987 // It's unlikely a store of a vector immediate can be done in a single 5988 // instruction. It would require a load from a constantpool first. 5989 // We only handle zero vectors here. 5990 // FIXME: Handle other cases where store of vector immediate is done in 5991 // a single instruction. 5992 ConstantDataArraySlice SubSlice; 5993 if (SrcOff < Slice.Length) { 5994 SubSlice = Slice; 5995 SubSlice.move(SrcOff); 5996 } else { 5997 // This is an out-of-bounds access and hence UB. Pretend we read zero. 5998 SubSlice.Array = nullptr; 5999 SubSlice.Offset = 0; 6000 SubSlice.Length = VTSize; 6001 } 6002 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); 6003 if (Value.getNode()) { 6004 Store = DAG.getStore( 6005 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6006 DstPtrInfo.getWithOffset(DstOff), Alignment.value(), MMOFlags); 6007 OutChains.push_back(Store); 6008 } 6009 } 6010 6011 if (!Store.getNode()) { 6012 // The type might not be legal for the target. This should only happen 6013 // if the type is smaller than a legal type, as on PPC, so the right 6014 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 6015 // to Load/Store if NVT==VT. 6016 // FIXME does the case above also need this? 6017 EVT NVT = TLI.getTypeToTransformTo(C, VT); 6018 assert(NVT.bitsGE(VT)); 6019 6020 bool isDereferenceable = 6021 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 6022 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 6023 if (isDereferenceable) 6024 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 6025 6026 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 6027 DAG.getMemBasePlusOffset(Src, SrcOff, dl), 6028 SrcPtrInfo.getWithOffset(SrcOff), VT, 6029 commonAlignment(*SrcAlign, SrcOff).value(), 6030 SrcMMOFlags); 6031 OutLoadChains.push_back(Value.getValue(1)); 6032 6033 Store = DAG.getTruncStore( 6034 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6035 DstPtrInfo.getWithOffset(DstOff), VT, Alignment.value(), MMOFlags); 6036 OutStoreChains.push_back(Store); 6037 } 6038 SrcOff += VTSize; 6039 DstOff += VTSize; 6040 Size -= VTSize; 6041 } 6042 6043 unsigned GluedLdStLimit = MaxLdStGlue == 0 ? 6044 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue; 6045 unsigned NumLdStInMemcpy = OutStoreChains.size(); 6046 6047 if (NumLdStInMemcpy) { 6048 // It may be that memcpy might be converted to memset if it's memcpy 6049 // of constants. In such a case, we won't have loads and stores, but 6050 // just stores. In the absence of loads, there is nothing to gang up. 6051 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) { 6052 // If target does not care, just leave as it. 
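// (That is, emit each load chain and store chain directly, with no gluing.)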
6053 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) { 6054 OutChains.push_back(OutLoadChains[i]); 6055 OutChains.push_back(OutStoreChains[i]); 6056 } 6057 } else { 6058 // Ld/St less than/equal limit set by target. 6059 if (NumLdStInMemcpy <= GluedLdStLimit) { 6060 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 6061 NumLdStInMemcpy, OutLoadChains, 6062 OutStoreChains); 6063 } else { 6064 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit; 6065 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit; 6066 unsigned GlueIter = 0; 6067 6068 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) { 6069 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit; 6070 unsigned IndexTo = NumLdStInMemcpy - GlueIter; 6071 6072 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo, 6073 OutLoadChains, OutStoreChains); 6074 GlueIter += GluedLdStLimit; 6075 } 6076 6077 // Residual ld/st. 6078 if (RemainingLdStInMemcpy) { 6079 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 6080 RemainingLdStInMemcpy, OutLoadChains, 6081 OutStoreChains); 6082 } 6083 } 6084 } 6085 } 6086 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6087 } 6088 6089 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 6090 SDValue Chain, SDValue Dst, SDValue Src, 6091 uint64_t Size, Align Alignment, 6092 bool isVol, bool AlwaysInline, 6093 MachinePointerInfo DstPtrInfo, 6094 MachinePointerInfo SrcPtrInfo) { 6095 // Turn a memmove of undef to nop. 6096 // FIXME: We need to honor volatile even is Src is undef. 6097 if (Src.isUndef()) 6098 return Chain; 6099 6100 // Expand memmove to a series of load and store ops if the size operand falls 6101 // below a certain threshold. 6102 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6103 const DataLayout &DL = DAG.getDataLayout(); 6104 LLVMContext &C = *DAG.getContext(); 6105 std::vector<EVT> MemOps; 6106 bool DstAlignCanChange = false; 6107 MachineFunction &MF = DAG.getMachineFunction(); 6108 MachineFrameInfo &MFI = MF.getFrameInfo(); 6109 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6110 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6111 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6112 DstAlignCanChange = true; 6113 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src); 6114 if (!SrcAlign || Alignment > *SrcAlign) 6115 SrcAlign = Alignment; 6116 assert(SrcAlign && "SrcAlign must be set"); 6117 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 6118 if (!TLI.findOptimalMemOpLowering( 6119 MemOps, Limit, 6120 MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign, 6121 /*IsVolatile*/ true), 6122 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), 6123 MF.getFunction().getAttributes())) 6124 return SDValue(); 6125 6126 if (DstAlignCanChange) { 6127 Type *Ty = MemOps[0].getTypeForEVT(C); 6128 Align NewAlign = DL.getABITypeAlign(Ty); 6129 if (NewAlign > Alignment) { 6130 // Give the stack frame object a larger alignment if needed. 6131 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 6132 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6133 Alignment = NewAlign; 6134 } 6135 } 6136 6137 MachineMemOperand::Flags MMOFlags = 6138 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 6139 uint64_t SrcOff = 0, DstOff = 0; 6140 SmallVector<SDValue, 8> LoadValues; 6141 SmallVector<SDValue, 8> LoadChains; 6142 SmallVector<SDValue, 8> OutChains; 6143 unsigned NumMemOps = MemOps.size(); 6144 for (unsigned i = 0; i < NumMemOps; i++) { 6145 EVT VT = MemOps[i]; 6146 unsigned VTSize = VT.getSizeInBits() / 8; 6147 SDValue Value; 6148 6149 bool isDereferenceable = 6150 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 6151 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 6152 if (isDereferenceable) 6153 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 6154 6155 Value = DAG.getLoad( 6156 VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), 6157 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign->value(), SrcMMOFlags); 6158 LoadValues.push_back(Value); 6159 LoadChains.push_back(Value.getValue(1)); 6160 SrcOff += VTSize; 6161 } 6162 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 6163 OutChains.clear(); 6164 for (unsigned i = 0; i < NumMemOps; i++) { 6165 EVT VT = MemOps[i]; 6166 unsigned VTSize = VT.getSizeInBits() / 8; 6167 SDValue Store; 6168 6169 Store = DAG.getStore( 6170 Chain, dl, LoadValues[i], DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6171 DstPtrInfo.getWithOffset(DstOff), Alignment.value(), MMOFlags); 6172 OutChains.push_back(Store); 6173 DstOff += VTSize; 6174 } 6175 6176 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6177 } 6178 6179 /// Lower the call to the 'memset' intrinsic function into a series of store 6180 /// operations. 6181 /// 6182 /// \param DAG Selection DAG where lowered code is placed. 6183 /// \param dl Link to corresponding IR location. 6184 /// \param Chain Control flow dependency. 6185 /// \param Dst Pointer to destination memory location. 6186 /// \param Src Value of byte to write into the memory. 6187 /// \param Size Number of bytes to write. 6188 /// \param Alignment Alignment of the destination in bytes. 6189 /// \param isVol True if destination is volatile. 6190 /// \param DstPtrInfo IR information on the memory pointer. 6191 /// \returns New head in the control flow, if lowering was successful, empty 6192 /// SDValue otherwise. 6193 /// 6194 /// The function tries to replace the 'llvm.memset' intrinsic with several 6195 /// store operations and value calculation code. This is usually profitable for 6196 /// small memory sizes. 6197 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, 6198 SDValue Chain, SDValue Dst, SDValue Src, 6199 uint64_t Size, Align Alignment, bool isVol, 6200 MachinePointerInfo DstPtrInfo) { 6201 // Turn a memset of undef to nop. 6202 // FIXME: We need to honor volatile even if Src is undef. 6203 if (Src.isUndef()) 6204 return Chain; 6205 6206 // Expand memset to a series of store ops if the size operand 6207 // falls below a certain threshold.
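// For example, a 16-byte memset with the constant byte 0xAB on a typical
// 64-bit target becomes two i64 stores of 0xABABABABABABABAB, using the
// splat value computed by getMemsetValue above (the exact store types are
// chosen by findOptimalMemOpLowering, so this is only illustrative).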
6208 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6209 std::vector<EVT> MemOps; 6210 bool DstAlignCanChange = false; 6211 MachineFunction &MF = DAG.getMachineFunction(); 6212 MachineFrameInfo &MFI = MF.getFrameInfo(); 6213 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6214 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6215 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6216 DstAlignCanChange = true; 6217 bool IsZeroVal = 6218 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 6219 if (!TLI.findOptimalMemOpLowering( 6220 MemOps, TLI.getMaxStoresPerMemset(OptSize), 6221 MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol), 6222 DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes())) 6223 return SDValue(); 6224 6225 if (DstAlignCanChange) { 6226 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 6227 Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty); 6228 if (NewAlign > Alignment) { 6229 // Give the stack frame object a larger alignment if needed. 6230 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 6231 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6232 Alignment = NewAlign; 6233 } 6234 } 6235 6236 SmallVector<SDValue, 8> OutChains; 6237 uint64_t DstOff = 0; 6238 unsigned NumMemOps = MemOps.size(); 6239 6240 // Find the largest store and generate the bit pattern for it. 6241 EVT LargestVT = MemOps[0]; 6242 for (unsigned i = 1; i < NumMemOps; i++) 6243 if (MemOps[i].bitsGT(LargestVT)) 6244 LargestVT = MemOps[i]; 6245 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 6246 6247 for (unsigned i = 0; i < NumMemOps; i++) { 6248 EVT VT = MemOps[i]; 6249 unsigned VTSize = VT.getSizeInBits() / 8; 6250 if (VTSize > Size) { 6251 // Issuing an unaligned load / store pair that overlaps with the previous 6252 // pair. Adjust the offset accordingly. 6253 assert(i == NumMemOps-1 && i != 0); 6254 DstOff -= VTSize - Size; 6255 } 6256 6257 // If this store is smaller than the largest store see whether we can get 6258 // the smaller value for free with a truncate. 6259 SDValue Value = MemSetValue; 6260 if (VT.bitsLT(LargestVT)) { 6261 if (!LargestVT.isVector() && !VT.isVector() && 6262 TLI.isTruncateFree(LargestVT, VT)) 6263 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 6264 else 6265 Value = getMemsetValue(Src, VT, DAG, dl); 6266 } 6267 assert(Value.getValueType() == VT && "Value with wrong type."); 6268 SDValue Store = DAG.getStore( 6269 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6270 DstPtrInfo.getWithOffset(DstOff), Alignment.value(), 6271 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 6272 OutChains.push_back(Store); 6273 DstOff += VT.getSizeInBits() / 8; 6274 Size -= VTSize; 6275 } 6276 6277 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6278 } 6279 6280 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 6281 unsigned AS) { 6282 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 6283 // pointer operands can be losslessly bitcasted to pointers of address space 0 6284 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 6285 report_fatal_error("cannot lower memory intrinsic in address space " + 6286 Twine(AS)); 6287 } 6288 } 6289 6290 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 6291 SDValue Src, SDValue Size, Align Alignment, 6292 bool isVol, bool AlwaysInline, bool isTailCall, 6293 MachinePointerInfo DstPtrInfo, 6294 MachinePointerInfo SrcPtrInfo) { 6295 // Check to see if we should lower the memcpy to loads and stores first. 6296 // For cases within the target-specified limits, this is the best choice. 6297 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6298 if (ConstantSize) { 6299 // Memcpy with size zero? Just return the original chain. 6300 if (ConstantSize->isNullValue()) 6301 return Chain; 6302 6303 SDValue Result = getMemcpyLoadsAndStores( 6304 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, 6305 isVol, false, DstPtrInfo, SrcPtrInfo); 6306 if (Result.getNode()) 6307 return Result; 6308 } 6309 6310 // Then check to see if we should lower the memcpy with target-specific 6311 // code. If the target chooses to do this, this is the next best. 6312 if (TSI) { 6313 SDValue Result = TSI->EmitTargetCodeForMemcpy( 6314 *this, dl, Chain, Dst, Src, Size, Alignment.value(), isVol, 6315 AlwaysInline, DstPtrInfo, SrcPtrInfo); 6316 if (Result.getNode()) 6317 return Result; 6318 } 6319 6320 // If we really need inline code and the target declined to provide it, 6321 // use a (potentially long) sequence of loads and stores. 6322 if (AlwaysInline) { 6323 assert(ConstantSize && "AlwaysInline requires a constant size!"); 6324 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6325 ConstantSize->getZExtValue(), Alignment, 6326 isVol, true, DstPtrInfo, SrcPtrInfo); 6327 } 6328 6329 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6330 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6331 6332 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 6333 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 6334 // respect volatile, so they may do things like read or write memory 6335 // beyond the given memory regions. But fixing this isn't easy, and most 6336 // people don't care. 6337 6338 // Emit a library call. 
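// Illustrative note, not part of the original source: the call built below has
// the C shape "memcpy(i8* dst, i8* src, size_t n)"; its return value is marked
// as discarded and only the output chain of the lowered call is handed back to
// the caller.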
6339 TargetLowering::ArgListTy Args; 6340 TargetLowering::ArgListEntry Entry; 6341 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6342 Entry.Node = Dst; Args.push_back(Entry); 6343 Entry.Node = Src; Args.push_back(Entry); 6344 6345 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6346 Entry.Node = Size; Args.push_back(Entry); 6347 // FIXME: pass in SDLoc 6348 TargetLowering::CallLoweringInfo CLI(*this); 6349 CLI.setDebugLoc(dl) 6350 .setChain(Chain) 6351 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 6352 Dst.getValueType().getTypeForEVT(*getContext()), 6353 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 6354 TLI->getPointerTy(getDataLayout())), 6355 std::move(Args)) 6356 .setDiscardResult() 6357 .setTailCall(isTailCall); 6358 6359 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6360 return CallResult.second; 6361 } 6362 6363 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, 6364 SDValue Dst, unsigned DstAlign, 6365 SDValue Src, unsigned SrcAlign, 6366 SDValue Size, Type *SizeTy, 6367 unsigned ElemSz, bool isTailCall, 6368 MachinePointerInfo DstPtrInfo, 6369 MachinePointerInfo SrcPtrInfo) { 6370 // Emit a library call. 6371 TargetLowering::ArgListTy Args; 6372 TargetLowering::ArgListEntry Entry; 6373 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6374 Entry.Node = Dst; 6375 Args.push_back(Entry); 6376 6377 Entry.Node = Src; 6378 Args.push_back(Entry); 6379 6380 Entry.Ty = SizeTy; 6381 Entry.Node = Size; 6382 Args.push_back(Entry); 6383 6384 RTLIB::Libcall LibraryCall = 6385 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6386 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6387 report_fatal_error("Unsupported element size"); 6388 6389 TargetLowering::CallLoweringInfo CLI(*this); 6390 CLI.setDebugLoc(dl) 6391 .setChain(Chain) 6392 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6393 Type::getVoidTy(*getContext()), 6394 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6395 TLI->getPointerTy(getDataLayout())), 6396 std::move(Args)) 6397 .setDiscardResult() 6398 .setTailCall(isTailCall); 6399 6400 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6401 return CallResult.second; 6402 } 6403 6404 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 6405 SDValue Src, SDValue Size, Align Alignment, 6406 bool isVol, bool isTailCall, 6407 MachinePointerInfo DstPtrInfo, 6408 MachinePointerInfo SrcPtrInfo) { 6409 // Check to see if we should lower the memmove to loads and stores first. 6410 // For cases within the target-specified limits, this is the best choice. 6411 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6412 if (ConstantSize) { 6413 // Memmove with size zero? Just return the original chain. 6414 if (ConstantSize->isNullValue()) 6415 return Chain; 6416 6417 SDValue Result = getMemmoveLoadsAndStores( 6418 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, 6419 isVol, false, DstPtrInfo, SrcPtrInfo); 6420 if (Result.getNode()) 6421 return Result; 6422 } 6423 6424 // Then check to see if we should lower the memmove with target-specific 6425 // code. If the target chooses to do this, this is the next best. 
6426 if (TSI) { 6427 SDValue Result = TSI->EmitTargetCodeForMemmove( 6428 *this, dl, Chain, Dst, Src, Size, Alignment.value(), isVol, DstPtrInfo, 6429 SrcPtrInfo); 6430 if (Result.getNode()) 6431 return Result; 6432 } 6433 6434 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6435 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6436 6437 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 6438 // not be safe. See memcpy above for more details. 6439 6440 // Emit a library call. 6441 TargetLowering::ArgListTy Args; 6442 TargetLowering::ArgListEntry Entry; 6443 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6444 Entry.Node = Dst; Args.push_back(Entry); 6445 Entry.Node = Src; Args.push_back(Entry); 6446 6447 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6448 Entry.Node = Size; Args.push_back(Entry); 6449 // FIXME: pass in SDLoc 6450 TargetLowering::CallLoweringInfo CLI(*this); 6451 CLI.setDebugLoc(dl) 6452 .setChain(Chain) 6453 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 6454 Dst.getValueType().getTypeForEVT(*getContext()), 6455 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 6456 TLI->getPointerTy(getDataLayout())), 6457 std::move(Args)) 6458 .setDiscardResult() 6459 .setTailCall(isTailCall); 6460 6461 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6462 return CallResult.second; 6463 } 6464 6465 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl, 6466 SDValue Dst, unsigned DstAlign, 6467 SDValue Src, unsigned SrcAlign, 6468 SDValue Size, Type *SizeTy, 6469 unsigned ElemSz, bool isTailCall, 6470 MachinePointerInfo DstPtrInfo, 6471 MachinePointerInfo SrcPtrInfo) { 6472 // Emit a library call. 6473 TargetLowering::ArgListTy Args; 6474 TargetLowering::ArgListEntry Entry; 6475 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6476 Entry.Node = Dst; 6477 Args.push_back(Entry); 6478 6479 Entry.Node = Src; 6480 Args.push_back(Entry); 6481 6482 Entry.Ty = SizeTy; 6483 Entry.Node = Size; 6484 Args.push_back(Entry); 6485 6486 RTLIB::Libcall LibraryCall = 6487 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6488 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6489 report_fatal_error("Unsupported element size"); 6490 6491 TargetLowering::CallLoweringInfo CLI(*this); 6492 CLI.setDebugLoc(dl) 6493 .setChain(Chain) 6494 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6495 Type::getVoidTy(*getContext()), 6496 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6497 TLI->getPointerTy(getDataLayout())), 6498 std::move(Args)) 6499 .setDiscardResult() 6500 .setTailCall(isTailCall); 6501 6502 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6503 return CallResult.second; 6504 } 6505 6506 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 6507 SDValue Src, SDValue Size, Align Alignment, 6508 bool isVol, bool isTailCall, 6509 MachinePointerInfo DstPtrInfo) { 6510 // Check to see if we should lower the memset to stores first. 6511 // For cases within the target-specified limits, this is the best choice. 6512 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6513 if (ConstantSize) { 6514 // Memset with size zero? Just return the original chain. 
6515 if (ConstantSize->isNullValue()) 6516 return Chain; 6517 6518 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src, 6519 ConstantSize->getZExtValue(), Alignment, 6520 isVol, DstPtrInfo); 6521 6522 if (Result.getNode()) 6523 return Result; 6524 } 6525 6526 // Then check to see if we should lower the memset with target-specific 6527 // code. If the target chooses to do this, this is the next best. 6528 if (TSI) { 6529 SDValue Result = TSI->EmitTargetCodeForMemset( 6530 *this, dl, Chain, Dst, Src, Size, Alignment.value(), isVol, DstPtrInfo); 6531 if (Result.getNode()) 6532 return Result; 6533 } 6534 6535 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6536 6537 // Emit a library call. 6538 TargetLowering::ArgListTy Args; 6539 TargetLowering::ArgListEntry Entry; 6540 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext()); 6541 Args.push_back(Entry); 6542 Entry.Node = Src; 6543 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 6544 Args.push_back(Entry); 6545 Entry.Node = Size; 6546 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6547 Args.push_back(Entry); 6548 6549 // FIXME: pass in SDLoc 6550 TargetLowering::CallLoweringInfo CLI(*this); 6551 CLI.setDebugLoc(dl) 6552 .setChain(Chain) 6553 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 6554 Dst.getValueType().getTypeForEVT(*getContext()), 6555 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 6556 TLI->getPointerTy(getDataLayout())), 6557 std::move(Args)) 6558 .setDiscardResult() 6559 .setTailCall(isTailCall); 6560 6561 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6562 return CallResult.second; 6563 } 6564 6565 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl, 6566 SDValue Dst, unsigned DstAlign, 6567 SDValue Value, SDValue Size, Type *SizeTy, 6568 unsigned ElemSz, bool isTailCall, 6569 MachinePointerInfo DstPtrInfo) { 6570 // Emit a library call. 
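// Illustrative note (assumption about the runtime libcall table): for
// ElemSz == 4 the lookup below is expected to yield
// RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_4, i.e. the
// __llvm_memset_element_unordered_atomic_4 routine, while an unsupported
// element size takes the report_fatal_error path.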
6571 TargetLowering::ArgListTy Args; 6572 TargetLowering::ArgListEntry Entry; 6573 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6574 Entry.Node = Dst; 6575 Args.push_back(Entry); 6576 6577 Entry.Ty = Type::getInt8Ty(*getContext()); 6578 Entry.Node = Value; 6579 Args.push_back(Entry); 6580 6581 Entry.Ty = SizeTy; 6582 Entry.Node = Size; 6583 Args.push_back(Entry); 6584 6585 RTLIB::Libcall LibraryCall = 6586 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6587 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6588 report_fatal_error("Unsupported element size"); 6589 6590 TargetLowering::CallLoweringInfo CLI(*this); 6591 CLI.setDebugLoc(dl) 6592 .setChain(Chain) 6593 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6594 Type::getVoidTy(*getContext()), 6595 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6596 TLI->getPointerTy(getDataLayout())), 6597 std::move(Args)) 6598 .setDiscardResult() 6599 .setTailCall(isTailCall); 6600 6601 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6602 return CallResult.second; 6603 } 6604 6605 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6606 SDVTList VTList, ArrayRef<SDValue> Ops, 6607 MachineMemOperand *MMO) { 6608 FoldingSetNodeID ID; 6609 ID.AddInteger(MemVT.getRawBits()); 6610 AddNodeIDNode(ID, Opcode, VTList, Ops); 6611 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6612 void* IP = nullptr; 6613 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6614 cast<AtomicSDNode>(E)->refineAlignment(MMO); 6615 return SDValue(E, 0); 6616 } 6617 6618 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6619 VTList, MemVT, MMO); 6620 createOperands(N, Ops); 6621 6622 CSEMap.InsertNode(N, IP); 6623 InsertNode(N); 6624 return SDValue(N, 0); 6625 } 6626 6627 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 6628 EVT MemVT, SDVTList VTs, SDValue Chain, 6629 SDValue Ptr, SDValue Cmp, SDValue Swp, 6630 MachineMemOperand *MMO) { 6631 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 6632 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 6633 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 6634 6635 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 6636 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6637 } 6638 6639 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6640 SDValue Chain, SDValue Ptr, SDValue Val, 6641 MachineMemOperand *MMO) { 6642 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 6643 Opcode == ISD::ATOMIC_LOAD_SUB || 6644 Opcode == ISD::ATOMIC_LOAD_AND || 6645 Opcode == ISD::ATOMIC_LOAD_CLR || 6646 Opcode == ISD::ATOMIC_LOAD_OR || 6647 Opcode == ISD::ATOMIC_LOAD_XOR || 6648 Opcode == ISD::ATOMIC_LOAD_NAND || 6649 Opcode == ISD::ATOMIC_LOAD_MIN || 6650 Opcode == ISD::ATOMIC_LOAD_MAX || 6651 Opcode == ISD::ATOMIC_LOAD_UMIN || 6652 Opcode == ISD::ATOMIC_LOAD_UMAX || 6653 Opcode == ISD::ATOMIC_LOAD_FADD || 6654 Opcode == ISD::ATOMIC_LOAD_FSUB || 6655 Opcode == ISD::ATOMIC_SWAP || 6656 Opcode == ISD::ATOMIC_STORE) && 6657 "Invalid Atomic Op"); 6658 6659 EVT VT = Val.getValueType(); 6660 6661 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? 
getVTList(MVT::Other) : 6662 getVTList(VT, MVT::Other); 6663 SDValue Ops[] = {Chain, Ptr, Val}; 6664 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6665 } 6666 6667 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6668 EVT VT, SDValue Chain, SDValue Ptr, 6669 MachineMemOperand *MMO) { 6670 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 6671 6672 SDVTList VTs = getVTList(VT, MVT::Other); 6673 SDValue Ops[] = {Chain, Ptr}; 6674 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6675 } 6676 6677 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 6678 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 6679 if (Ops.size() == 1) 6680 return Ops[0]; 6681 6682 SmallVector<EVT, 4> VTs; 6683 VTs.reserve(Ops.size()); 6684 for (unsigned i = 0; i < Ops.size(); ++i) 6685 VTs.push_back(Ops[i].getValueType()); 6686 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 6687 } 6688 6689 SDValue SelectionDAG::getMemIntrinsicNode( 6690 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 6691 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, 6692 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) { 6693 if (!Size && MemVT.isScalableVector()) 6694 Size = MemoryLocation::UnknownSize; 6695 else if (!Size) 6696 Size = MemVT.getStoreSize(); 6697 6698 MachineFunction &MF = getMachineFunction(); 6699 MachineMemOperand *MMO = 6700 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo); 6701 6702 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 6703 } 6704 6705 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 6706 SDVTList VTList, 6707 ArrayRef<SDValue> Ops, EVT MemVT, 6708 MachineMemOperand *MMO) { 6709 assert((Opcode == ISD::INTRINSIC_VOID || 6710 Opcode == ISD::INTRINSIC_W_CHAIN || 6711 Opcode == ISD::PREFETCH || 6712 ((int)Opcode <= std::numeric_limits<int>::max() && 6713 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 6714 "Opcode is not a memory-accessing opcode!"); 6715 6716 // Memoize the node unless it returns a flag. 6717 MemIntrinsicSDNode *N; 6718 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6719 FoldingSetNodeID ID; 6720 AddNodeIDNode(ID, Opcode, VTList, Ops); 6721 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 6722 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 6723 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6724 void *IP = nullptr; 6725 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6726 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 6727 return SDValue(E, 0); 6728 } 6729 6730 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6731 VTList, MemVT, MMO); 6732 createOperands(N, Ops); 6733 6734 CSEMap.InsertNode(N, IP); 6735 } else { 6736 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6737 VTList, MemVT, MMO); 6738 createOperands(N, Ops); 6739 } 6740 InsertNode(N); 6741 SDValue V(N, 0); 6742 NewSDValueDbgMsg(V, "Creating new node: ", this); 6743 return V; 6744 } 6745 6746 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, 6747 SDValue Chain, int FrameIndex, 6748 int64_t Size, int64_t Offset) { 6749 const unsigned Opcode = IsStart ? 
ISD::LIFETIME_START : ISD::LIFETIME_END; 6750 const auto VTs = getVTList(MVT::Other); 6751 SDValue Ops[2] = { 6752 Chain, 6753 getFrameIndex(FrameIndex, 6754 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()), 6755 true)}; 6756 6757 FoldingSetNodeID ID; 6758 AddNodeIDNode(ID, Opcode, VTs, Ops); 6759 ID.AddInteger(FrameIndex); 6760 ID.AddInteger(Size); 6761 ID.AddInteger(Offset); 6762 void *IP = nullptr; 6763 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6764 return SDValue(E, 0); 6765 6766 LifetimeSDNode *N = newSDNode<LifetimeSDNode>( 6767 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset); 6768 createOperands(N, Ops); 6769 CSEMap.InsertNode(N, IP); 6770 InsertNode(N); 6771 SDValue V(N, 0); 6772 NewSDValueDbgMsg(V, "Creating new node: ", this); 6773 return V; 6774 } 6775 6776 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6777 /// MachinePointerInfo record from it. This is particularly useful because the 6778 /// code generator has many cases where it doesn't bother passing in a 6779 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6780 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6781 SelectionDAG &DAG, SDValue Ptr, 6782 int64_t Offset = 0) { 6783 // If this is FI+Offset, we can model it. 6784 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 6785 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 6786 FI->getIndex(), Offset); 6787 6788 // If this is (FI+Offset1)+Offset2, we can model it. 6789 if (Ptr.getOpcode() != ISD::ADD || 6790 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 6791 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 6792 return Info; 6793 6794 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 6795 return MachinePointerInfo::getFixedStack( 6796 DAG.getMachineFunction(), FI, 6797 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 6798 } 6799 6800 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6801 /// MachinePointerInfo record from it. This is particularly useful because the 6802 /// code generator has many cases where it doesn't bother passing in a 6803 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6804 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6805 SelectionDAG &DAG, SDValue Ptr, 6806 SDValue OffsetOp) { 6807 // If the 'Offset' value isn't a constant, we can't handle this. 6808 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 6809 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); 6810 if (OffsetOp.isUndef()) 6811 return InferPointerInfo(Info, DAG, Ptr); 6812 return Info; 6813 } 6814 6815 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6816 EVT VT, const SDLoc &dl, SDValue Chain, 6817 SDValue Ptr, SDValue Offset, 6818 MachinePointerInfo PtrInfo, EVT MemVT, 6819 Align Alignment, 6820 MachineMemOperand::Flags MMOFlags, 6821 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6822 assert(Chain.getValueType() == MVT::Other && 6823 "Invalid chain type"); 6824 6825 MMOFlags |= MachineMemOperand::MOLoad; 6826 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 6827 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 6828 // clients. 
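// Illustrative example with hypothetical values, not part of the original
// source: a Ptr of the form "FrameIndex<3> + 8" with no explicit PtrInfo is
// modeled by the inference below as
// MachinePointerInfo::getFixedStack(MF, /*FI=*/3, /*Offset=*/8).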
6829 if (PtrInfo.V.isNull()) 6830 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); 6831 6832 uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize()); 6833 MachineFunction &MF = getMachineFunction(); 6834 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, 6835 Alignment, AAInfo, Ranges); 6836 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 6837 } 6838 6839 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6840 EVT VT, const SDLoc &dl, SDValue Chain, 6841 SDValue Ptr, SDValue Offset, EVT MemVT, 6842 MachineMemOperand *MMO) { 6843 if (VT == MemVT) { 6844 ExtType = ISD::NON_EXTLOAD; 6845 } else if (ExtType == ISD::NON_EXTLOAD) { 6846 assert(VT == MemVT && "Non-extending load from different memory type!"); 6847 } else { 6848 // Extending load. 6849 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 6850 "Should only be an extending load, not truncating!"); 6851 assert(VT.isInteger() == MemVT.isInteger() && 6852 "Cannot convert from FP to Int or Int -> FP!"); 6853 assert(VT.isVector() == MemVT.isVector() && 6854 "Cannot use an ext load to convert to or from a vector!"); 6855 assert((!VT.isVector() || 6856 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 6857 "Cannot use an ext load to change the number of vector elements!"); 6858 } 6859 6860 bool Indexed = AM != ISD::UNINDEXED; 6861 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 6862 6863 SDVTList VTs = Indexed ? 6864 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); 6865 SDValue Ops[] = { Chain, Ptr, Offset }; 6866 FoldingSetNodeID ID; 6867 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); 6868 ID.AddInteger(MemVT.getRawBits()); 6869 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( 6870 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); 6871 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6872 void *IP = nullptr; 6873 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6874 cast<LoadSDNode>(E)->refineAlignment(MMO); 6875 return SDValue(E, 0); 6876 } 6877 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6878 ExtType, MemVT, MMO); 6879 createOperands(N, Ops); 6880 6881 CSEMap.InsertNode(N, IP); 6882 InsertNode(N); 6883 SDValue V(N, 0); 6884 NewSDValueDbgMsg(V, "Creating new node: ", this); 6885 return V; 6886 } 6887 6888 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6889 SDValue Ptr, MachinePointerInfo PtrInfo, 6890 MaybeAlign Alignment, 6891 MachineMemOperand::Flags MMOFlags, 6892 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6893 SDValue Undef = getUNDEF(Ptr.getValueType()); 6894 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6895 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); 6896 } 6897 6898 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6899 SDValue Ptr, MachineMemOperand *MMO) { 6900 SDValue Undef = getUNDEF(Ptr.getValueType()); 6901 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6902 VT, MMO); 6903 } 6904 6905 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6906 EVT VT, SDValue Chain, SDValue Ptr, 6907 MachinePointerInfo PtrInfo, EVT MemVT, 6908 MaybeAlign Alignment, 6909 MachineMemOperand::Flags MMOFlags, 6910 const AAMDNodes &AAInfo) { 6911 SDValue Undef = getUNDEF(Ptr.getValueType()); 6912 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, 6913 MemVT, Alignment, MMOFlags, AAInfo); 
6914 } 6915 6916 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6917 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, 6918 MachineMemOperand *MMO) { 6919 SDValue Undef = getUNDEF(Ptr.getValueType()); 6920 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, 6921 MemVT, MMO); 6922 } 6923 6924 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, 6925 SDValue Base, SDValue Offset, 6926 ISD::MemIndexedMode AM) { 6927 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); 6928 assert(LD->getOffset().isUndef() && "Load is already a indexed load!"); 6929 // Don't propagate the invariant or dereferenceable flags. 6930 auto MMOFlags = 6931 LD->getMemOperand()->getFlags() & 6932 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 6933 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 6934 LD->getChain(), Base, Offset, LD->getPointerInfo(), 6935 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 6936 LD->getAAInfo()); 6937 } 6938 6939 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6940 SDValue Ptr, MachinePointerInfo PtrInfo, 6941 Align Alignment, 6942 MachineMemOperand::Flags MMOFlags, 6943 const AAMDNodes &AAInfo) { 6944 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 6945 6946 MMOFlags |= MachineMemOperand::MOStore; 6947 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6948 6949 if (PtrInfo.V.isNull()) 6950 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6951 6952 MachineFunction &MF = getMachineFunction(); 6953 uint64_t Size = 6954 MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize()); 6955 MachineMemOperand *MMO = 6956 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo); 6957 return getStore(Chain, dl, Val, Ptr, MMO); 6958 } 6959 6960 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6961 SDValue Ptr, MachineMemOperand *MMO) { 6962 assert(Chain.getValueType() == MVT::Other && 6963 "Invalid chain type"); 6964 EVT VT = Val.getValueType(); 6965 SDVTList VTs = getVTList(MVT::Other); 6966 SDValue Undef = getUNDEF(Ptr.getValueType()); 6967 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6968 FoldingSetNodeID ID; 6969 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6970 ID.AddInteger(VT.getRawBits()); 6971 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6972 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 6973 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6974 void *IP = nullptr; 6975 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6976 cast<StoreSDNode>(E)->refineAlignment(MMO); 6977 return SDValue(E, 0); 6978 } 6979 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6980 ISD::UNINDEXED, false, VT, MMO); 6981 createOperands(N, Ops); 6982 6983 CSEMap.InsertNode(N, IP); 6984 InsertNode(N); 6985 SDValue V(N, 0); 6986 NewSDValueDbgMsg(V, "Creating new node: ", this); 6987 return V; 6988 } 6989 6990 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6991 SDValue Ptr, MachinePointerInfo PtrInfo, 6992 EVT SVT, Align Alignment, 6993 MachineMemOperand::Flags MMOFlags, 6994 const AAMDNodes &AAInfo) { 6995 assert(Chain.getValueType() == MVT::Other && 6996 "Invalid chain type"); 6997 6998 MMOFlags |= MachineMemOperand::MOStore; 6999 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 7000 7001 if (PtrInfo.V.isNull()) 7002 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 7003 7004 MachineFunction &MF = getMachineFunction(); 7005 MachineMemOperand 
*MMO = MF.getMachineMemOperand( 7006 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 7007 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 7008 } 7009 7010 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7011 SDValue Ptr, EVT SVT, 7012 MachineMemOperand *MMO) { 7013 EVT VT = Val.getValueType(); 7014 7015 assert(Chain.getValueType() == MVT::Other && 7016 "Invalid chain type"); 7017 if (VT == SVT) 7018 return getStore(Chain, dl, Val, Ptr, MMO); 7019 7020 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 7021 "Should only be a truncating store, not extending!"); 7022 assert(VT.isInteger() == SVT.isInteger() && 7023 "Can't do FP-INT conversion!"); 7024 assert(VT.isVector() == SVT.isVector() && 7025 "Cannot use trunc store to convert to or from a vector!"); 7026 assert((!VT.isVector() || 7027 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 7028 "Cannot use trunc store to change the number of vector elements!"); 7029 7030 SDVTList VTs = getVTList(MVT::Other); 7031 SDValue Undef = getUNDEF(Ptr.getValueType()); 7032 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 7033 FoldingSetNodeID ID; 7034 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7035 ID.AddInteger(SVT.getRawBits()); 7036 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 7037 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 7038 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7039 void *IP = nullptr; 7040 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7041 cast<StoreSDNode>(E)->refineAlignment(MMO); 7042 return SDValue(E, 0); 7043 } 7044 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7045 ISD::UNINDEXED, true, SVT, MMO); 7046 createOperands(N, Ops); 7047 7048 CSEMap.InsertNode(N, IP); 7049 InsertNode(N); 7050 SDValue V(N, 0); 7051 NewSDValueDbgMsg(V, "Creating new node: ", this); 7052 return V; 7053 } 7054 7055 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 7056 SDValue Base, SDValue Offset, 7057 ISD::MemIndexedMode AM) { 7058 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 7059 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 7060 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 7061 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 7062 FoldingSetNodeID ID; 7063 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7064 ID.AddInteger(ST->getMemoryVT().getRawBits()); 7065 ID.AddInteger(ST->getRawSubclassData()); 7066 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 7067 void *IP = nullptr; 7068 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 7069 return SDValue(E, 0); 7070 7071 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7072 ST->isTruncatingStore(), ST->getMemoryVT(), 7073 ST->getMemOperand()); 7074 createOperands(N, Ops); 7075 7076 CSEMap.InsertNode(N, IP); 7077 InsertNode(N); 7078 SDValue V(N, 0); 7079 NewSDValueDbgMsg(V, "Creating new node: ", this); 7080 return V; 7081 } 7082 7083 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 7084 SDValue Base, SDValue Offset, SDValue Mask, 7085 SDValue PassThru, EVT MemVT, 7086 MachineMemOperand *MMO, 7087 ISD::MemIndexedMode AM, 7088 ISD::LoadExtType ExtTy, bool isExpanding) { 7089 bool Indexed = AM != ISD::UNINDEXED; 7090 assert((Indexed || Offset.isUndef()) && 7091 "Unindexed masked load with an offset!"); 7092 SDVTList VTs = Indexed ? 
getVTList(VT, Base.getValueType(), MVT::Other) 7093 : getVTList(VT, MVT::Other); 7094 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru}; 7095 FoldingSetNodeID ID; 7096 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 7097 ID.AddInteger(MemVT.getRawBits()); 7098 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 7099 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO)); 7100 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7101 void *IP = nullptr; 7102 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7103 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 7104 return SDValue(E, 0); 7105 } 7106 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7107 AM, ExtTy, isExpanding, MemVT, MMO); 7108 createOperands(N, Ops); 7109 7110 CSEMap.InsertNode(N, IP); 7111 InsertNode(N); 7112 SDValue V(N, 0); 7113 NewSDValueDbgMsg(V, "Creating new node: ", this); 7114 return V; 7115 } 7116 7117 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, 7118 SDValue Base, SDValue Offset, 7119 ISD::MemIndexedMode AM) { 7120 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad); 7121 assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!"); 7122 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base, 7123 Offset, LD->getMask(), LD->getPassThru(), 7124 LD->getMemoryVT(), LD->getMemOperand(), AM, 7125 LD->getExtensionType(), LD->isExpandingLoad()); 7126 } 7127 7128 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 7129 SDValue Val, SDValue Base, SDValue Offset, 7130 SDValue Mask, EVT MemVT, 7131 MachineMemOperand *MMO, 7132 ISD::MemIndexedMode AM, bool IsTruncating, 7133 bool IsCompressing) { 7134 assert(Chain.getValueType() == MVT::Other && 7135 "Invalid chain type"); 7136 bool Indexed = AM != ISD::UNINDEXED; 7137 assert((Indexed || Offset.isUndef()) && 7138 "Unindexed masked store with an offset!"); 7139 SDVTList VTs = Indexed ? 
getVTList(Base.getValueType(), MVT::Other) 7140 : getVTList(MVT::Other); 7141 SDValue Ops[] = {Chain, Val, Base, Offset, Mask}; 7142 FoldingSetNodeID ID; 7143 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 7144 ID.AddInteger(MemVT.getRawBits()); 7145 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 7146 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO)); 7147 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7148 void *IP = nullptr; 7149 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7150 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 7151 return SDValue(E, 0); 7152 } 7153 auto *N = 7154 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7155 IsTruncating, IsCompressing, MemVT, MMO); 7156 createOperands(N, Ops); 7157 7158 CSEMap.InsertNode(N, IP); 7159 InsertNode(N); 7160 SDValue V(N, 0); 7161 NewSDValueDbgMsg(V, "Creating new node: ", this); 7162 return V; 7163 } 7164 7165 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, 7166 SDValue Base, SDValue Offset, 7167 ISD::MemIndexedMode AM) { 7168 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore); 7169 assert(ST->getOffset().isUndef() && 7170 "Masked store is already a indexed store!"); 7171 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset, 7172 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(), 7173 AM, ST->isTruncatingStore(), ST->isCompressingStore()); 7174 } 7175 7176 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 7177 ArrayRef<SDValue> Ops, 7178 MachineMemOperand *MMO, 7179 ISD::MemIndexType IndexType) { 7180 assert(Ops.size() == 6 && "Incompatible number of operands"); 7181 7182 FoldingSetNodeID ID; 7183 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 7184 ID.AddInteger(VT.getRawBits()); 7185 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 7186 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7187 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7188 void *IP = nullptr; 7189 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7190 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 7191 return SDValue(E, 0); 7192 } 7193 7194 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7195 VTs, VT, MMO, IndexType); 7196 createOperands(N, Ops); 7197 7198 assert(N->getPassThru().getValueType() == N->getValueType(0) && 7199 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 7200 assert(N->getMask().getValueType().getVectorNumElements() == 7201 N->getValueType(0).getVectorNumElements() && 7202 "Vector width mismatch between mask and data"); 7203 assert(N->getIndex().getValueType().getVectorNumElements() >= 7204 N->getValueType(0).getVectorNumElements() && 7205 "Vector width mismatch between index and data"); 7206 assert(isa<ConstantSDNode>(N->getScale()) && 7207 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7208 "Scale should be a constant power of 2"); 7209 7210 CSEMap.InsertNode(N, IP); 7211 InsertNode(N); 7212 SDValue V(N, 0); 7213 NewSDValueDbgMsg(V, "Creating new node: ", this); 7214 return V; 7215 } 7216 7217 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 7218 ArrayRef<SDValue> Ops, 7219 MachineMemOperand *MMO, 7220 ISD::MemIndexType IndexType) { 7221 assert(Ops.size() == 6 && "Incompatible number of operands"); 7222 7223 FoldingSetNodeID ID; 7224 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 7225 ID.AddInteger(VT.getRawBits()); 7226 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 
7227 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7228 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7229 void *IP = nullptr; 7230 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7231 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 7232 return SDValue(E, 0); 7233 } 7234 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7235 VTs, VT, MMO, IndexType); 7236 createOperands(N, Ops); 7237 7238 assert(N->getMask().getValueType().getVectorNumElements() == 7239 N->getValue().getValueType().getVectorNumElements() && 7240 "Vector width mismatch between mask and data"); 7241 assert(N->getIndex().getValueType().getVectorNumElements() >= 7242 N->getValue().getValueType().getVectorNumElements() && 7243 "Vector width mismatch between index and data"); 7244 assert(isa<ConstantSDNode>(N->getScale()) && 7245 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7246 "Scale should be a constant power of 2"); 7247 7248 CSEMap.InsertNode(N, IP); 7249 InsertNode(N); 7250 SDValue V(N, 0); 7251 NewSDValueDbgMsg(V, "Creating new node: ", this); 7252 return V; 7253 } 7254 7255 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) { 7256 // select undef, T, F --> T (if T is a constant), otherwise F 7257 // select, ?, undef, F --> F 7258 // select, ?, T, undef --> T 7259 if (Cond.isUndef()) 7260 return isConstantValueOfAnyType(T) ? T : F; 7261 if (T.isUndef()) 7262 return F; 7263 if (F.isUndef()) 7264 return T; 7265 7266 // select true, T, F --> T 7267 // select false, T, F --> F 7268 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond)) 7269 return CondC->isNullValue() ? F : T; 7270 7271 // TODO: This should simplify VSELECT with constant condition using something 7272 // like this (but check boolean contents to be complete?): 7273 // if (ISD::isBuildVectorAllOnes(Cond.getNode())) 7274 // return T; 7275 // if (ISD::isBuildVectorAllZeros(Cond.getNode())) 7276 // return F; 7277 7278 // select ?, T, T --> T 7279 if (T == F) 7280 return T; 7281 7282 return SDValue(); 7283 } 7284 7285 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) { 7286 // shift undef, Y --> 0 (can always assume that the undef value is 0) 7287 if (X.isUndef()) 7288 return getConstant(0, SDLoc(X.getNode()), X.getValueType()); 7289 // shift X, undef --> undef (because it may shift by the bitwidth) 7290 if (Y.isUndef()) 7291 return getUNDEF(X.getValueType()); 7292 7293 // shift 0, Y --> 0 7294 // shift X, 0 --> X 7295 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y)) 7296 return X; 7297 7298 // shift X, C >= bitwidth(X) --> undef 7299 // All vector elements must be too big (or undef) to avoid partial undefs. 7300 auto isShiftTooBig = [X](ConstantSDNode *Val) { 7301 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits()); 7302 }; 7303 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true)) 7304 return getUNDEF(X.getValueType()); 7305 7306 return SDValue(); 7307 } 7308 7309 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, 7310 SDNodeFlags Flags) { 7311 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand 7312 // (an undef operand can be chosen to be Nan/Inf), then the result of this 7313 // operation is poison. That result can be relaxed to undef. 
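// Illustrative examples of the folds below, not part of the original source:
// "fadd nnan x, NaN" and "fmul ninf undef, y" both simplify to undef.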
7314 ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true); 7315 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true); 7316 bool HasNan = (XC && XC->getValueAPF().isNaN()) || 7317 (YC && YC->getValueAPF().isNaN()); 7318 bool HasInf = (XC && XC->getValueAPF().isInfinity()) || 7319 (YC && YC->getValueAPF().isInfinity()); 7320 7321 if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef())) 7322 return getUNDEF(X.getValueType()); 7323 7324 if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef())) 7325 return getUNDEF(X.getValueType()); 7326 7327 if (!YC) 7328 return SDValue(); 7329 7330 // X + -0.0 --> X 7331 if (Opcode == ISD::FADD) 7332 if (YC->getValueAPF().isNegZero()) 7333 return X; 7334 7335 // X - +0.0 --> X 7336 if (Opcode == ISD::FSUB) 7337 if (YC->getValueAPF().isPosZero()) 7338 return X; 7339 7340 // X * 1.0 --> X 7341 // X / 1.0 --> X 7342 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV) 7343 if (YC->getValueAPF().isExactlyValue(1.0)) 7344 return X; 7345 7346 return SDValue(); 7347 } 7348 7349 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 7350 SDValue Ptr, SDValue SV, unsigned Align) { 7351 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 7352 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 7353 } 7354 7355 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7356 ArrayRef<SDUse> Ops) { 7357 switch (Ops.size()) { 7358 case 0: return getNode(Opcode, DL, VT); 7359 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 7360 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 7361 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 7362 default: break; 7363 } 7364 7365 // Copy from an SDUse array into an SDValue array for use with 7366 // the regular getNode logic. 7367 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 7368 return getNode(Opcode, DL, VT, NewOps); 7369 } 7370 7371 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7372 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7373 unsigned NumOps = Ops.size(); 7374 switch (NumOps) { 7375 case 0: return getNode(Opcode, DL, VT); 7376 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 7377 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 7378 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags); 7379 default: break; 7380 } 7381 7382 switch (Opcode) { 7383 default: break; 7384 case ISD::BUILD_VECTOR: 7385 // Attempt to simplify BUILD_VECTOR. 7386 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 7387 return V; 7388 break; 7389 case ISD::CONCAT_VECTORS: 7390 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 7391 return V; 7392 break; 7393 case ISD::SELECT_CC: 7394 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 7395 assert(Ops[0].getValueType() == Ops[1].getValueType() && 7396 "LHS and RHS of condition must have same type!"); 7397 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7398 "True and False arms of SelectCC must have same type!"); 7399 assert(Ops[2].getValueType() == VT && 7400 "select_cc node must be of same type as true and false value!"); 7401 break; 7402 case ISD::BR_CC: 7403 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 7404 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7405 "LHS/RHS of comparison should match types!"); 7406 break; 7407 } 7408 7409 // Memoize nodes. 
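// Illustrative note, not part of the original source: because non-glue nodes
// are memoized through the CSEMap below, two getNode calls with the same
// opcode, value type and operands hand back the same SDNode rather than a
// duplicate.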
7410 SDNode *N; 7411 SDVTList VTs = getVTList(VT); 7412 7413 if (VT != MVT::Glue) { 7414 FoldingSetNodeID ID; 7415 AddNodeIDNode(ID, Opcode, VTs, Ops); 7416 void *IP = nullptr; 7417 7418 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7419 return SDValue(E, 0); 7420 7421 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7422 createOperands(N, Ops); 7423 7424 CSEMap.InsertNode(N, IP); 7425 } else { 7426 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7427 createOperands(N, Ops); 7428 } 7429 7430 InsertNode(N); 7431 SDValue V(N, 0); 7432 NewSDValueDbgMsg(V, "Creating new node: ", this); 7433 return V; 7434 } 7435 7436 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7437 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 7438 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 7439 } 7440 7441 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7442 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7443 if (VTList.NumVTs == 1) 7444 return getNode(Opcode, DL, VTList.VTs[0], Ops); 7445 7446 switch (Opcode) { 7447 case ISD::STRICT_FP_EXTEND: 7448 assert(VTList.NumVTs == 2 && Ops.size() == 2 && 7449 "Invalid STRICT_FP_EXTEND!"); 7450 assert(VTList.VTs[0].isFloatingPoint() && 7451 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!"); 7452 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7453 "STRICT_FP_EXTEND result type should be vector iff the operand " 7454 "type is vector!"); 7455 assert((!VTList.VTs[0].isVector() || 7456 VTList.VTs[0].getVectorNumElements() == 7457 Ops[1].getValueType().getVectorNumElements()) && 7458 "Vector element count mismatch!"); 7459 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) && 7460 "Invalid fpext node, dst <= src!"); 7461 break; 7462 case ISD::STRICT_FP_ROUND: 7463 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!"); 7464 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7465 "STRICT_FP_ROUND result type should be vector iff the operand " 7466 "type is vector!"); 7467 assert((!VTList.VTs[0].isVector() || 7468 VTList.VTs[0].getVectorNumElements() == 7469 Ops[1].getValueType().getVectorNumElements()) && 7470 "Vector element count mismatch!"); 7471 assert(VTList.VTs[0].isFloatingPoint() && 7472 Ops[1].getValueType().isFloatingPoint() && 7473 VTList.VTs[0].bitsLT(Ops[1].getValueType()) && 7474 isa<ConstantSDNode>(Ops[2]) && 7475 (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 || 7476 cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) && 7477 "Invalid STRICT_FP_ROUND!"); 7478 break; 7479 #if 0 7480 // FIXME: figure out how to safely handle things like 7481 // int foo(int x) { return 1 << (x & 255); } 7482 // int bar() { return foo(256); } 7483 case ISD::SRA_PARTS: 7484 case ISD::SRL_PARTS: 7485 case ISD::SHL_PARTS: 7486 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 7487 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 7488 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7489 else if (N3.getOpcode() == ISD::AND) 7490 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 7491 // If the and is only masking out bits that cannot effect the shift, 7492 // eliminate the and. 7493 unsigned NumBits = VT.getScalarSizeInBits()*2; 7494 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 7495 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7496 } 7497 break; 7498 #endif 7499 } 7500 7501 // Memoize the node unless it returns a flag. 
7502 SDNode *N; 7503 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 7504 FoldingSetNodeID ID; 7505 AddNodeIDNode(ID, Opcode, VTList, Ops); 7506 void *IP = nullptr; 7507 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7508 return SDValue(E, 0); 7509 7510 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7511 N->setFlags(Flags); 7512 createOperands(N, Ops); 7513 CSEMap.InsertNode(N, IP); 7514 } else { 7515 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7516 createOperands(N, Ops); 7517 } 7518 InsertNode(N); 7519 SDValue V(N, 0); 7520 NewSDValueDbgMsg(V, "Creating new node: ", this); 7521 return V; 7522 } 7523 7524 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7525 SDVTList VTList) { 7526 return getNode(Opcode, DL, VTList, None); 7527 } 7528 7529 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7530 SDValue N1) { 7531 SDValue Ops[] = { N1 }; 7532 return getNode(Opcode, DL, VTList, Ops); 7533 } 7534 7535 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7536 SDValue N1, SDValue N2) { 7537 SDValue Ops[] = { N1, N2 }; 7538 return getNode(Opcode, DL, VTList, Ops); 7539 } 7540 7541 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7542 SDValue N1, SDValue N2, SDValue N3) { 7543 SDValue Ops[] = { N1, N2, N3 }; 7544 return getNode(Opcode, DL, VTList, Ops); 7545 } 7546 7547 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7548 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 7549 SDValue Ops[] = { N1, N2, N3, N4 }; 7550 return getNode(Opcode, DL, VTList, Ops); 7551 } 7552 7553 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7554 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 7555 SDValue N5) { 7556 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 7557 return getNode(Opcode, DL, VTList, Ops); 7558 } 7559 7560 SDVTList SelectionDAG::getVTList(EVT VT) { 7561 return makeVTList(SDNode::getValueTypeList(VT), 1); 7562 } 7563 7564 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 7565 FoldingSetNodeID ID; 7566 ID.AddInteger(2U); 7567 ID.AddInteger(VT1.getRawBits()); 7568 ID.AddInteger(VT2.getRawBits()); 7569 7570 void *IP = nullptr; 7571 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7572 if (!Result) { 7573 EVT *Array = Allocator.Allocate<EVT>(2); 7574 Array[0] = VT1; 7575 Array[1] = VT2; 7576 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 7577 VTListMap.InsertNode(Result, IP); 7578 } 7579 return Result->getSDVTList(); 7580 } 7581 7582 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 7583 FoldingSetNodeID ID; 7584 ID.AddInteger(3U); 7585 ID.AddInteger(VT1.getRawBits()); 7586 ID.AddInteger(VT2.getRawBits()); 7587 ID.AddInteger(VT3.getRawBits()); 7588 7589 void *IP = nullptr; 7590 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7591 if (!Result) { 7592 EVT *Array = Allocator.Allocate<EVT>(3); 7593 Array[0] = VT1; 7594 Array[1] = VT2; 7595 Array[2] = VT3; 7596 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 7597 VTListMap.InsertNode(Result, IP); 7598 } 7599 return Result->getSDVTList(); 7600 } 7601 7602 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 7603 FoldingSetNodeID ID; 7604 ID.AddInteger(4U); 7605 ID.AddInteger(VT1.getRawBits()); 7606 ID.AddInteger(VT2.getRawBits()); 7607 ID.AddInteger(VT3.getRawBits()); 7608 ID.AddInteger(VT4.getRawBits()); 7609 7610 void *IP = nullptr; 
7611 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7612 if (!Result) { 7613 EVT *Array = Allocator.Allocate<EVT>(4); 7614 Array[0] = VT1; 7615 Array[1] = VT2; 7616 Array[2] = VT3; 7617 Array[3] = VT4; 7618 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 7619 VTListMap.InsertNode(Result, IP); 7620 } 7621 return Result->getSDVTList(); 7622 } 7623 7624 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 7625 unsigned NumVTs = VTs.size(); 7626 FoldingSetNodeID ID; 7627 ID.AddInteger(NumVTs); 7628 for (unsigned index = 0; index < NumVTs; index++) { 7629 ID.AddInteger(VTs[index].getRawBits()); 7630 } 7631 7632 void *IP = nullptr; 7633 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7634 if (!Result) { 7635 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 7636 llvm::copy(VTs, Array); 7637 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 7638 VTListMap.InsertNode(Result, IP); 7639 } 7640 return Result->getSDVTList(); 7641 } 7642 7643 7644 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 7645 /// specified operands. If the resultant node already exists in the DAG, 7646 /// this does not modify the specified node, instead it returns the node that 7647 /// already exists. If the resultant node does not exist in the DAG, the 7648 /// input node is returned. As a degenerate case, if you specify the same 7649 /// input operands as the node already has, the input node is returned. 7650 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 7651 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 7652 7653 // Check to see if there is no change. 7654 if (Op == N->getOperand(0)) return N; 7655 7656 // See if the modified node already exists. 7657 void *InsertPos = nullptr; 7658 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 7659 return Existing; 7660 7661 // Nope it doesn't. Remove the node from its current place in the maps. 7662 if (InsertPos) 7663 if (!RemoveNodeFromCSEMaps(N)) 7664 InsertPos = nullptr; 7665 7666 // Now we update the operands. 7667 N->OperandList[0].set(Op); 7668 7669 updateDivergence(N); 7670 // If this gets put into a CSE map, add it. 7671 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7672 return N; 7673 } 7674 7675 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 7676 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 7677 7678 // Check to see if there is no change. 7679 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 7680 return N; // No operands changed, just return the input node. 7681 7682 // See if the modified node already exists. 7683 void *InsertPos = nullptr; 7684 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 7685 return Existing; 7686 7687 // Nope it doesn't. Remove the node from its current place in the maps. 7688 if (InsertPos) 7689 if (!RemoveNodeFromCSEMaps(N)) 7690 InsertPos = nullptr; 7691 7692 // Now we update the operands. 7693 if (N->OperandList[0] != Op1) 7694 N->OperandList[0].set(Op1); 7695 if (N->OperandList[1] != Op2) 7696 N->OperandList[1].set(Op2); 7697 7698 updateDivergence(N); 7699 // If this gets put into a CSE map, add it. 
7700 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7701 return N; 7702 } 7703 7704 SDNode *SelectionDAG:: 7705 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 7706 SDValue Ops[] = { Op1, Op2, Op3 }; 7707 return UpdateNodeOperands(N, Ops); 7708 } 7709 7710 SDNode *SelectionDAG:: 7711 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7712 SDValue Op3, SDValue Op4) { 7713 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 7714 return UpdateNodeOperands(N, Ops); 7715 } 7716 7717 SDNode *SelectionDAG:: 7718 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7719 SDValue Op3, SDValue Op4, SDValue Op5) { 7720 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 7721 return UpdateNodeOperands(N, Ops); 7722 } 7723 7724 SDNode *SelectionDAG:: 7725 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 7726 unsigned NumOps = Ops.size(); 7727 assert(N->getNumOperands() == NumOps && 7728 "Update with wrong number of operands"); 7729 7730 // If no operands changed just return the input node. 7731 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 7732 return N; 7733 7734 // See if the modified node already exists. 7735 void *InsertPos = nullptr; 7736 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 7737 return Existing; 7738 7739 // Nope it doesn't. Remove the node from its current place in the maps. 7740 if (InsertPos) 7741 if (!RemoveNodeFromCSEMaps(N)) 7742 InsertPos = nullptr; 7743 7744 // Now we update the operands. 7745 for (unsigned i = 0; i != NumOps; ++i) 7746 if (N->OperandList[i] != Ops[i]) 7747 N->OperandList[i].set(Ops[i]); 7748 7749 updateDivergence(N); 7750 // If this gets put into a CSE map, add it. 7751 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7752 return N; 7753 } 7754 7755 /// DropOperands - Release the operands and set this node to have 7756 /// zero operands. 7757 void SDNode::DropOperands() { 7758 // Unlike the code in MorphNodeTo that does this, we don't need to 7759 // watch for dead nodes here. 7760 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 7761 SDUse &Use = *I++; 7762 Use.set(SDValue()); 7763 } 7764 } 7765 7766 void SelectionDAG::setNodeMemRefs(MachineSDNode *N, 7767 ArrayRef<MachineMemOperand *> NewMemRefs) { 7768 if (NewMemRefs.empty()) { 7769 N->clearMemRefs(); 7770 return; 7771 } 7772 7773 // Check if we can avoid allocating by storing a single reference directly. 7774 if (NewMemRefs.size() == 1) { 7775 N->MemRefs = NewMemRefs[0]; 7776 N->NumMemRefs = 1; 7777 return; 7778 } 7779 7780 MachineMemOperand **MemRefsBuffer = 7781 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size()); 7782 llvm::copy(NewMemRefs, MemRefsBuffer); 7783 N->MemRefs = MemRefsBuffer; 7784 N->NumMemRefs = static_cast<int>(NewMemRefs.size()); 7785 } 7786 7787 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 7788 /// machine opcode. 
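/// Illustrative usage sketch (CurDAG and the target opcode are hypothetical):
/// an instruction selector could rewrite a two-operand node in place with
///   CurDAG->SelectNodeTo(N, TargetOpc::ADD_rr, N->getValueType(0),
///                        N->getOperand(0), N->getOperand(1));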
7789 /// 7790 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7791 EVT VT) { 7792 SDVTList VTs = getVTList(VT); 7793 return SelectNodeTo(N, MachineOpc, VTs, None); 7794 } 7795 7796 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7797 EVT VT, SDValue Op1) { 7798 SDVTList VTs = getVTList(VT); 7799 SDValue Ops[] = { Op1 }; 7800 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7801 } 7802 7803 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7804 EVT VT, SDValue Op1, 7805 SDValue Op2) { 7806 SDVTList VTs = getVTList(VT); 7807 SDValue Ops[] = { Op1, Op2 }; 7808 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7809 } 7810 7811 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7812 EVT VT, SDValue Op1, 7813 SDValue Op2, SDValue Op3) { 7814 SDVTList VTs = getVTList(VT); 7815 SDValue Ops[] = { Op1, Op2, Op3 }; 7816 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7817 } 7818 7819 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7820 EVT VT, ArrayRef<SDValue> Ops) { 7821 SDVTList VTs = getVTList(VT); 7822 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7823 } 7824 7825 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7826 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 7827 SDVTList VTs = getVTList(VT1, VT2); 7828 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7829 } 7830 7831 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7832 EVT VT1, EVT VT2) { 7833 SDVTList VTs = getVTList(VT1, VT2); 7834 return SelectNodeTo(N, MachineOpc, VTs, None); 7835 } 7836 7837 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7838 EVT VT1, EVT VT2, EVT VT3, 7839 ArrayRef<SDValue> Ops) { 7840 SDVTList VTs = getVTList(VT1, VT2, VT3); 7841 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7842 } 7843 7844 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7845 EVT VT1, EVT VT2, 7846 SDValue Op1, SDValue Op2) { 7847 SDVTList VTs = getVTList(VT1, VT2); 7848 SDValue Ops[] = { Op1, Op2 }; 7849 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7850 } 7851 7852 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7853 SDVTList VTs, ArrayRef<SDValue> Ops) { 7854 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 7855 // Reset the NodeID to -1. 7856 New->setNodeId(-1); 7857 if (New != N) { 7858 ReplaceAllUsesWith(N, New); 7859 RemoveDeadNode(N); 7860 } 7861 return New; 7862 } 7863 7864 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away 7865 /// the line number information on the merged node since it is not possible to 7866 /// preserve the information that the operation is associated with multiple lines. 7867 /// This will make the debugger work better at -O0, where there is a higher 7868 /// probability of having other instructions associated with that line. 7869 /// 7870 /// For IROrder, we keep the smaller of the two. 7871 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { 7872 DebugLoc NLoc = N->getDebugLoc(); 7873 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) { 7874 N->setDebugLoc(DebugLoc()); 7875 } 7876 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); 7877 N->setIROrder(Order); 7878 return N; 7879 } 7880 7881 /// MorphNodeTo - This *mutates* the specified node to have the specified 7882 /// return type, opcode, and operands. 7883 /// 7884 /// Note that MorphNodeTo returns the resultant node.
If there is already a 7885 /// node of the specified opcode and operands, it returns that node instead of 7886 /// the current one. Note that the SDLoc need not be the same. 7887 /// 7888 /// Using MorphNodeTo is faster than creating a new node and swapping it in 7889 /// with ReplaceAllUsesWith both because it often avoids allocating a new 7890 /// node, and because it doesn't require CSE recalculation for any of 7891 /// the node's users. 7892 /// 7893 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG. 7894 /// As a consequence it isn't appropriate to use from within the DAG combiner or 7895 /// the legalizer which maintain worklists that would need to be updated when 7896 /// deleting things. 7897 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, 7898 SDVTList VTs, ArrayRef<SDValue> Ops) { 7899 // If an identical node already exists, use it. 7900 void *IP = nullptr; 7901 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { 7902 FoldingSetNodeID ID; 7903 AddNodeIDNode(ID, Opc, VTs, Ops); 7904 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) 7905 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); 7906 } 7907 7908 if (!RemoveNodeFromCSEMaps(N)) 7909 IP = nullptr; 7910 7911 // Start the morphing. 7912 N->NodeType = Opc; 7913 N->ValueList = VTs.VTs; 7914 N->NumValues = VTs.NumVTs; 7915 7916 // Clear the operands list, updating used nodes to remove this from their 7917 // use list. Keep track of any operands that become dead as a result. 7918 SmallPtrSet<SDNode*, 16> DeadNodeSet; 7919 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 7920 SDUse &Use = *I++; 7921 SDNode *Used = Use.getNode(); 7922 Use.set(SDValue()); 7923 if (Used->use_empty()) 7924 DeadNodeSet.insert(Used); 7925 } 7926 7927 // For MachineNode, initialize the memory references information. 7928 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) 7929 MN->clearMemRefs(); 7930 7931 // Swap for an appropriately sized array from the recycler. 7932 removeOperands(N); 7933 createOperands(N, Ops); 7934 7935 // Delete any nodes that are still dead after adding the uses for the 7936 // new operands. 7937 if (!DeadNodeSet.empty()) { 7938 SmallVector<SDNode *, 16> DeadNodes; 7939 for (SDNode *N : DeadNodeSet) 7940 if (N->use_empty()) 7941 DeadNodes.push_back(N); 7942 RemoveDeadNodes(DeadNodes); 7943 } 7944 7945 if (IP) 7946 CSEMap.InsertNode(N, IP); // Memoize the new node. 7947 return N; 7948 } 7949 7950 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) { 7951 unsigned OrigOpc = Node->getOpcode(); 7952 unsigned NewOpc; 7953 switch (OrigOpc) { 7954 default: 7955 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!"); 7956 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 7957 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break; 7958 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 7959 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break; 7960 #include "llvm/IR/ConstrainedOps.def" 7961 } 7962 7963 assert(Node->getNumValues() == 2 && "Unexpected number of results!"); 7964 7965 // We're taking this node out of the chain, so we need to re-link things. 
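  // For example, a STRICT_FADD node has operands (chain, x, y) and results
  // (value, out-chain); below, users of the out-chain are rewired to the
  // incoming chain, and the node is then morphed into a plain FADD over
  // (x, y).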
7966 SDValue InputChain = Node->getOperand(0); 7967 SDValue OutputChain = SDValue(Node, 1); 7968 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 7969 7970 SmallVector<SDValue, 3> Ops; 7971 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) 7972 Ops.push_back(Node->getOperand(i)); 7973 7974 SDVTList VTs = getVTList(Node->getValueType(0)); 7975 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops); 7976 7977 // MorphNodeTo can operate in two ways: if an existing node with the 7978 // specified operands exists, it can just return it. Otherwise, it 7979 // updates the node in place to have the requested operands. 7980 if (Res == Node) { 7981 // If we updated the node in place, reset the node ID. To the isel, 7982 // this should be just like a newly allocated machine node. 7983 Res->setNodeId(-1); 7984 } else { 7985 ReplaceAllUsesWith(Node, Res); 7986 RemoveDeadNode(Node); 7987 } 7988 7989 return Res; 7990 } 7991 7992 /// getMachineNode - These are used for target selectors to create a new node 7993 /// with specified return type(s), MachineInstr opcode, and operands. 7994 /// 7995 /// Note that getMachineNode returns the resultant node. If there is already a 7996 /// node of the specified opcode and operands, it returns that node instead of 7997 /// the current one. 7998 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7999 EVT VT) { 8000 SDVTList VTs = getVTList(VT); 8001 return getMachineNode(Opcode, dl, VTs, None); 8002 } 8003 8004 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8005 EVT VT, SDValue Op1) { 8006 SDVTList VTs = getVTList(VT); 8007 SDValue Ops[] = { Op1 }; 8008 return getMachineNode(Opcode, dl, VTs, Ops); 8009 } 8010 8011 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8012 EVT VT, SDValue Op1, SDValue Op2) { 8013 SDVTList VTs = getVTList(VT); 8014 SDValue Ops[] = { Op1, Op2 }; 8015 return getMachineNode(Opcode, dl, VTs, Ops); 8016 } 8017 8018 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8019 EVT VT, SDValue Op1, SDValue Op2, 8020 SDValue Op3) { 8021 SDVTList VTs = getVTList(VT); 8022 SDValue Ops[] = { Op1, Op2, Op3 }; 8023 return getMachineNode(Opcode, dl, VTs, Ops); 8024 } 8025 8026 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8027 EVT VT, ArrayRef<SDValue> Ops) { 8028 SDVTList VTs = getVTList(VT); 8029 return getMachineNode(Opcode, dl, VTs, Ops); 8030 } 8031 8032 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8033 EVT VT1, EVT VT2, SDValue Op1, 8034 SDValue Op2) { 8035 SDVTList VTs = getVTList(VT1, VT2); 8036 SDValue Ops[] = { Op1, Op2 }; 8037 return getMachineNode(Opcode, dl, VTs, Ops); 8038 } 8039 8040 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8041 EVT VT1, EVT VT2, SDValue Op1, 8042 SDValue Op2, SDValue Op3) { 8043 SDVTList VTs = getVTList(VT1, VT2); 8044 SDValue Ops[] = { Op1, Op2, Op3 }; 8045 return getMachineNode(Opcode, dl, VTs, Ops); 8046 } 8047 8048 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8049 EVT VT1, EVT VT2, 8050 ArrayRef<SDValue> Ops) { 8051 SDVTList VTs = getVTList(VT1, VT2); 8052 return getMachineNode(Opcode, dl, VTs, Ops); 8053 } 8054 8055 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8056 EVT VT1, EVT VT2, EVT VT3, 8057 SDValue Op1, SDValue Op2) { 8058 SDVTList VTs = getVTList(VT1, VT2, VT3); 8059 SDValue Ops[] = { Op1, Op2 }; 8060 return 
getMachineNode(Opcode, dl, VTs, Ops); 8061 } 8062 8063 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8064 EVT VT1, EVT VT2, EVT VT3, 8065 SDValue Op1, SDValue Op2, 8066 SDValue Op3) { 8067 SDVTList VTs = getVTList(VT1, VT2, VT3); 8068 SDValue Ops[] = { Op1, Op2, Op3 }; 8069 return getMachineNode(Opcode, dl, VTs, Ops); 8070 } 8071 8072 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8073 EVT VT1, EVT VT2, EVT VT3, 8074 ArrayRef<SDValue> Ops) { 8075 SDVTList VTs = getVTList(VT1, VT2, VT3); 8076 return getMachineNode(Opcode, dl, VTs, Ops); 8077 } 8078 8079 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8080 ArrayRef<EVT> ResultTys, 8081 ArrayRef<SDValue> Ops) { 8082 SDVTList VTs = getVTList(ResultTys); 8083 return getMachineNode(Opcode, dl, VTs, Ops); 8084 } 8085 8086 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 8087 SDVTList VTs, 8088 ArrayRef<SDValue> Ops) { 8089 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 8090 MachineSDNode *N; 8091 void *IP = nullptr; 8092 8093 if (DoCSE) { 8094 FoldingSetNodeID ID; 8095 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 8096 IP = nullptr; 8097 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 8098 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 8099 } 8100 } 8101 8102 // Allocate a new MachineSDNode. 8103 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 8104 createOperands(N, Ops); 8105 8106 if (DoCSE) 8107 CSEMap.InsertNode(N, IP); 8108 8109 InsertNode(N); 8110 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this); 8111 return N; 8112 } 8113 8114 /// getTargetExtractSubreg - A convenience function for creating 8115 /// TargetOpcode::EXTRACT_SUBREG nodes. 8116 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8117 SDValue Operand) { 8118 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8119 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 8120 VT, Operand, SRIdxVal); 8121 return SDValue(Subreg, 0); 8122 } 8123 8124 /// getTargetInsertSubreg - A convenience function for creating 8125 /// TargetOpcode::INSERT_SUBREG nodes. 8126 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8127 SDValue Operand, SDValue Subreg) { 8128 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8129 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 8130 VT, Operand, Subreg, SRIdxVal); 8131 return SDValue(Result, 0); 8132 } 8133 8134 /// getNodeIfExists - Get the specified node if it's already available, or 8135 /// else return NULL. 8136 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 8137 ArrayRef<SDValue> Ops, 8138 const SDNodeFlags Flags) { 8139 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 8140 FoldingSetNodeID ID; 8141 AddNodeIDNode(ID, Opcode, VTList, Ops); 8142 void *IP = nullptr; 8143 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 8144 E->intersectFlagsWith(Flags); 8145 return E; 8146 } 8147 } 8148 return nullptr; 8149 } 8150 8151 /// getDbgValue - Creates a SDDbgValue node. 
8152 /// 8153 /// SDNode 8154 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 8155 SDNode *N, unsigned R, bool IsIndirect, 8156 const DebugLoc &DL, unsigned O) { 8157 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8158 "Expected inlined-at fields to agree"); 8159 return new (DbgInfo->getAlloc()) 8160 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 8161 } 8162 8163 /// Constant 8164 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 8165 DIExpression *Expr, 8166 const Value *C, 8167 const DebugLoc &DL, unsigned O) { 8168 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8169 "Expected inlined-at fields to agree"); 8170 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 8171 } 8172 8173 /// FrameIndex 8174 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 8175 DIExpression *Expr, unsigned FI, 8176 bool IsIndirect, 8177 const DebugLoc &DL, 8178 unsigned O) { 8179 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8180 "Expected inlined-at fields to agree"); 8181 return new (DbgInfo->getAlloc()) 8182 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX); 8183 } 8184 8185 /// VReg 8186 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, 8187 DIExpression *Expr, 8188 unsigned VReg, bool IsIndirect, 8189 const DebugLoc &DL, unsigned O) { 8190 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8191 "Expected inlined-at fields to agree"); 8192 return new (DbgInfo->getAlloc()) 8193 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG); 8194 } 8195 8196 void SelectionDAG::transferDbgValues(SDValue From, SDValue To, 8197 unsigned OffsetInBits, unsigned SizeInBits, 8198 bool InvalidateDbg) { 8199 SDNode *FromNode = From.getNode(); 8200 SDNode *ToNode = To.getNode(); 8201 assert(FromNode && ToNode && "Can't modify dbg values"); 8202 8203 // PR35338 8204 // TODO: assert(From != To && "Redundant dbg value transfer"); 8205 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); 8206 if (From == To || FromNode == ToNode) 8207 return; 8208 8209 if (!FromNode->getHasDebugValue()) 8210 return; 8211 8212 SmallVector<SDDbgValue *, 2> ClonedDVs; 8213 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { 8214 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated()) 8215 continue; 8216 8217 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); 8218 8219 // Just transfer the dbg value attached to From. 8220 if (Dbg->getResNo() != From.getResNo()) 8221 continue; 8222 8223 DIVariable *Var = Dbg->getVariable(); 8224 auto *Expr = Dbg->getExpression(); 8225 // If a fragment is requested, update the expression. 8226 if (SizeInBits) { 8227 // When splitting a larger (e.g., sign-extended) value whose 8228 // lower bits are described with an SDDbgValue, do not attempt 8229 // to transfer the SDDbgValue to the upper bits. 8230 if (auto FI = Expr->getFragmentInfo()) 8231 if (OffsetInBits + SizeInBits > FI->SizeInBits) 8232 continue; 8233 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, 8234 SizeInBits); 8235 if (!Fragment) 8236 continue; 8237 Expr = *Fragment; 8238 } 8239 // Clone the SDDbgValue and move it to To. 
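    // The clone is attached to ToNode and ordered no earlier than either the
    // destination node or the original debug value, so it is not emitted
    // before the value it now describes.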
8240 SDDbgValue *Clone = getDbgValue( 8241 Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(), 8242 std::max(ToNode->getIROrder(), Dbg->getOrder())); 8243 ClonedDVs.push_back(Clone); 8244 8245 if (InvalidateDbg) { 8246 // Invalidate value and indicate the SDDbgValue should not be emitted. 8247 Dbg->setIsInvalidated(); 8248 Dbg->setIsEmitted(); 8249 } 8250 } 8251 8252 for (SDDbgValue *Dbg : ClonedDVs) 8253 AddDbgValue(Dbg, ToNode, false); 8254 } 8255 8256 void SelectionDAG::salvageDebugInfo(SDNode &N) { 8257 if (!N.getHasDebugValue()) 8258 return; 8259 8260 SmallVector<SDDbgValue *, 2> ClonedDVs; 8261 for (auto DV : GetDbgValues(&N)) { 8262 if (DV->isInvalidated()) 8263 continue; 8264 switch (N.getOpcode()) { 8265 default: 8266 break; 8267 case ISD::ADD: 8268 SDValue N0 = N.getOperand(0); 8269 SDValue N1 = N.getOperand(1); 8270 if (!isConstantIntBuildVectorOrConstantInt(N0) && 8271 isConstantIntBuildVectorOrConstantInt(N1)) { 8272 uint64_t Offset = N.getConstantOperandVal(1); 8273 // Rewrite an ADD constant node into a DIExpression. Since we are 8274 // performing arithmetic to compute the variable's *value* in the 8275 // DIExpression, we need to mark the expression with a 8276 // DW_OP_stack_value. 8277 auto *DIExpr = DV->getExpression(); 8278 DIExpr = 8279 DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset); 8280 SDDbgValue *Clone = 8281 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(), 8282 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder()); 8283 ClonedDVs.push_back(Clone); 8284 DV->setIsInvalidated(); 8285 DV->setIsEmitted(); 8286 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; 8287 N0.getNode()->dumprFull(this); 8288 dbgs() << " into " << *DIExpr << '\n'); 8289 } 8290 } 8291 } 8292 8293 for (SDDbgValue *Dbg : ClonedDVs) 8294 AddDbgValue(Dbg, Dbg->getSDNode(), false); 8295 } 8296 8297 /// Creates a SDDbgLabel node. 8298 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label, 8299 const DebugLoc &DL, unsigned O) { 8300 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && 8301 "Expected inlined-at fields to agree"); 8302 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); 8303 } 8304 8305 namespace { 8306 8307 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 8308 /// pointed to by a use iterator is deleted, increment the use iterator 8309 /// so that it doesn't dangle. 8310 /// 8311 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 8312 SDNode::use_iterator &UI; 8313 SDNode::use_iterator &UE; 8314 8315 void NodeDeleted(SDNode *N, SDNode *E) override { 8316 // Increment the iterator as needed. 8317 while (UI != UE && N == *UI) 8318 ++UI; 8319 } 8320 8321 public: 8322 RAUWUpdateListener(SelectionDAG &d, 8323 SDNode::use_iterator &ui, 8324 SDNode::use_iterator &ue) 8325 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 8326 }; 8327 8328 } // end anonymous namespace 8329 8330 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8331 /// This can cause recursive merging of nodes in the DAG. 8332 /// 8333 /// This version assumes From has a single result value. 
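/// For example (an illustrative sketch), a fold that simplifies (or X, 0) to
/// X could be committed with:
/// \code
///   DAG.ReplaceAllUsesWith(SDValue(OrNode, 0), X);
/// \endcode
/// where DAG, OrNode and X are assumed to be in scope.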
8334 /// 8335 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { 8336 SDNode *From = FromN.getNode(); 8337 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && 8338 "Cannot replace with this method!"); 8339 assert(From != To.getNode() && "Cannot replace uses of with self"); 8340 8341 // Preserve Debug Values 8342 transferDbgValues(FromN, To); 8343 8344 // Iterate over all the existing uses of From. New uses will be added 8345 // to the beginning of the use list, which we avoid visiting. 8346 // This specifically avoids visiting uses of From that arise while the 8347 // replacement is happening, because any such uses would be the result 8348 // of CSE: If an existing node looks like From after one of its operands 8349 // is replaced by To, we don't want to replace of all its users with To 8350 // too. See PR3018 for more info. 8351 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8352 RAUWUpdateListener Listener(*this, UI, UE); 8353 while (UI != UE) { 8354 SDNode *User = *UI; 8355 8356 // This node is about to morph, remove its old self from the CSE maps. 8357 RemoveNodeFromCSEMaps(User); 8358 8359 // A user can appear in a use list multiple times, and when this 8360 // happens the uses are usually next to each other in the list. 8361 // To help reduce the number of CSE recomputations, process all 8362 // the uses of this user that we can find this way. 8363 do { 8364 SDUse &Use = UI.getUse(); 8365 ++UI; 8366 Use.set(To); 8367 if (To->isDivergent() != From->isDivergent()) 8368 updateDivergence(User); 8369 } while (UI != UE && *UI == User); 8370 // Now that we have modified User, add it back to the CSE maps. If it 8371 // already exists there, recursively merge the results together. 8372 AddModifiedNodeToCSEMaps(User); 8373 } 8374 8375 // If we just RAUW'd the root, take note. 8376 if (FromN == getRoot()) 8377 setRoot(To); 8378 } 8379 8380 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8381 /// This can cause recursive merging of nodes in the DAG. 8382 /// 8383 /// This version assumes that for each value of From, there is a 8384 /// corresponding value in To in the same position with the same type. 8385 /// 8386 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) { 8387 #ifndef NDEBUG 8388 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8389 assert((!From->hasAnyUseOfValue(i) || 8390 From->getValueType(i) == To->getValueType(i)) && 8391 "Cannot use this version of ReplaceAllUsesWith!"); 8392 #endif 8393 8394 // Handle the trivial case. 8395 if (From == To) 8396 return; 8397 8398 // Preserve Debug Info. Only do this if there's a use. 8399 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8400 if (From->hasAnyUseOfValue(i)) { 8401 assert((i < To->getNumValues()) && "Invalid To location"); 8402 transferDbgValues(SDValue(From, i), SDValue(To, i)); 8403 } 8404 8405 // Iterate over just the existing users of From. See the comments in 8406 // the ReplaceAllUsesWith above. 8407 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8408 RAUWUpdateListener Listener(*this, UI, UE); 8409 while (UI != UE) { 8410 SDNode *User = *UI; 8411 8412 // This node is about to morph, remove its old self from the CSE maps. 8413 RemoveNodeFromCSEMaps(User); 8414 8415 // A user can appear in a use list multiple times, and when this 8416 // happens the uses are usually next to each other in the list. 
8417 // To help reduce the number of CSE recomputations, process all 8418 // the uses of this user that we can find this way. 8419 do { 8420 SDUse &Use = UI.getUse(); 8421 ++UI; 8422 Use.setNode(To); 8423 if (To->isDivergent() != From->isDivergent()) 8424 updateDivergence(User); 8425 } while (UI != UE && *UI == User); 8426 8427 // Now that we have modified User, add it back to the CSE maps. If it 8428 // already exists there, recursively merge the results together. 8429 AddModifiedNodeToCSEMaps(User); 8430 } 8431 8432 // If we just RAUW'd the root, take note. 8433 if (From == getRoot().getNode()) 8434 setRoot(SDValue(To, getRoot().getResNo())); 8435 } 8436 8437 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8438 /// This can cause recursive merging of nodes in the DAG. 8439 /// 8440 /// This version can replace From with any result values. To must match the 8441 /// number and types of values returned by From. 8442 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 8443 if (From->getNumValues() == 1) // Handle the simple case efficiently. 8444 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 8445 8446 // Preserve Debug Info. 8447 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8448 transferDbgValues(SDValue(From, i), To[i]); 8449 8450 // Iterate over just the existing users of From. See the comments in 8451 // the ReplaceAllUsesWith above. 8452 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8453 RAUWUpdateListener Listener(*this, UI, UE); 8454 while (UI != UE) { 8455 SDNode *User = *UI; 8456 8457 // This node is about to morph, remove its old self from the CSE maps. 8458 RemoveNodeFromCSEMaps(User); 8459 8460 // A user can appear in a use list multiple times, and when this happens the 8461 // uses are usually next to each other in the list. To help reduce the 8462 // number of CSE and divergence recomputations, process all the uses of this 8463 // user that we can find this way. 8464 bool To_IsDivergent = false; 8465 do { 8466 SDUse &Use = UI.getUse(); 8467 const SDValue &ToOp = To[Use.getResNo()]; 8468 ++UI; 8469 Use.set(ToOp); 8470 To_IsDivergent |= ToOp->isDivergent(); 8471 } while (UI != UE && *UI == User); 8472 8473 if (To_IsDivergent != From->isDivergent()) 8474 updateDivergence(User); 8475 8476 // Now that we have modified User, add it back to the CSE maps. If it 8477 // already exists there, recursively merge the results together. 8478 AddModifiedNodeToCSEMaps(User); 8479 } 8480 8481 // If we just RAUW'd the root, take note. 8482 if (From == getRoot().getNode()) 8483 setRoot(SDValue(To[getRoot().getResNo()])); 8484 } 8485 8486 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 8487 /// uses of other values produced by From.getNode() alone. The Deleted 8488 /// vector is handled the same way as for ReplaceAllUsesWith. 8489 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 8490 // Handle the really simple, really trivial case efficiently. 8491 if (From == To) return; 8492 8493 // Handle the simple, trivial, case efficiently. 8494 if (From.getNode()->getNumValues() == 1) { 8495 ReplaceAllUsesWith(From, To); 8496 return; 8497 } 8498 8499 // Preserve Debug Info. 8500 transferDbgValues(From, To); 8501 8502 // Iterate over just the existing users of From. See the comments in 8503 // the ReplaceAllUsesWith above. 
8504 SDNode::use_iterator UI = From.getNode()->use_begin(), 8505 UE = From.getNode()->use_end(); 8506 RAUWUpdateListener Listener(*this, UI, UE); 8507 while (UI != UE) { 8508 SDNode *User = *UI; 8509 bool UserRemovedFromCSEMaps = false; 8510 8511 // A user can appear in a use list multiple times, and when this 8512 // happens the uses are usually next to each other in the list. 8513 // To help reduce the number of CSE recomputations, process all 8514 // the uses of this user that we can find this way. 8515 do { 8516 SDUse &Use = UI.getUse(); 8517 8518 // Skip uses of different values from the same node. 8519 if (Use.getResNo() != From.getResNo()) { 8520 ++UI; 8521 continue; 8522 } 8523 8524 // If this node hasn't been modified yet, it's still in the CSE maps, 8525 // so remove its old self from the CSE maps. 8526 if (!UserRemovedFromCSEMaps) { 8527 RemoveNodeFromCSEMaps(User); 8528 UserRemovedFromCSEMaps = true; 8529 } 8530 8531 ++UI; 8532 Use.set(To); 8533 if (To->isDivergent() != From->isDivergent()) 8534 updateDivergence(User); 8535 } while (UI != UE && *UI == User); 8536 // We are iterating over all uses of the From node, so if a use 8537 // doesn't use the specific value, no changes are made. 8538 if (!UserRemovedFromCSEMaps) 8539 continue; 8540 8541 // Now that we have modified User, add it back to the CSE maps. If it 8542 // already exists there, recursively merge the results together. 8543 AddModifiedNodeToCSEMaps(User); 8544 } 8545 8546 // If we just RAUW'd the root, take note. 8547 if (From == getRoot()) 8548 setRoot(To); 8549 } 8550 8551 namespace { 8552 8553 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 8554 /// to record information about a use. 8555 struct UseMemo { 8556 SDNode *User; 8557 unsigned Index; 8558 SDUse *Use; 8559 }; 8560 8561 /// operator< - Sort Memos by User. 
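/// Sorting by the User pointer groups all recorded uses of the same node
/// together, so ReplaceAllUsesOfValuesWith can remove each user from the CSE
/// maps once, rewrite all of its relevant uses, and then re-add it.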
8562 bool operator<(const UseMemo &L, const UseMemo &R) { 8563 return (intptr_t)L.User < (intptr_t)R.User; 8564 } 8565 8566 } // end anonymous namespace 8567 8568 void SelectionDAG::updateDivergence(SDNode * N) 8569 { 8570 if (TLI->isSDNodeAlwaysUniform(N)) 8571 return; 8572 bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 8573 for (auto &Op : N->ops()) { 8574 if (Op.Val.getValueType() != MVT::Other) 8575 IsDivergent |= Op.getNode()->isDivergent(); 8576 } 8577 if (N->SDNodeBits.IsDivergent != IsDivergent) { 8578 N->SDNodeBits.IsDivergent = IsDivergent; 8579 for (auto U : N->uses()) { 8580 updateDivergence(U); 8581 } 8582 } 8583 } 8584 8585 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) { 8586 DenseMap<SDNode *, unsigned> Degree; 8587 Order.reserve(AllNodes.size()); 8588 for (auto &N : allnodes()) { 8589 unsigned NOps = N.getNumOperands(); 8590 Degree[&N] = NOps; 8591 if (0 == NOps) 8592 Order.push_back(&N); 8593 } 8594 for (size_t I = 0; I != Order.size(); ++I) { 8595 SDNode *N = Order[I]; 8596 for (auto U : N->uses()) { 8597 unsigned &UnsortedOps = Degree[U]; 8598 if (0 == --UnsortedOps) 8599 Order.push_back(U); 8600 } 8601 } 8602 } 8603 8604 #ifndef NDEBUG 8605 void SelectionDAG::VerifyDAGDiverence() { 8606 std::vector<SDNode *> TopoOrder; 8607 CreateTopologicalOrder(TopoOrder); 8608 const TargetLowering &TLI = getTargetLoweringInfo(); 8609 DenseMap<const SDNode *, bool> DivergenceMap; 8610 for (auto &N : allnodes()) { 8611 DivergenceMap[&N] = false; 8612 } 8613 for (auto N : TopoOrder) { 8614 bool IsDivergent = DivergenceMap[N]; 8615 bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA); 8616 for (auto &Op : N->ops()) { 8617 if (Op.Val.getValueType() != MVT::Other) 8618 IsSDNodeDivergent |= DivergenceMap[Op.getNode()]; 8619 } 8620 if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) { 8621 DivergenceMap[N] = true; 8622 } 8623 } 8624 for (auto &N : allnodes()) { 8625 (void)N; 8626 assert(DivergenceMap[&N] == N.isDivergent() && 8627 "Divergence bit inconsistency detected\n"); 8628 } 8629 } 8630 #endif 8631 8632 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving 8633 /// uses of other values produced by From.getNode() alone. The same value 8634 /// may appear in both the From and To list. The Deleted vector is 8635 /// handled the same way as for ReplaceAllUsesWith. 8636 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From, 8637 const SDValue *To, 8638 unsigned Num){ 8639 // Handle the simple, trivial case efficiently. 8640 if (Num == 1) 8641 return ReplaceAllUsesOfValueWith(*From, *To); 8642 8643 transferDbgValues(*From, *To); 8644 8645 // Read up all the uses and make records of them. This helps 8646 // processing new uses that are introduced during the 8647 // replacement process. 8648 SmallVector<UseMemo, 4> Uses; 8649 for (unsigned i = 0; i != Num; ++i) { 8650 unsigned FromResNo = From[i].getResNo(); 8651 SDNode *FromNode = From[i].getNode(); 8652 for (SDNode::use_iterator UI = FromNode->use_begin(), 8653 E = FromNode->use_end(); UI != E; ++UI) { 8654 SDUse &Use = UI.getUse(); 8655 if (Use.getResNo() == FromResNo) { 8656 UseMemo Memo = { *UI, i, &Use }; 8657 Uses.push_back(Memo); 8658 } 8659 } 8660 } 8661 8662 // Sort the uses, so that all the uses from a given User are together. 8663 llvm::sort(Uses); 8664 8665 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size(); 8666 UseIndex != UseIndexEnd; ) { 8667 // We know that this user uses some value of From. 
If it is the right 8668 // value, update it. 8669 SDNode *User = Uses[UseIndex].User; 8670 8671 // This node is about to morph, remove its old self from the CSE maps. 8672 RemoveNodeFromCSEMaps(User); 8673 8674 // The Uses array is sorted, so all the uses for a given User 8675 // are next to each other in the list. 8676 // To help reduce the number of CSE recomputations, process all 8677 // the uses of this user that we can find this way. 8678 do { 8679 unsigned i = Uses[UseIndex].Index; 8680 SDUse &Use = *Uses[UseIndex].Use; 8681 ++UseIndex; 8682 8683 Use.set(To[i]); 8684 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User); 8685 8686 // Now that we have modified User, add it back to the CSE maps. If it 8687 // already exists there, recursively merge the results together. 8688 AddModifiedNodeToCSEMaps(User); 8689 } 8690 } 8691 8692 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG 8693 /// based on their topological order. It returns the maximum id and a vector 8694 /// of the SDNodes* in assigned order by reference. 8695 unsigned SelectionDAG::AssignTopologicalOrder() { 8696 unsigned DAGSize = 0; 8697 8698 // SortedPos tracks the progress of the algorithm. Nodes before it are 8699 // sorted, nodes after it are unsorted. When the algorithm completes 8700 // it is at the end of the list. 8701 allnodes_iterator SortedPos = allnodes_begin(); 8702 8703 // Visit all the nodes. Move nodes with no operands to the front of 8704 // the list immediately. Annotate nodes that do have operands with their 8705 // operand count. Before we do this, the Node Id fields of the nodes 8706 // may contain arbitrary values. After, the Node Id fields for nodes 8707 // before SortedPos will contain the topological sort index, and the 8708 // Node Id fields for nodes At SortedPos and after will contain the 8709 // count of outstanding operands. 8710 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) { 8711 SDNode *N = &*I++; 8712 checkForCycles(N, this); 8713 unsigned Degree = N->getNumOperands(); 8714 if (Degree == 0) { 8715 // A node with no uses, add it to the result array immediately. 8716 N->setNodeId(DAGSize++); 8717 allnodes_iterator Q(N); 8718 if (Q != SortedPos) 8719 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q)); 8720 assert(SortedPos != AllNodes.end() && "Overran node list"); 8721 ++SortedPos; 8722 } else { 8723 // Temporarily use the Node Id as scratch space for the degree count. 8724 N->setNodeId(Degree); 8725 } 8726 } 8727 8728 // Visit all the nodes. As we iterate, move nodes into sorted order, 8729 // such that by the time the end is reached all nodes will be sorted. 8730 for (SDNode &Node : allnodes()) { 8731 SDNode *N = &Node; 8732 checkForCycles(N, this); 8733 // N is in sorted position, so all its uses have one less operand 8734 // that needs to be sorted. 8735 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 8736 UI != UE; ++UI) { 8737 SDNode *P = *UI; 8738 unsigned Degree = P->getNodeId(); 8739 assert(Degree != 0 && "Invalid node degree"); 8740 --Degree; 8741 if (Degree == 0) { 8742 // All of P's operands are sorted, so P may sorted now. 8743 P->setNodeId(DAGSize++); 8744 if (P->getIterator() != SortedPos) 8745 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P)); 8746 assert(SortedPos != AllNodes.end() && "Overran node list"); 8747 ++SortedPos; 8748 } else { 8749 // Update P's outstanding operand count. 
8750 P->setNodeId(Degree); 8751 } 8752 } 8753 if (Node.getIterator() == SortedPos) { 8754 #ifndef NDEBUG 8755 allnodes_iterator I(N); 8756 SDNode *S = &*++I; 8757 dbgs() << "Overran sorted position:\n"; 8758 S->dumprFull(this); dbgs() << "\n"; 8759 dbgs() << "Checking if this is due to cycles\n"; 8760 checkForCycles(this, true); 8761 #endif 8762 llvm_unreachable(nullptr); 8763 } 8764 } 8765 8766 assert(SortedPos == AllNodes.end() && 8767 "Topological sort incomplete!"); 8768 assert(AllNodes.front().getOpcode() == ISD::EntryToken && 8769 "First node in topological sort is not the entry token!"); 8770 assert(AllNodes.front().getNodeId() == 0 && 8771 "First node in topological sort has non-zero id!"); 8772 assert(AllNodes.front().getNumOperands() == 0 && 8773 "First node in topological sort has operands!"); 8774 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 && 8775 "Last node in topological sort has unexpected id!"); 8776 assert(AllNodes.back().use_empty() && 8777 "Last node in topological sort has users!"); 8778 assert(DAGSize == allnodes_size() && "Node count mismatch!"); 8779 return DAGSize; 8780 } 8781 8782 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the 8783 /// value is produced by SD. 8784 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) { 8785 if (SD) { 8786 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue()); 8787 SD->setHasDebugValue(true); 8788 } 8789 DbgInfo->add(DB, SD, isParameter); 8790 } 8791 8792 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { 8793 DbgInfo->add(DB); 8794 } 8795 8796 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, 8797 SDValue NewMemOp) { 8798 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node"); 8799 // The new memory operation must have the same position as the old load in 8800 // terms of memory dependency. Create a TokenFactor for the old load and new 8801 // memory operation and update uses of the old load's output chain to use that 8802 // TokenFactor.
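  // In effect, every consumer that previously depended on the old load's
  // output chain ends up depending on a TokenFactor of both chains, so it is
  // ordered after the new memory operation as well.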
8803 SDValue OldChain = SDValue(OldLoad, 1); 8804 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 8805 if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1)) 8806 return NewChain; 8807 8808 SDValue TokenFactor = 8809 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 8810 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 8811 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 8812 return TokenFactor; 8813 } 8814 8815 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op, 8816 Function **OutFunction) { 8817 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol"); 8818 8819 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 8820 auto *Module = MF->getFunction().getParent(); 8821 auto *Function = Module->getFunction(Symbol); 8822 8823 if (OutFunction != nullptr) 8824 *OutFunction = Function; 8825 8826 if (Function != nullptr) { 8827 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace()); 8828 return getGlobalAddress(Function, SDLoc(Op), PtrTy); 8829 } 8830 8831 std::string ErrorStr; 8832 raw_string_ostream ErrorFormatter(ErrorStr); 8833 8834 ErrorFormatter << "Undefined external symbol "; 8835 ErrorFormatter << '"' << Symbol << '"'; 8836 ErrorFormatter.flush(); 8837 8838 report_fatal_error(ErrorStr); 8839 } 8840 8841 //===----------------------------------------------------------------------===// 8842 // SDNode Class 8843 //===----------------------------------------------------------------------===// 8844 8845 bool llvm::isNullConstant(SDValue V) { 8846 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8847 return Const != nullptr && Const->isNullValue(); 8848 } 8849 8850 bool llvm::isNullFPConstant(SDValue V) { 8851 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 8852 return Const != nullptr && Const->isZero() && !Const->isNegative(); 8853 } 8854 8855 bool llvm::isAllOnesConstant(SDValue V) { 8856 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8857 return Const != nullptr && Const->isAllOnesValue(); 8858 } 8859 8860 bool llvm::isOneConstant(SDValue V) { 8861 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8862 return Const != nullptr && Const->isOne(); 8863 } 8864 8865 SDValue llvm::peekThroughBitcasts(SDValue V) { 8866 while (V.getOpcode() == ISD::BITCAST) 8867 V = V.getOperand(0); 8868 return V; 8869 } 8870 8871 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { 8872 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) 8873 V = V.getOperand(0); 8874 return V; 8875 } 8876 8877 SDValue llvm::peekThroughExtractSubvectors(SDValue V) { 8878 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) 8879 V = V.getOperand(0); 8880 return V; 8881 } 8882 8883 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) { 8884 if (V.getOpcode() != ISD::XOR) 8885 return false; 8886 V = peekThroughBitcasts(V.getOperand(1)); 8887 unsigned NumBits = V.getScalarValueSizeInBits(); 8888 ConstantSDNode *C = 8889 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true); 8890 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits); 8891 } 8892 8893 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs, 8894 bool AllowTruncation) { 8895 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8896 return CN; 8897 8898 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8899 BitVector UndefElements; 8900 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 8901 8902 // BuildVectors can truncate their operands. 
Ignore that case here unless 8903 // AllowTruncation is set. 8904 if (CN && (UndefElements.none() || AllowUndefs)) { 8905 EVT CVT = CN->getValueType(0); 8906 EVT NSVT = N.getValueType().getScalarType(); 8907 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 8908 if (AllowTruncation || (CVT == NSVT)) 8909 return CN; 8910 } 8911 } 8912 8913 return nullptr; 8914 } 8915 8916 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts, 8917 bool AllowUndefs, 8918 bool AllowTruncation) { 8919 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8920 return CN; 8921 8922 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8923 BitVector UndefElements; 8924 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements); 8925 8926 // BuildVectors can truncate their operands. Ignore that case here unless 8927 // AllowTruncation is set. 8928 if (CN && (UndefElements.none() || AllowUndefs)) { 8929 EVT CVT = CN->getValueType(0); 8930 EVT NSVT = N.getValueType().getScalarType(); 8931 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 8932 if (AllowTruncation || (CVT == NSVT)) 8933 return CN; 8934 } 8935 } 8936 8937 return nullptr; 8938 } 8939 8940 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) { 8941 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 8942 return CN; 8943 8944 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8945 BitVector UndefElements; 8946 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 8947 if (CN && (UndefElements.none() || AllowUndefs)) 8948 return CN; 8949 } 8950 8951 return nullptr; 8952 } 8953 8954 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, 8955 const APInt &DemandedElts, 8956 bool AllowUndefs) { 8957 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 8958 return CN; 8959 8960 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8961 BitVector UndefElements; 8962 ConstantFPSDNode *CN = 8963 BV->getConstantFPSplatNode(DemandedElts, &UndefElements); 8964 if (CN && (UndefElements.none() || AllowUndefs)) 8965 return CN; 8966 } 8967 8968 return nullptr; 8969 } 8970 8971 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) { 8972 // TODO: may want to use peekThroughBitcast() here. 8973 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs); 8974 return C && C->isNullValue(); 8975 } 8976 8977 bool llvm::isOneOrOneSplat(SDValue N) { 8978 // TODO: may want to use peekThroughBitcast() here. 
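  // The width check below rejects splat constants whose type is wider than
  // the vector's element type, since BUILD_VECTOR operands may be implicitly
  // truncated.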
8979 unsigned BitWidth = N.getScalarValueSizeInBits(); 8980 ConstantSDNode *C = isConstOrConstSplat(N); 8981 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth; 8982 } 8983 8984 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) { 8985 N = peekThroughBitcasts(N); 8986 unsigned BitWidth = N.getScalarValueSizeInBits(); 8987 ConstantSDNode *C = isConstOrConstSplat(N); 8988 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth; 8989 } 8990 8991 HandleSDNode::~HandleSDNode() { 8992 DropOperands(); 8993 } 8994 8995 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 8996 const DebugLoc &DL, 8997 const GlobalValue *GA, EVT VT, 8998 int64_t o, unsigned TF) 8999 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 9000 TheGlobal = GA; 9001 } 9002 9003 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 9004 EVT VT, unsigned SrcAS, 9005 unsigned DestAS) 9006 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 9007 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 9008 9009 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 9010 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 9011 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 9012 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 9013 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 9014 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 9015 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 9016 9017 // We check here that the size of the memory operand fits within the size of 9018 // the MMO. This is because the MMO might indicate only a possible address 9019 // range instead of specifying the affected memory addresses precisely. 9020 // TODO: Make MachineMemOperands aware of scalable vectors. 9021 assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() && 9022 "Size mismatch!"); 9023 } 9024 9025 /// Profile - Gather unique data for the node. 9026 /// 9027 void SDNode::Profile(FoldingSetNodeID &ID) const { 9028 AddNodeIDNode(ID, this); 9029 } 9030 9031 namespace { 9032 9033 struct EVTArray { 9034 std::vector<EVT> VTs; 9035 9036 EVTArray() { 9037 VTs.reserve(MVT::LAST_VALUETYPE); 9038 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 9039 VTs.push_back(MVT((MVT::SimpleValueType)i)); 9040 } 9041 }; 9042 9043 } // end anonymous namespace 9044 9045 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 9046 static ManagedStatic<EVTArray> SimpleVTArray; 9047 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 9048 9049 /// getValueTypeList - Return a pointer to the specified value type. 9050 /// 9051 const EVT *SDNode::getValueTypeList(EVT VT) { 9052 if (VT.isExtended()) { 9053 sys::SmartScopedLock<true> Lock(*VTMutex); 9054 return &(*EVTs->insert(VT).first); 9055 } else { 9056 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 9057 "Value type out of range!"); 9058 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 9059 } 9060 } 9061 9062 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 9063 /// indicated value. This method ignores uses of other values defined by this 9064 /// operation. 
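/// For example, hasNUsesOfValue(1, 0) is true exactly when result 0 of this
/// node has a single use, regardless of how many uses the node's other
/// results have.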
9065 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { 9066 assert(Value < getNumValues() && "Bad value!"); 9067 9068 // TODO: Only iterate over uses of a given value of the node 9069 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { 9070 if (UI.getUse().getResNo() == Value) { 9071 if (NUses == 0) 9072 return false; 9073 --NUses; 9074 } 9075 } 9076 9077 // Found exactly the right number of uses? 9078 return NUses == 0; 9079 } 9080 9081 /// hasAnyUseOfValue - Return true if there are any use of the indicated 9082 /// value. This method ignores uses of other values defined by this operation. 9083 bool SDNode::hasAnyUseOfValue(unsigned Value) const { 9084 assert(Value < getNumValues() && "Bad value!"); 9085 9086 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) 9087 if (UI.getUse().getResNo() == Value) 9088 return true; 9089 9090 return false; 9091 } 9092 9093 /// isOnlyUserOf - Return true if this node is the only use of N. 9094 bool SDNode::isOnlyUserOf(const SDNode *N) const { 9095 bool Seen = false; 9096 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 9097 SDNode *User = *I; 9098 if (User == this) 9099 Seen = true; 9100 else 9101 return false; 9102 } 9103 9104 return Seen; 9105 } 9106 9107 /// Return true if the only users of N are contained in Nodes. 9108 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) { 9109 bool Seen = false; 9110 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 9111 SDNode *User = *I; 9112 if (llvm::any_of(Nodes, 9113 [&User](const SDNode *Node) { return User == Node; })) 9114 Seen = true; 9115 else 9116 return false; 9117 } 9118 9119 return Seen; 9120 } 9121 9122 /// isOperand - Return true if this node is an operand of N. 9123 bool SDValue::isOperandOf(const SDNode *N) const { 9124 return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; }); 9125 } 9126 9127 bool SDNode::isOperandOf(const SDNode *N) const { 9128 return any_of(N->op_values(), 9129 [this](SDValue Op) { return this == Op.getNode(); }); 9130 } 9131 9132 /// reachesChainWithoutSideEffects - Return true if this operand (which must 9133 /// be a chain) reaches the specified operand without crossing any 9134 /// side-effecting instructions on any chain path. In practice, this looks 9135 /// through token factors and non-volatile loads. In order to remain efficient, 9136 /// this only looks a couple of nodes in, it does not do an exhaustive search. 9137 /// 9138 /// Note that we only need to examine chains when we're searching for 9139 /// side-effects; SelectionDAG requires that all side-effects are represented 9140 /// by chains, even if another operand would force a specific ordering. This 9141 /// constraint is necessary to allow transformations like splitting loads. 9142 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, 9143 unsigned Depth) const { 9144 if (*this == Dest) return true; 9145 9146 // Don't search too deeply, we just want to be able to see through 9147 // TokenFactor's etc. 9148 if (Depth == 0) return false; 9149 9150 // If this is a token factor, all inputs to the TF happen in parallel. 9151 if (getOpcode() == ISD::TokenFactor) { 9152 // First, try a shallow search. 9153 if (is_contained((*this)->ops(), Dest)) { 9154 // We found the chain we want as an operand of this TokenFactor. 
9155 // Essentially, we reach the chain without side-effects if we could 9156 // serialize the TokenFactor into a simple chain of operations with 9157 // Dest as the last operation. This is automatically true if the 9158 // chain has one use: there are no other ordering constraints. 9159 // If the chain has more than one use, we give up: some other 9160 // use of Dest might force a side-effect between Dest and the current 9161 // node. 9162 if (Dest.hasOneUse()) 9163 return true; 9164 } 9165 // Next, try a deep search: check whether every operand of the TokenFactor 9166 // reaches Dest. 9167 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 9168 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 9169 }); 9170 } 9171 9172 // Loads don't have side effects, look through them. 9173 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 9174 if (Ld->isUnordered()) 9175 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 9176 } 9177 return false; 9178 } 9179 9180 bool SDNode::hasPredecessor(const SDNode *N) const { 9181 SmallPtrSet<const SDNode *, 32> Visited; 9182 SmallVector<const SDNode *, 16> Worklist; 9183 Worklist.push_back(this); 9184 return hasPredecessorHelper(N, Visited, Worklist); 9185 } 9186 9187 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 9188 this->Flags.intersectWith(Flags); 9189 } 9190 9191 SDValue 9192 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, 9193 ArrayRef<ISD::NodeType> CandidateBinOps, 9194 bool AllowPartials) { 9195 // The pattern must end in an extract from index 0. 9196 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || 9197 !isNullConstant(Extract->getOperand(1))) 9198 return SDValue(); 9199 9200 // Match against one of the candidate binary ops. 9201 SDValue Op = Extract->getOperand(0); 9202 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) { 9203 return Op.getOpcode() == unsigned(BinOp); 9204 })) 9205 return SDValue(); 9206 9207 // Floating-point reductions may require relaxed constraints on the final step 9208 // of the reduction because they may reorder intermediate operations. 9209 unsigned CandidateBinOp = Op.getOpcode(); 9210 if (Op.getValueType().isFloatingPoint()) { 9211 SDNodeFlags Flags = Op->getFlags(); 9212 switch (CandidateBinOp) { 9213 case ISD::FADD: 9214 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation()) 9215 return SDValue(); 9216 break; 9217 default: 9218 llvm_unreachable("Unhandled FP opcode for binop reduction"); 9219 } 9220 } 9221 9222 // Matching failed - attempt to see if we did enough stages that a partial 9223 // reduction from a subvector is possible. 9224 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) { 9225 if (!AllowPartials || !Op) 9226 return SDValue(); 9227 EVT OpVT = Op.getValueType(); 9228 EVT OpSVT = OpVT.getScalarType(); 9229 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts); 9230 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0)) 9231 return SDValue(); 9232 BinOp = (ISD::NodeType)CandidateBinOp; 9233 return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op, 9234 getVectorIdxConstant(0, SDLoc(Op))); 9235 }; 9236 9237 // At each stage, we're looking for something that looks like: 9238 // %s = shufflevector <8 x i32> %op, <8 x i32> undef, 9239 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, 9240 // i32 undef, i32 undef, i32 undef, i32 undef> 9241 // %a = binop <8 x i32> %op, %s 9242 // Where the mask changes according to the stage. E.g. 
for a 3-stage pyramid, 9243 // we expect something like: 9244 // <4,5,6,7,u,u,u,u> 9245 // <2,3,u,u,u,u,u,u> 9246 // <1,u,u,u,u,u,u,u> 9247 // While a partial reduction match would be: 9248 // <2,3,u,u,u,u,u,u> 9249 // <1,u,u,u,u,u,u,u> 9250 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements()); 9251 SDValue PrevOp; 9252 for (unsigned i = 0; i < Stages; ++i) { 9253 unsigned MaskEnd = (1 << i); 9254 9255 if (Op.getOpcode() != CandidateBinOp) 9256 return PartialReduction(PrevOp, MaskEnd); 9257 9258 SDValue Op0 = Op.getOperand(0); 9259 SDValue Op1 = Op.getOperand(1); 9260 9261 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0); 9262 if (Shuffle) { 9263 Op = Op1; 9264 } else { 9265 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1); 9266 Op = Op0; 9267 } 9268 9269 // The first operand of the shuffle should be the same as the other operand 9270 // of the binop. 9271 if (!Shuffle || Shuffle->getOperand(0) != Op) 9272 return PartialReduction(PrevOp, MaskEnd); 9273 9274 // Verify the shuffle has the expected (at this stage of the pyramid) mask. 9275 for (int Index = 0; Index < (int)MaskEnd; ++Index) 9276 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index)) 9277 return PartialReduction(PrevOp, MaskEnd); 9278 9279 PrevOp = Op; 9280 } 9281 9282 BinOp = (ISD::NodeType)CandidateBinOp; 9283 return Op; 9284 } 9285 9286 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 9287 assert(N->getNumValues() == 1 && 9288 "Can't unroll a vector with multiple results!"); 9289 9290 EVT VT = N->getValueType(0); 9291 unsigned NE = VT.getVectorNumElements(); 9292 EVT EltVT = VT.getVectorElementType(); 9293 SDLoc dl(N); 9294 9295 SmallVector<SDValue, 8> Scalars; 9296 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 9297 9298 // If ResNE is 0, fully unroll the vector op. 9299 if (ResNE == 0) 9300 ResNE = NE; 9301 else if (NE > ResNE) 9302 NE = ResNE; 9303 9304 unsigned i; 9305 for (i= 0; i != NE; ++i) { 9306 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 9307 SDValue Operand = N->getOperand(j); 9308 EVT OperandVT = Operand.getValueType(); 9309 if (OperandVT.isVector()) { 9310 // A vector operand; extract a single element. 9311 EVT OperandEltVT = OperandVT.getVectorElementType(); 9312 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, 9313 Operand, getVectorIdxConstant(i, dl)); 9314 } else { 9315 // A scalar operand; just use it as is. 
9316 Operands[j] = Operand; 9317 } 9318 } 9319 9320 switch (N->getOpcode()) { 9321 default: { 9322 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 9323 N->getFlags())); 9324 break; 9325 } 9326 case ISD::VSELECT: 9327 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 9328 break; 9329 case ISD::SHL: 9330 case ISD::SRA: 9331 case ISD::SRL: 9332 case ISD::ROTL: 9333 case ISD::ROTR: 9334 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 9335 getShiftAmountOperand(Operands[0].getValueType(), 9336 Operands[1]))); 9337 break; 9338 case ISD::SIGN_EXTEND_INREG: { 9339 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 9340 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 9341 Operands[0], 9342 getValueType(ExtVT))); 9343 } 9344 } 9345 } 9346 9347 for (; i < ResNE; ++i) 9348 Scalars.push_back(getUNDEF(EltVT)); 9349 9350 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 9351 return getBuildVector(VecVT, dl, Scalars); 9352 } 9353 9354 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( 9355 SDNode *N, unsigned ResNE) { 9356 unsigned Opcode = N->getOpcode(); 9357 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO || 9358 Opcode == ISD::USUBO || Opcode == ISD::SSUBO || 9359 Opcode == ISD::UMULO || Opcode == ISD::SMULO) && 9360 "Expected an overflow opcode"); 9361 9362 EVT ResVT = N->getValueType(0); 9363 EVT OvVT = N->getValueType(1); 9364 EVT ResEltVT = ResVT.getVectorElementType(); 9365 EVT OvEltVT = OvVT.getVectorElementType(); 9366 SDLoc dl(N); 9367 9368 // If ResNE is 0, fully unroll the vector op. 9369 unsigned NE = ResVT.getVectorNumElements(); 9370 if (ResNE == 0) 9371 ResNE = NE; 9372 else if (NE > ResNE) 9373 NE = ResNE; 9374 9375 SmallVector<SDValue, 8> LHSScalars; 9376 SmallVector<SDValue, 8> RHSScalars; 9377 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); 9378 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); 9379 9380 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); 9381 SDVTList VTs = getVTList(ResEltVT, SVT); 9382 SmallVector<SDValue, 8> ResScalars; 9383 SmallVector<SDValue, 8> OvScalars; 9384 for (unsigned i = 0; i < NE; ++i) { 9385 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); 9386 SDValue Ov = 9387 getSelect(dl, OvEltVT, Res.getValue(1), 9388 getBoolConstant(true, dl, OvEltVT, ResVT), 9389 getConstant(0, dl, OvEltVT)); 9390 9391 ResScalars.push_back(Res); 9392 OvScalars.push_back(Ov); 9393 } 9394 9395 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); 9396 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); 9397 9398 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE); 9399 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE); 9400 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars), 9401 getBuildVector(NewOvVT, dl, OvScalars)); 9402 } 9403 9404 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 9405 LoadSDNode *Base, 9406 unsigned Bytes, 9407 int Dist) const { 9408 if (LD->isVolatile() || Base->isVolatile()) 9409 return false; 9410 // TODO: probably too restrictive for atomics, revisit 9411 if (!LD->isSimple()) 9412 return false; 9413 if (LD->isIndexed() || Base->isIndexed()) 9414 return false; 9415 if (LD->getChain() != Base->getChain()) 9416 return false; 9417 EVT VT = LD->getValueType(0); 9418 if (VT.getSizeInBits() / 8 != Bytes) 9419 return false; 9420 9421 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); 9422 auto LocDecomp = BaseIndexOffset::match(LD, *this); 
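  // For example, with Bytes == 4 and Dist == 1, the loads are reported as
  // consecutive only when both addresses decompose to the same base and the
  // byte offset between them is exactly Dist * Bytes, i.e. 4.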
  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlign - Infer alignment of a load / store address. Return None if
/// it cannot be inferred.
MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV = nullptr;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    KnownBits Known(PtrWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    if (AlignBits)
      return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = INT_MIN;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != INT_MIN) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
  }

  return None;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
/// type, dependent on an enveloping VT that has been split into two identical
/// pieces. Sets the HiIsEmpty flag when the hi type has zero storage size.
std::pair<EVT, EVT>
SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
                                       bool *HiIsEmpty) const {
  EVT EltTp = VT.getVectorElementType();
  bool IsScalable = VT.isScalableVector();
  // Examples:
  //   custom VL=8  with enveloping VL=8/8 yields 8/0 (hi empty)
  //   custom VL=9  with enveloping VL=8/8 yields 8/1
  //   custom VL=10 with enveloping VL=8/8 yields 8/2
  //   etc.
  unsigned VTNumElts = VT.getVectorNumElements();
  unsigned EnvNumElts = EnvVT.getVectorNumElements();
  EVT LoVT, HiVT;
  if (VTNumElts > EnvNumElts) {
    LoVT = EnvVT;
    HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts,
                            IsScalable);
    *HiIsEmpty = false;
  } else {
    // Flag that the hi type has zero storage size, but return the split
    // envelope type (this would be easier if vector types with zero elements
    // were allowed).
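    // E.g. a custom VL=7 with an enveloping VL=8/8 yields 7/8 here, with
    // *HiIsEmpty telling the caller that the hi part carries no elements.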
    LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts, IsScalable);
    HiVT = EnvVT;
    *HiIsEmpty = true;
  }
  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
             N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo =
      getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
  return std::make_pair(Lo, Hi);
}

/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
  EVT VT = N.getValueType();
  EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                NextPowerOf2(VT.getVectorNumElements()));
  return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
                 getVectorIdxConstant(0, DL));
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count,
                                         EVT EltVT) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();
  if (EltVT == EVT())
    EltVT = VT.getVectorElementType();
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op,
                           getVectorIdxConstant(i, SL)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned int NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ?
                     NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  assert(getNumOperands() == DemandedElts.getBitWidth() &&
         "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}

SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}