1 //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This implements the SelectionDAG class. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/CodeGen/SelectionDAG.h" 14 #include "SDNodeDbgValue.h" 15 #include "llvm/ADT/APFloat.h" 16 #include "llvm/ADT/APInt.h" 17 #include "llvm/ADT/APSInt.h" 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/BitVector.h" 20 #include "llvm/ADT/FoldingSet.h" 21 #include "llvm/ADT/None.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SmallPtrSet.h" 24 #include "llvm/ADT/SmallVector.h" 25 #include "llvm/ADT/Triple.h" 26 #include "llvm/ADT/Twine.h" 27 #include "llvm/Analysis/BlockFrequencyInfo.h" 28 #include "llvm/Analysis/MemoryLocation.h" 29 #include "llvm/Analysis/ProfileSummaryInfo.h" 30 #include "llvm/Analysis/ValueTracking.h" 31 #include "llvm/CodeGen/ISDOpcodes.h" 32 #include "llvm/CodeGen/MachineBasicBlock.h" 33 #include "llvm/CodeGen/MachineConstantPool.h" 34 #include "llvm/CodeGen/MachineFrameInfo.h" 35 #include "llvm/CodeGen/MachineFunction.h" 36 #include "llvm/CodeGen/MachineMemOperand.h" 37 #include "llvm/CodeGen/RuntimeLibcalls.h" 38 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h" 39 #include "llvm/CodeGen/SelectionDAGNodes.h" 40 #include "llvm/CodeGen/SelectionDAGTargetInfo.h" 41 #include "llvm/CodeGen/TargetFrameLowering.h" 42 #include "llvm/CodeGen/TargetLowering.h" 43 #include "llvm/CodeGen/TargetRegisterInfo.h" 44 #include "llvm/CodeGen/TargetSubtargetInfo.h" 45 #include "llvm/CodeGen/ValueTypes.h" 46 #include "llvm/IR/Constant.h" 47 #include "llvm/IR/Constants.h" 48 #include "llvm/IR/DataLayout.h" 49 #include "llvm/IR/DebugInfoMetadata.h" 50 #include "llvm/IR/DebugLoc.h" 51 #include "llvm/IR/DerivedTypes.h" 52 #include "llvm/IR/Function.h" 53 #include "llvm/IR/GlobalValue.h" 54 #include "llvm/IR/Metadata.h" 55 #include "llvm/IR/Type.h" 56 #include "llvm/IR/Value.h" 57 #include "llvm/Support/Casting.h" 58 #include "llvm/Support/CodeGen.h" 59 #include "llvm/Support/Compiler.h" 60 #include "llvm/Support/Debug.h" 61 #include "llvm/Support/ErrorHandling.h" 62 #include "llvm/Support/KnownBits.h" 63 #include "llvm/Support/MachineValueType.h" 64 #include "llvm/Support/ManagedStatic.h" 65 #include "llvm/Support/MathExtras.h" 66 #include "llvm/Support/Mutex.h" 67 #include "llvm/Support/raw_ostream.h" 68 #include "llvm/Target/TargetMachine.h" 69 #include "llvm/Target/TargetOptions.h" 70 #include "llvm/Transforms/Utils/SizeOpts.h" 71 #include <algorithm> 72 #include <cassert> 73 #include <cstdint> 74 #include <cstdlib> 75 #include <limits> 76 #include <set> 77 #include <string> 78 #include <utility> 79 #include <vector> 80 81 using namespace llvm; 82 83 /// makeVTList - Return an instance of the SDVTList struct initialized with the 84 /// specified members. 85 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) { 86 SDVTList Res = {VTs, NumVTs}; 87 return Res; 88 } 89 90 // Default null implementations of the callbacks. 
91 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {} 92 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {} 93 void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {} 94 95 void SelectionDAG::DAGNodeDeletedListener::anchor() {} 96 97 #define DEBUG_TYPE "selectiondag" 98 99 static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt", 100 cl::Hidden, cl::init(true), 101 cl::desc("Gang up loads and stores generated by inlining of memcpy")); 102 103 static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max", 104 cl::desc("Number limit for gluing ld/st of memcpy."), 105 cl::Hidden, cl::init(0)); 106 107 static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) { 108 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G);); 109 } 110 111 //===----------------------------------------------------------------------===// 112 // ConstantFPSDNode Class 113 //===----------------------------------------------------------------------===// 114 115 /// isExactlyValue - We don't rely on operator== working on double values, as 116 /// it returns true for things that are clearly not equal, like -0.0 and 0.0. 117 /// As such, this method can be used to do an exact bit-for-bit comparison of 118 /// two floating point values. 119 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const { 120 return getValueAPF().bitwiseIsEqual(V); 121 } 122 123 bool ConstantFPSDNode::isValueValidForType(EVT VT, 124 const APFloat& Val) { 125 assert(VT.isFloatingPoint() && "Can only convert between FP types"); 126 127 // convert modifies in place, so make a copy. 128 APFloat Val2 = APFloat(Val); 129 bool losesInfo; 130 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT), 131 APFloat::rmNearestTiesToEven, 132 &losesInfo); 133 return !losesInfo; 134 } 135 136 //===----------------------------------------------------------------------===// 137 // ISD Namespace 138 //===----------------------------------------------------------------------===// 139 140 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) { 141 auto *BV = dyn_cast<BuildVectorSDNode>(N); 142 if (!BV) 143 return false; 144 145 APInt SplatUndef; 146 unsigned SplatBitSize; 147 bool HasUndefs; 148 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits(); 149 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs, 150 EltSize) && 151 EltSize == SplatBitSize; 152 } 153 154 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be 155 // specializations of the more general isConstantSplatVector()? 156 157 bool ISD::isBuildVectorAllOnes(const SDNode *N) { 158 // Look through a bit convert. 159 while (N->getOpcode() == ISD::BITCAST) 160 N = N->getOperand(0).getNode(); 161 162 if (N->getOpcode() != ISD::BUILD_VECTOR) return false; 163 164 unsigned i = 0, e = N->getNumOperands(); 165 166 // Skip over all of the undef values. 167 while (i != e && N->getOperand(i).isUndef()) 168 ++i; 169 170 // Do not accept an all-undef vector. 171 if (i == e) return false; 172 173 // Do not accept build_vectors that aren't all constants or which have non-~0 174 // elements. We have to be a bit careful here, as the type of the constant 175 // may not be the same as the type of the vector elements due to type 176 // legalization (the elements are promoted to a legal type for the target and 177 // a vector of a type may be legal when the base element type is not). 
178 // We only want to check enough bits to cover the vector elements, because 179 // we care if the resultant vector is all ones, not whether the individual 180 // constants are. 181 SDValue NotZero = N->getOperand(i); 182 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); 183 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) { 184 if (CN->getAPIntValue().countTrailingOnes() < EltSize) 185 return false; 186 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) { 187 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize) 188 return false; 189 } else 190 return false; 191 192 // Okay, we have at least one ~0 value, check to see if the rest match or are 193 // undefs. Even with the above element type twiddling, this should be OK, as 194 // the same type legalization should have applied to all the elements. 195 for (++i; i != e; ++i) 196 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef()) 197 return false; 198 return true; 199 } 200 201 bool ISD::isBuildVectorAllZeros(const SDNode *N) { 202 // Look through a bit convert. 203 while (N->getOpcode() == ISD::BITCAST) 204 N = N->getOperand(0).getNode(); 205 206 if (N->getOpcode() != ISD::BUILD_VECTOR) return false; 207 208 bool IsAllUndef = true; 209 for (const SDValue &Op : N->op_values()) { 210 if (Op.isUndef()) 211 continue; 212 IsAllUndef = false; 213 // Do not accept build_vectors that aren't all constants or which have non-0 214 // elements. We have to be a bit careful here, as the type of the constant 215 // may not be the same as the type of the vector elements due to type 216 // legalization (the elements are promoted to a legal type for the target 217 // and a vector of a type may be legal when the base element type is not). 218 // We only want to check enough bits to cover the vector elements, because 219 // we care if the resultant vector is all zeros, not whether the individual 220 // constants are. 221 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); 222 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) { 223 if (CN->getAPIntValue().countTrailingZeros() < EltSize) 224 return false; 225 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) { 226 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize) 227 return false; 228 } else 229 return false; 230 } 231 232 // Do not accept an all-undef vector. 233 if (IsAllUndef) 234 return false; 235 return true; 236 } 237 238 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) { 239 if (N->getOpcode() != ISD::BUILD_VECTOR) 240 return false; 241 242 for (const SDValue &Op : N->op_values()) { 243 if (Op.isUndef()) 244 continue; 245 if (!isa<ConstantSDNode>(Op)) 246 return false; 247 } 248 return true; 249 } 250 251 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) { 252 if (N->getOpcode() != ISD::BUILD_VECTOR) 253 return false; 254 255 for (const SDValue &Op : N->op_values()) { 256 if (Op.isUndef()) 257 continue; 258 if (!isa<ConstantFPSDNode>(Op)) 259 return false; 260 } 261 return true; 262 } 263 264 bool ISD::allOperandsUndef(const SDNode *N) { 265 // Return false if the node has no operands. 266 // This is "logically inconsistent" with the definition of "all" but 267 // is probably the desired behavior. 
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
                       (OldL << 1) |      // New G bit
                       (OldG << 2));      // New L bit.
}

static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7; // Flip L, G, E bits, but not U.
  else
    Operation ^= 15; // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8; // Don't let N and U bits get set.
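  // Rough sketch of the encoding this relies on (see ISD::CondCode in
  // ISDOpcodes.h for the authoritative definition): the low bits of a
  // condition code act as E (1), G (2), L (4) and U (8) flags, with the
  // integer codes forming a second block above the floating-point ones.
  // For example, SETOLT ^ 15 gives SETUGE, while the integer SETLT ^ 7
  // gives SETGE.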
369 370 return ISD::CondCode(Operation); 371 } 372 373 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) { 374 return getSetCCInverseImpl(Op, Type.isInteger()); 375 } 376 377 ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op, 378 bool isIntegerLike) { 379 return getSetCCInverseImpl(Op, isIntegerLike); 380 } 381 382 /// For an integer comparison, return 1 if the comparison is a signed operation 383 /// and 2 if the result is an unsigned comparison. Return zero if the operation 384 /// does not depend on the sign of the input (setne and seteq). 385 static int isSignedOp(ISD::CondCode Opcode) { 386 switch (Opcode) { 387 default: llvm_unreachable("Illegal integer setcc operation!"); 388 case ISD::SETEQ: 389 case ISD::SETNE: return 0; 390 case ISD::SETLT: 391 case ISD::SETLE: 392 case ISD::SETGT: 393 case ISD::SETGE: return 1; 394 case ISD::SETULT: 395 case ISD::SETULE: 396 case ISD::SETUGT: 397 case ISD::SETUGE: return 2; 398 } 399 } 400 401 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2, 402 EVT Type) { 403 bool IsInteger = Type.isInteger(); 404 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3) 405 // Cannot fold a signed integer setcc with an unsigned integer setcc. 406 return ISD::SETCC_INVALID; 407 408 unsigned Op = Op1 | Op2; // Combine all of the condition bits. 409 410 // If the N and U bits get set, then the resultant comparison DOES suddenly 411 // care about orderedness, and it is true when ordered. 412 if (Op > ISD::SETTRUE2) 413 Op &= ~16; // Clear the U bit if the N bit is set. 414 415 // Canonicalize illegal integer setcc's. 416 if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT 417 Op = ISD::SETNE; 418 419 return ISD::CondCode(Op); 420 } 421 422 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2, 423 EVT Type) { 424 bool IsInteger = Type.isInteger(); 425 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3) 426 // Cannot fold a signed setcc with an unsigned setcc. 427 return ISD::SETCC_INVALID; 428 429 // Combine all of the condition bits. 430 ISD::CondCode Result = ISD::CondCode(Op1 & Op2); 431 432 // Canonicalize illegal integer setcc's. 433 if (IsInteger) { 434 switch (Result) { 435 default: break; 436 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT 437 case ISD::SETOEQ: // SETEQ & SETU[LG]E 438 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE 439 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE 440 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE 441 } 442 } 443 444 return Result; 445 } 446 447 //===----------------------------------------------------------------------===// 448 // SDNode Profile Support 449 //===----------------------------------------------------------------------===// 450 451 /// AddNodeIDOpcode - Add the node opcode to the NodeID data. 452 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) { 453 ID.AddInteger(OpC); 454 } 455 456 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them 457 /// solely with their pointer. 458 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) { 459 ID.AddPointer(VTList.VTs); 460 } 461 462 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 
463 static void AddNodeIDOperands(FoldingSetNodeID &ID, 464 ArrayRef<SDValue> Ops) { 465 for (auto& Op : Ops) { 466 ID.AddPointer(Op.getNode()); 467 ID.AddInteger(Op.getResNo()); 468 } 469 } 470 471 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 472 static void AddNodeIDOperands(FoldingSetNodeID &ID, 473 ArrayRef<SDUse> Ops) { 474 for (auto& Op : Ops) { 475 ID.AddPointer(Op.getNode()); 476 ID.AddInteger(Op.getResNo()); 477 } 478 } 479 480 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC, 481 SDVTList VTList, ArrayRef<SDValue> OpList) { 482 AddNodeIDOpcode(ID, OpC); 483 AddNodeIDValueTypes(ID, VTList); 484 AddNodeIDOperands(ID, OpList); 485 } 486 487 /// If this is an SDNode with special info, add this info to the NodeID data. 488 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) { 489 switch (N->getOpcode()) { 490 case ISD::TargetExternalSymbol: 491 case ISD::ExternalSymbol: 492 case ISD::MCSymbol: 493 llvm_unreachable("Should only be used on nodes with operands"); 494 default: break; // Normal nodes don't need extra info. 495 case ISD::TargetConstant: 496 case ISD::Constant: { 497 const ConstantSDNode *C = cast<ConstantSDNode>(N); 498 ID.AddPointer(C->getConstantIntValue()); 499 ID.AddBoolean(C->isOpaque()); 500 break; 501 } 502 case ISD::TargetConstantFP: 503 case ISD::ConstantFP: 504 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue()); 505 break; 506 case ISD::TargetGlobalAddress: 507 case ISD::GlobalAddress: 508 case ISD::TargetGlobalTLSAddress: 509 case ISD::GlobalTLSAddress: { 510 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N); 511 ID.AddPointer(GA->getGlobal()); 512 ID.AddInteger(GA->getOffset()); 513 ID.AddInteger(GA->getTargetFlags()); 514 break; 515 } 516 case ISD::BasicBlock: 517 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock()); 518 break; 519 case ISD::Register: 520 ID.AddInteger(cast<RegisterSDNode>(N)->getReg()); 521 break; 522 case ISD::RegisterMask: 523 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask()); 524 break; 525 case ISD::SRCVALUE: 526 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue()); 527 break; 528 case ISD::FrameIndex: 529 case ISD::TargetFrameIndex: 530 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex()); 531 break; 532 case ISD::LIFETIME_START: 533 case ISD::LIFETIME_END: 534 if (cast<LifetimeSDNode>(N)->hasOffset()) { 535 ID.AddInteger(cast<LifetimeSDNode>(N)->getSize()); 536 ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset()); 537 } 538 break; 539 case ISD::JumpTable: 540 case ISD::TargetJumpTable: 541 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex()); 542 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags()); 543 break; 544 case ISD::ConstantPool: 545 case ISD::TargetConstantPool: { 546 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N); 547 ID.AddInteger(CP->getAlign().value()); 548 ID.AddInteger(CP->getOffset()); 549 if (CP->isMachineConstantPoolEntry()) 550 CP->getMachineCPVal()->addSelectionDAGCSEId(ID); 551 else 552 ID.AddPointer(CP->getConstVal()); 553 ID.AddInteger(CP->getTargetFlags()); 554 break; 555 } 556 case ISD::TargetIndex: { 557 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N); 558 ID.AddInteger(TI->getIndex()); 559 ID.AddInteger(TI->getOffset()); 560 ID.AddInteger(TI->getTargetFlags()); 561 break; 562 } 563 case ISD::LOAD: { 564 const LoadSDNode *LD = cast<LoadSDNode>(N); 565 ID.AddInteger(LD->getMemoryVT().getRawBits()); 566 ID.AddInteger(LD->getRawSubclassData()); 567 
ID.AddInteger(LD->getPointerInfo().getAddrSpace()); 568 break; 569 } 570 case ISD::STORE: { 571 const StoreSDNode *ST = cast<StoreSDNode>(N); 572 ID.AddInteger(ST->getMemoryVT().getRawBits()); 573 ID.AddInteger(ST->getRawSubclassData()); 574 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 575 break; 576 } 577 case ISD::MLOAD: { 578 const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N); 579 ID.AddInteger(MLD->getMemoryVT().getRawBits()); 580 ID.AddInteger(MLD->getRawSubclassData()); 581 ID.AddInteger(MLD->getPointerInfo().getAddrSpace()); 582 break; 583 } 584 case ISD::MSTORE: { 585 const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N); 586 ID.AddInteger(MST->getMemoryVT().getRawBits()); 587 ID.AddInteger(MST->getRawSubclassData()); 588 ID.AddInteger(MST->getPointerInfo().getAddrSpace()); 589 break; 590 } 591 case ISD::MGATHER: { 592 const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N); 593 ID.AddInteger(MG->getMemoryVT().getRawBits()); 594 ID.AddInteger(MG->getRawSubclassData()); 595 ID.AddInteger(MG->getPointerInfo().getAddrSpace()); 596 break; 597 } 598 case ISD::MSCATTER: { 599 const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N); 600 ID.AddInteger(MS->getMemoryVT().getRawBits()); 601 ID.AddInteger(MS->getRawSubclassData()); 602 ID.AddInteger(MS->getPointerInfo().getAddrSpace()); 603 break; 604 } 605 case ISD::ATOMIC_CMP_SWAP: 606 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 607 case ISD::ATOMIC_SWAP: 608 case ISD::ATOMIC_LOAD_ADD: 609 case ISD::ATOMIC_LOAD_SUB: 610 case ISD::ATOMIC_LOAD_AND: 611 case ISD::ATOMIC_LOAD_CLR: 612 case ISD::ATOMIC_LOAD_OR: 613 case ISD::ATOMIC_LOAD_XOR: 614 case ISD::ATOMIC_LOAD_NAND: 615 case ISD::ATOMIC_LOAD_MIN: 616 case ISD::ATOMIC_LOAD_MAX: 617 case ISD::ATOMIC_LOAD_UMIN: 618 case ISD::ATOMIC_LOAD_UMAX: 619 case ISD::ATOMIC_LOAD: 620 case ISD::ATOMIC_STORE: { 621 const AtomicSDNode *AT = cast<AtomicSDNode>(N); 622 ID.AddInteger(AT->getMemoryVT().getRawBits()); 623 ID.AddInteger(AT->getRawSubclassData()); 624 ID.AddInteger(AT->getPointerInfo().getAddrSpace()); 625 break; 626 } 627 case ISD::PREFETCH: { 628 const MemSDNode *PF = cast<MemSDNode>(N); 629 ID.AddInteger(PF->getPointerInfo().getAddrSpace()); 630 break; 631 } 632 case ISD::VECTOR_SHUFFLE: { 633 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 634 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements(); 635 i != e; ++i) 636 ID.AddInteger(SVN->getMaskElt(i)); 637 break; 638 } 639 case ISD::TargetBlockAddress: 640 case ISD::BlockAddress: { 641 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N); 642 ID.AddPointer(BA->getBlockAddress()); 643 ID.AddInteger(BA->getOffset()); 644 ID.AddInteger(BA->getTargetFlags()); 645 break; 646 } 647 } // end switch (N->getOpcode()) 648 649 // Target specific memory nodes could also have address spaces to check. 650 if (N->isTargetMemoryOpcode()) 651 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace()); 652 } 653 654 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID 655 /// data. 656 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) { 657 AddNodeIDOpcode(ID, N->getOpcode()); 658 // Add the return value info. 659 AddNodeIDValueTypes(ID, N->getVTList()); 660 // Add the operand info. 661 AddNodeIDOperands(ID, N->ops()); 662 663 // Handle SDNode leafs with special info. 
664 AddNodeIDCustom(ID, N); 665 } 666 667 //===----------------------------------------------------------------------===// 668 // SelectionDAG Class 669 //===----------------------------------------------------------------------===// 670 671 /// doNotCSE - Return true if CSE should not be performed for this node. 672 static bool doNotCSE(SDNode *N) { 673 if (N->getValueType(0) == MVT::Glue) 674 return true; // Never CSE anything that produces a flag. 675 676 switch (N->getOpcode()) { 677 default: break; 678 case ISD::HANDLENODE: 679 case ISD::EH_LABEL: 680 return true; // Never CSE these nodes. 681 } 682 683 // Check that remaining values produced are not flags. 684 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i) 685 if (N->getValueType(i) == MVT::Glue) 686 return true; // Never CSE anything that produces a flag. 687 688 return false; 689 } 690 691 /// RemoveDeadNodes - This method deletes all unreachable nodes in the 692 /// SelectionDAG. 693 void SelectionDAG::RemoveDeadNodes() { 694 // Create a dummy node (which is not added to allnodes), that adds a reference 695 // to the root node, preventing it from being deleted. 696 HandleSDNode Dummy(getRoot()); 697 698 SmallVector<SDNode*, 128> DeadNodes; 699 700 // Add all obviously-dead nodes to the DeadNodes worklist. 701 for (SDNode &Node : allnodes()) 702 if (Node.use_empty()) 703 DeadNodes.push_back(&Node); 704 705 RemoveDeadNodes(DeadNodes); 706 707 // If the root changed (e.g. it was a dead load, update the root). 708 setRoot(Dummy.getValue()); 709 } 710 711 /// RemoveDeadNodes - This method deletes the unreachable nodes in the 712 /// given list, and any nodes that become unreachable as a result. 713 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) { 714 715 // Process the worklist, deleting the nodes and adding their uses to the 716 // worklist. 717 while (!DeadNodes.empty()) { 718 SDNode *N = DeadNodes.pop_back_val(); 719 // Skip to next node if we've already managed to delete the node. This could 720 // happen if replacing a node causes a node previously added to the node to 721 // be deleted. 722 if (N->getOpcode() == ISD::DELETED_NODE) 723 continue; 724 725 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 726 DUL->NodeDeleted(N, nullptr); 727 728 // Take the node out of the appropriate CSE map. 729 RemoveNodeFromCSEMaps(N); 730 731 // Next, brutally remove the operand list. This is safe to do, as there are 732 // no cycles in the graph. 733 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 734 SDUse &Use = *I++; 735 SDNode *Operand = Use.getNode(); 736 Use.set(SDValue()); 737 738 // Now that we removed this operand, see if there are no uses of it left. 739 if (Operand->use_empty()) 740 DeadNodes.push_back(Operand); 741 } 742 743 DeallocateNode(N); 744 } 745 } 746 747 void SelectionDAG::RemoveDeadNode(SDNode *N){ 748 SmallVector<SDNode*, 16> DeadNodes(1, N); 749 750 // Create a dummy node that adds a reference to the root node, preventing 751 // it from being deleted. (This matters if the root is an operand of the 752 // dead node.) 753 HandleSDNode Dummy(getRoot()); 754 755 RemoveDeadNodes(DeadNodes); 756 } 757 758 void SelectionDAG::DeleteNode(SDNode *N) { 759 // First take this out of the appropriate CSE map. 760 RemoveNodeFromCSEMaps(N); 761 762 // Finally, remove uses due to operands of this node, remove from the 763 // AllNodes list, and delete the node. 
764 DeleteNodeNotInCSEMaps(N); 765 } 766 767 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) { 768 assert(N->getIterator() != AllNodes.begin() && 769 "Cannot delete the entry node!"); 770 assert(N->use_empty() && "Cannot delete a node that is not dead!"); 771 772 // Drop all of the operands and decrement used node's use counts. 773 N->DropOperands(); 774 775 DeallocateNode(N); 776 } 777 778 void SDDbgInfo::erase(const SDNode *Node) { 779 DbgValMapType::iterator I = DbgValMap.find(Node); 780 if (I == DbgValMap.end()) 781 return; 782 for (auto &Val: I->second) 783 Val->setIsInvalidated(); 784 DbgValMap.erase(I); 785 } 786 787 void SelectionDAG::DeallocateNode(SDNode *N) { 788 // If we have operands, deallocate them. 789 removeOperands(N); 790 791 NodeAllocator.Deallocate(AllNodes.remove(N)); 792 793 // Set the opcode to DELETED_NODE to help catch bugs when node 794 // memory is reallocated. 795 // FIXME: There are places in SDag that have grown a dependency on the opcode 796 // value in the released node. 797 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType)); 798 N->NodeType = ISD::DELETED_NODE; 799 800 // If any of the SDDbgValue nodes refer to this SDNode, invalidate 801 // them and forget about that node. 802 DbgInfo->erase(N); 803 } 804 805 #ifndef NDEBUG 806 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid. 807 static void VerifySDNode(SDNode *N) { 808 switch (N->getOpcode()) { 809 default: 810 break; 811 case ISD::BUILD_PAIR: { 812 EVT VT = N->getValueType(0); 813 assert(N->getNumValues() == 1 && "Too many results!"); 814 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) && 815 "Wrong return type!"); 816 assert(N->getNumOperands() == 2 && "Wrong number of operands!"); 817 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() && 818 "Mismatched operand types!"); 819 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() && 820 "Wrong operand type!"); 821 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() && 822 "Wrong return type size"); 823 break; 824 } 825 case ISD::BUILD_VECTOR: { 826 assert(N->getNumValues() == 1 && "Too many results!"); 827 assert(N->getValueType(0).isVector() && "Wrong return type!"); 828 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() && 829 "Wrong number of operands!"); 830 EVT EltVT = N->getValueType(0).getVectorElementType(); 831 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) { 832 assert((I->getValueType() == EltVT || 833 (EltVT.isInteger() && I->getValueType().isInteger() && 834 EltVT.bitsLE(I->getValueType()))) && 835 "Wrong operand type!"); 836 assert(I->getValueType() == N->getOperand(0).getValueType() && 837 "Operands must all have the same type"); 838 } 839 break; 840 } 841 } 842 } 843 #endif // NDEBUG 844 845 /// Insert a newly allocated node into the DAG. 846 /// 847 /// Handles insertion into the all nodes list and CSE map, as well as 848 /// verification and other common operations when a new node is allocated. 849 void SelectionDAG::InsertNode(SDNode *N) { 850 AllNodes.push_back(N); 851 #ifndef NDEBUG 852 N->PersistentId = NextPersistentId++; 853 VerifySDNode(N); 854 #endif 855 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 856 DUL->NodeInserted(N); 857 } 858 859 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that 860 /// correspond to it. This is useful when we're about to delete or repurpose 861 /// the node. 
We don't want future request for structurally identical nodes 862 /// to return N anymore. 863 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) { 864 bool Erased = false; 865 switch (N->getOpcode()) { 866 case ISD::HANDLENODE: return false; // noop. 867 case ISD::CONDCODE: 868 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] && 869 "Cond code doesn't exist!"); 870 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr; 871 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr; 872 break; 873 case ISD::ExternalSymbol: 874 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol()); 875 break; 876 case ISD::TargetExternalSymbol: { 877 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N); 878 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>( 879 ESN->getSymbol(), ESN->getTargetFlags())); 880 break; 881 } 882 case ISD::MCSymbol: { 883 auto *MCSN = cast<MCSymbolSDNode>(N); 884 Erased = MCSymbols.erase(MCSN->getMCSymbol()); 885 break; 886 } 887 case ISD::VALUETYPE: { 888 EVT VT = cast<VTSDNode>(N)->getVT(); 889 if (VT.isExtended()) { 890 Erased = ExtendedValueTypeNodes.erase(VT); 891 } else { 892 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr; 893 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr; 894 } 895 break; 896 } 897 default: 898 // Remove it from the CSE Map. 899 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!"); 900 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!"); 901 Erased = CSEMap.RemoveNode(N); 902 break; 903 } 904 #ifndef NDEBUG 905 // Verify that the node was actually in one of the CSE maps, unless it has a 906 // flag result (which cannot be CSE'd) or is one of the special cases that are 907 // not subject to CSE. 908 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue && 909 !N->isMachineOpcode() && !doNotCSE(N)) { 910 N->dump(this); 911 dbgs() << "\n"; 912 llvm_unreachable("Node is not in map!"); 913 } 914 #endif 915 return Erased; 916 } 917 918 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE 919 /// maps and modified in place. Add it back to the CSE maps, unless an identical 920 /// node already exists, in which case transfer all its users to the existing 921 /// node. This transfer can potentially trigger recursive merging. 922 void 923 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) { 924 // For node types that aren't CSE'd, just act as if no identical node 925 // already exists. 926 if (!doNotCSE(N)) { 927 SDNode *Existing = CSEMap.GetOrInsertNode(N); 928 if (Existing != N) { 929 // If there was already an existing matching node, use ReplaceAllUsesWith 930 // to replace the dead one with the existing one. This can cause 931 // recursive merging of other unrelated nodes down the line. 932 ReplaceAllUsesWith(N, Existing); 933 934 // N is now dead. Inform the listeners and delete it. 935 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 936 DUL->NodeDeleted(N, Existing); 937 DeleteNodeNotInCSEMaps(N); 938 return; 939 } 940 } 941 942 // If the node doesn't already exist, we updated it. Inform listeners. 943 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 944 DUL->NodeUpdated(N); 945 } 946 947 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 948 /// were replaced with those specified. If this node is never memoized, 949 /// return null, otherwise return a pointer to the slot it would take. 
If a 950 /// node already exists with these operands, the slot will be non-null. 951 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op, 952 void *&InsertPos) { 953 if (doNotCSE(N)) 954 return nullptr; 955 956 SDValue Ops[] = { Op }; 957 FoldingSetNodeID ID; 958 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 959 AddNodeIDCustom(ID, N); 960 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 961 if (Node) 962 Node->intersectFlagsWith(N->getFlags()); 963 return Node; 964 } 965 966 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 967 /// were replaced with those specified. If this node is never memoized, 968 /// return null, otherwise return a pointer to the slot it would take. If a 969 /// node already exists with these operands, the slot will be non-null. 970 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, 971 SDValue Op1, SDValue Op2, 972 void *&InsertPos) { 973 if (doNotCSE(N)) 974 return nullptr; 975 976 SDValue Ops[] = { Op1, Op2 }; 977 FoldingSetNodeID ID; 978 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 979 AddNodeIDCustom(ID, N); 980 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 981 if (Node) 982 Node->intersectFlagsWith(N->getFlags()); 983 return Node; 984 } 985 986 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 987 /// were replaced with those specified. If this node is never memoized, 988 /// return null, otherwise return a pointer to the slot it would take. If a 989 /// node already exists with these operands, the slot will be non-null. 990 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops, 991 void *&InsertPos) { 992 if (doNotCSE(N)) 993 return nullptr; 994 995 FoldingSetNodeID ID; 996 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 997 AddNodeIDCustom(ID, N); 998 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 999 if (Node) 1000 Node->intersectFlagsWith(N->getFlags()); 1001 return Node; 1002 } 1003 1004 Align SelectionDAG::getEVTAlign(EVT VT) const { 1005 Type *Ty = VT == MVT::iPTR ? 1006 PointerType::get(Type::getInt8Ty(*getContext()), 0) : 1007 VT.getTypeForEVT(*getContext()); 1008 1009 return getDataLayout().getABITypeAlign(Ty); 1010 } 1011 1012 // EntryNode could meaningfully have debug info if we can find it... 
1013 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL) 1014 : TM(tm), OptLevel(OL), 1015 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)), 1016 Root(getEntryNode()) { 1017 InsertNode(&EntryNode); 1018 DbgInfo = new SDDbgInfo(); 1019 } 1020 1021 void SelectionDAG::init(MachineFunction &NewMF, 1022 OptimizationRemarkEmitter &NewORE, 1023 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, 1024 LegacyDivergenceAnalysis * Divergence, 1025 ProfileSummaryInfo *PSIin, 1026 BlockFrequencyInfo *BFIin) { 1027 MF = &NewMF; 1028 SDAGISelPass = PassPtr; 1029 ORE = &NewORE; 1030 TLI = getSubtarget().getTargetLowering(); 1031 TSI = getSubtarget().getSelectionDAGInfo(); 1032 LibInfo = LibraryInfo; 1033 Context = &MF->getFunction().getContext(); 1034 DA = Divergence; 1035 PSI = PSIin; 1036 BFI = BFIin; 1037 } 1038 1039 SelectionDAG::~SelectionDAG() { 1040 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners"); 1041 allnodes_clear(); 1042 OperandRecycler.clear(OperandAllocator); 1043 delete DbgInfo; 1044 } 1045 1046 bool SelectionDAG::shouldOptForSize() const { 1047 return MF->getFunction().hasOptSize() || 1048 llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI); 1049 } 1050 1051 void SelectionDAG::allnodes_clear() { 1052 assert(&*AllNodes.begin() == &EntryNode); 1053 AllNodes.remove(AllNodes.begin()); 1054 while (!AllNodes.empty()) 1055 DeallocateNode(&AllNodes.front()); 1056 #ifndef NDEBUG 1057 NextPersistentId = 0; 1058 #endif 1059 } 1060 1061 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID, 1062 void *&InsertPos) { 1063 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos); 1064 if (N) { 1065 switch (N->getOpcode()) { 1066 default: break; 1067 case ISD::Constant: 1068 case ISD::ConstantFP: 1069 llvm_unreachable("Querying for Constant and ConstantFP nodes requires " 1070 "debug location. Use another overload."); 1071 } 1072 } 1073 return N; 1074 } 1075 1076 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID, 1077 const SDLoc &DL, void *&InsertPos) { 1078 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos); 1079 if (N) { 1080 switch (N->getOpcode()) { 1081 case ISD::Constant: 1082 case ISD::ConstantFP: 1083 // Erase debug location from the node if the node is used at several 1084 // different places. Do not propagate one location to all uses as it 1085 // will cause a worse single stepping debugging experience. 1086 if (N->getDebugLoc() != DL.getDebugLoc()) 1087 N->setDebugLoc(DebugLoc()); 1088 break; 1089 default: 1090 // When the node's point of use is located earlier in the instruction 1091 // sequence than its prior point of use, update its debug info to the 1092 // earlier location. 
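      // (An IROrder of 0 means "no ordering information", so such locations
      // never replace an existing one; see the check below.)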
1093 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder()) 1094 N->setDebugLoc(DL.getDebugLoc()); 1095 break; 1096 } 1097 } 1098 return N; 1099 } 1100 1101 void SelectionDAG::clear() { 1102 allnodes_clear(); 1103 OperandRecycler.clear(OperandAllocator); 1104 OperandAllocator.Reset(); 1105 CSEMap.clear(); 1106 1107 ExtendedValueTypeNodes.clear(); 1108 ExternalSymbols.clear(); 1109 TargetExternalSymbols.clear(); 1110 MCSymbols.clear(); 1111 SDCallSiteDbgInfo.clear(); 1112 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(), 1113 static_cast<CondCodeSDNode*>(nullptr)); 1114 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(), 1115 static_cast<SDNode*>(nullptr)); 1116 1117 EntryNode.UseList = nullptr; 1118 InsertNode(&EntryNode); 1119 Root = getEntryNode(); 1120 DbgInfo->clear(); 1121 } 1122 1123 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) { 1124 return VT.bitsGT(Op.getValueType()) 1125 ? getNode(ISD::FP_EXTEND, DL, VT, Op) 1126 : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL)); 1127 } 1128 1129 std::pair<SDValue, SDValue> 1130 SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain, 1131 const SDLoc &DL, EVT VT) { 1132 assert(!VT.bitsEq(Op.getValueType()) && 1133 "Strict no-op FP extend/round not allowed."); 1134 SDValue Res = 1135 VT.bitsGT(Op.getValueType()) 1136 ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op}) 1137 : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other}, 1138 {Chain, Op, getIntPtrConstant(0, DL)}); 1139 1140 return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1)); 1141 } 1142 1143 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1144 return VT.bitsGT(Op.getValueType()) ? 1145 getNode(ISD::ANY_EXTEND, DL, VT, Op) : 1146 getNode(ISD::TRUNCATE, DL, VT, Op); 1147 } 1148 1149 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1150 return VT.bitsGT(Op.getValueType()) ? 1151 getNode(ISD::SIGN_EXTEND, DL, VT, Op) : 1152 getNode(ISD::TRUNCATE, DL, VT, Op); 1153 } 1154 1155 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1156 return VT.bitsGT(Op.getValueType()) ? 1157 getNode(ISD::ZERO_EXTEND, DL, VT, Op) : 1158 getNode(ISD::TRUNCATE, DL, VT, Op); 1159 } 1160 1161 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, 1162 EVT OpVT) { 1163 if (VT.bitsLE(Op.getValueType())) 1164 return getNode(ISD::TRUNCATE, SL, VT, Op); 1165 1166 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT); 1167 return getNode(TLI->getExtendForContent(BType), SL, VT, Op); 1168 } 1169 1170 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) { 1171 EVT OpVT = Op.getValueType(); 1172 assert(VT.isInteger() && OpVT.isInteger() && 1173 "Cannot getZeroExtendInReg FP types"); 1174 assert(VT.isVector() == OpVT.isVector() && 1175 "getZeroExtendInReg type should be vector iff the operand " 1176 "type is vector!"); 1177 assert((!VT.isVector() || 1178 VT.getVectorElementCount() == OpVT.getVectorElementCount()) && 1179 "Vector element counts must match in getZeroExtendInReg"); 1180 assert(VT.bitsLE(OpVT) && "Not extending!"); 1181 if (OpVT == VT) 1182 return Op; 1183 APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(), 1184 VT.getScalarSizeInBits()); 1185 return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT)); 1186 } 1187 1188 SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1189 // Only unsigned pointer semantics are supported right now. 
  // In the future this might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
      getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
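  // Illustrative example (based on the v2i64-on-MIPS32 case mentioned above,
  // not an exhaustive description): a v2i64 constant whose element type must
  // be expanded is rebuilt below as a v4i32 BUILD_VECTOR holding the low and
  // high halves of each element, and then bitcast back to v2i64.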
1265 else if (NewNodesMustHaveLegalTypes && VT.isVector() && 1266 TLI->getTypeAction(*getContext(), EltVT) == 1267 TargetLowering::TypeExpandInteger) { 1268 const APInt &NewVal = Elt->getValue(); 1269 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); 1270 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits(); 1271 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits; 1272 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts); 1273 1274 // Check the temporary vector is the correct size. If this fails then 1275 // getTypeToTransformTo() probably returned a type whose size (in bits) 1276 // isn't a power-of-2 factor of the requested type size. 1277 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits()); 1278 1279 SmallVector<SDValue, 2> EltParts; 1280 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) { 1281 EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits) 1282 .zextOrTrunc(ViaEltSizeInBits), DL, 1283 ViaEltVT, isT, isO)); 1284 } 1285 1286 // EltParts is currently in little endian order. If we actually want 1287 // big-endian order then reverse it now. 1288 if (getDataLayout().isBigEndian()) 1289 std::reverse(EltParts.begin(), EltParts.end()); 1290 1291 // The elements must be reversed when the element order is different 1292 // to the endianness of the elements (because the BITCAST is itself a 1293 // vector shuffle in this situation). However, we do not need any code to 1294 // perform this reversal because getConstant() is producing a vector 1295 // splat. 1296 // This situation occurs in MIPS MSA. 1297 1298 SmallVector<SDValue, 8> Ops; 1299 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 1300 Ops.insert(Ops.end(), EltParts.begin(), EltParts.end()); 1301 1302 SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops)); 1303 return V; 1304 } 1305 1306 assert(Elt->getBitWidth() == EltVT.getSizeInBits() && 1307 "APInt size does not match type size!"); 1308 unsigned Opc = isT ? 
ISD::TargetConstant : ISD::Constant; 1309 FoldingSetNodeID ID; 1310 AddNodeIDNode(ID, Opc, getVTList(EltVT), None); 1311 ID.AddPointer(Elt); 1312 ID.AddBoolean(isO); 1313 void *IP = nullptr; 1314 SDNode *N = nullptr; 1315 if ((N = FindNodeOrInsertPos(ID, DL, IP))) 1316 if (!VT.isVector()) 1317 return SDValue(N, 0); 1318 1319 if (!N) { 1320 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT); 1321 CSEMap.InsertNode(N, IP); 1322 InsertNode(N); 1323 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this); 1324 } 1325 1326 SDValue Result(N, 0); 1327 if (VT.isScalableVector()) 1328 Result = getSplatVector(VT, DL, Result); 1329 else if (VT.isVector()) 1330 Result = getSplatBuildVector(VT, DL, Result); 1331 1332 return Result; 1333 } 1334 1335 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL, 1336 bool isTarget) { 1337 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget); 1338 } 1339 1340 SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT, 1341 const SDLoc &DL, bool LegalTypes) { 1342 assert(VT.isInteger() && "Shift amount is not an integer type!"); 1343 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes); 1344 return getConstant(Val, DL, ShiftVT); 1345 } 1346 1347 SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL, 1348 bool isTarget) { 1349 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget); 1350 } 1351 1352 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT, 1353 bool isTarget) { 1354 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget); 1355 } 1356 1357 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL, 1358 EVT VT, bool isTarget) { 1359 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!"); 1360 1361 EVT EltVT = VT.getScalarType(); 1362 1363 // Do the map lookup using the actual bit pattern for the floating point 1364 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and 1365 // we don't have issues with SNANs. 1366 unsigned Opc = isTarget ? 
ISD::TargetConstantFP : ISD::ConstantFP; 1367 FoldingSetNodeID ID; 1368 AddNodeIDNode(ID, Opc, getVTList(EltVT), None); 1369 ID.AddPointer(&V); 1370 void *IP = nullptr; 1371 SDNode *N = nullptr; 1372 if ((N = FindNodeOrInsertPos(ID, DL, IP))) 1373 if (!VT.isVector()) 1374 return SDValue(N, 0); 1375 1376 if (!N) { 1377 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT); 1378 CSEMap.InsertNode(N, IP); 1379 InsertNode(N); 1380 } 1381 1382 SDValue Result(N, 0); 1383 if (VT.isScalableVector()) 1384 Result = getSplatVector(VT, DL, Result); 1385 else if (VT.isVector()) 1386 Result = getSplatBuildVector(VT, DL, Result); 1387 NewSDValueDbgMsg(Result, "Creating fp constant: ", this); 1388 return Result; 1389 } 1390 1391 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT, 1392 bool isTarget) { 1393 EVT EltVT = VT.getScalarType(); 1394 if (EltVT == MVT::f32) 1395 return getConstantFP(APFloat((float)Val), DL, VT, isTarget); 1396 else if (EltVT == MVT::f64) 1397 return getConstantFP(APFloat(Val), DL, VT, isTarget); 1398 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 || 1399 EltVT == MVT::f16 || EltVT == MVT::bf16) { 1400 bool Ignored; 1401 APFloat APF = APFloat(Val); 1402 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven, 1403 &Ignored); 1404 return getConstantFP(APF, DL, VT, isTarget); 1405 } else 1406 llvm_unreachable("Unsupported type in getConstantFP"); 1407 } 1408 1409 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, 1410 EVT VT, int64_t Offset, bool isTargetGA, 1411 unsigned TargetFlags) { 1412 assert((TargetFlags == 0 || isTargetGA) && 1413 "Cannot set target flags on target-independent globals"); 1414 1415 // Truncate (with sign-extension) the offset value to the pointer size. 1416 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); 1417 if (BitWidth < 64) 1418 Offset = SignExtend64(Offset, BitWidth); 1419 1420 unsigned Opc; 1421 if (GV->isThreadLocal()) 1422 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress; 1423 else 1424 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress; 1425 1426 FoldingSetNodeID ID; 1427 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1428 ID.AddPointer(GV); 1429 ID.AddInteger(Offset); 1430 ID.AddInteger(TargetFlags); 1431 void *IP = nullptr; 1432 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 1433 return SDValue(E, 0); 1434 1435 auto *N = newSDNode<GlobalAddressSDNode>( 1436 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags); 1437 CSEMap.InsertNode(N, IP); 1438 InsertNode(N); 1439 return SDValue(N, 0); 1440 } 1441 1442 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) { 1443 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex; 1444 FoldingSetNodeID ID; 1445 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1446 ID.AddInteger(FI); 1447 void *IP = nullptr; 1448 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1449 return SDValue(E, 0); 1450 1451 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget); 1452 CSEMap.InsertNode(N, IP); 1453 InsertNode(N); 1454 return SDValue(N, 0); 1455 } 1456 1457 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget, 1458 unsigned TargetFlags) { 1459 assert((TargetFlags == 0 || isTarget) && 1460 "Cannot set target flags on target-independent jump tables"); 1461 unsigned Opc = isTarget ? 
ISD::TargetJumpTable : ISD::JumpTable; 1462 FoldingSetNodeID ID; 1463 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1464 ID.AddInteger(JTI); 1465 ID.AddInteger(TargetFlags); 1466 void *IP = nullptr; 1467 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1468 return SDValue(E, 0); 1469 1470 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags); 1471 CSEMap.InsertNode(N, IP); 1472 InsertNode(N); 1473 return SDValue(N, 0); 1474 } 1475 1476 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT, 1477 MaybeAlign Alignment, int Offset, 1478 bool isTarget, unsigned TargetFlags) { 1479 assert((TargetFlags == 0 || isTarget) && 1480 "Cannot set target flags on target-independent globals"); 1481 if (!Alignment) 1482 Alignment = shouldOptForSize() 1483 ? getDataLayout().getABITypeAlign(C->getType()) 1484 : getDataLayout().getPrefTypeAlign(C->getType()); 1485 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; 1486 FoldingSetNodeID ID; 1487 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1488 ID.AddInteger(Alignment->value()); 1489 ID.AddInteger(Offset); 1490 ID.AddPointer(C); 1491 ID.AddInteger(TargetFlags); 1492 void *IP = nullptr; 1493 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1494 return SDValue(E, 0); 1495 1496 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment, 1497 TargetFlags); 1498 CSEMap.InsertNode(N, IP); 1499 InsertNode(N); 1500 SDValue V = SDValue(N, 0); 1501 NewSDValueDbgMsg(V, "Creating new constant pool: ", this); 1502 return V; 1503 } 1504 1505 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT, 1506 MaybeAlign Alignment, int Offset, 1507 bool isTarget, unsigned TargetFlags) { 1508 assert((TargetFlags == 0 || isTarget) && 1509 "Cannot set target flags on target-independent globals"); 1510 if (!Alignment) 1511 Alignment = getDataLayout().getPrefTypeAlign(C->getType()); 1512 unsigned Opc = isTarget ? 
ISD::TargetConstantPool : ISD::ConstantPool; 1513 FoldingSetNodeID ID; 1514 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1515 ID.AddInteger(Alignment->value()); 1516 ID.AddInteger(Offset); 1517 C->addSelectionDAGCSEId(ID); 1518 ID.AddInteger(TargetFlags); 1519 void *IP = nullptr; 1520 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1521 return SDValue(E, 0); 1522 1523 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment, 1524 TargetFlags); 1525 CSEMap.InsertNode(N, IP); 1526 InsertNode(N); 1527 return SDValue(N, 0); 1528 } 1529 1530 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset, 1531 unsigned TargetFlags) { 1532 FoldingSetNodeID ID; 1533 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None); 1534 ID.AddInteger(Index); 1535 ID.AddInteger(Offset); 1536 ID.AddInteger(TargetFlags); 1537 void *IP = nullptr; 1538 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1539 return SDValue(E, 0); 1540 1541 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags); 1542 CSEMap.InsertNode(N, IP); 1543 InsertNode(N); 1544 return SDValue(N, 0); 1545 } 1546 1547 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { 1548 FoldingSetNodeID ID; 1549 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None); 1550 ID.AddPointer(MBB); 1551 void *IP = nullptr; 1552 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1553 return SDValue(E, 0); 1554 1555 auto *N = newSDNode<BasicBlockSDNode>(MBB); 1556 CSEMap.InsertNode(N, IP); 1557 InsertNode(N); 1558 return SDValue(N, 0); 1559 } 1560 1561 SDValue SelectionDAG::getValueType(EVT VT) { 1562 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >= 1563 ValueTypeNodes.size()) 1564 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1); 1565 1566 SDNode *&N = VT.isExtended() ? 1567 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy]; 1568 1569 if (N) return SDValue(N, 0); 1570 N = newSDNode<VTSDNode>(VT); 1571 InsertNode(N); 1572 return SDValue(N, 0); 1573 } 1574 1575 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) { 1576 SDNode *&N = ExternalSymbols[Sym]; 1577 if (N) return SDValue(N, 0); 1578 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT); 1579 InsertNode(N); 1580 return SDValue(N, 0); 1581 } 1582 1583 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) { 1584 SDNode *&N = MCSymbols[Sym]; 1585 if (N) 1586 return SDValue(N, 0); 1587 N = newSDNode<MCSymbolSDNode>(Sym, VT); 1588 InsertNode(N); 1589 return SDValue(N, 0); 1590 } 1591 1592 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT, 1593 unsigned TargetFlags) { 1594 SDNode *&N = 1595 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)]; 1596 if (N) return SDValue(N, 0); 1597 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT); 1598 InsertNode(N); 1599 return SDValue(N, 0); 1600 } 1601 1602 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) { 1603 if ((unsigned)Cond >= CondCodeNodes.size()) 1604 CondCodeNodes.resize(Cond+1); 1605 1606 if (!CondCodeNodes[Cond]) { 1607 auto *N = newSDNode<CondCodeSDNode>(Cond); 1608 CondCodeNodes[Cond] = N; 1609 InsertNode(N); 1610 } 1611 1612 return SDValue(CondCodeNodes[Cond], 0); 1613 } 1614 1615 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that 1616 /// point at N1 to point at N2 and indices that point at N2 to point at N1. 
1617 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) { 1618 std::swap(N1, N2); 1619 ShuffleVectorSDNode::commuteMask(M); 1620 } 1621 1622 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, 1623 SDValue N2, ArrayRef<int> Mask) { 1624 assert(VT.getVectorNumElements() == Mask.size() && 1625 "Must have the same number of vector elements as mask elements!"); 1626 assert(VT == N1.getValueType() && VT == N2.getValueType() && 1627 "Invalid VECTOR_SHUFFLE"); 1628 1629 // Canonicalize shuffle undef, undef -> undef 1630 if (N1.isUndef() && N2.isUndef()) 1631 return getUNDEF(VT); 1632 1633 // Validate that all indices in Mask are within the range of the elements 1634 // input to the shuffle. 1635 int NElts = Mask.size(); 1636 assert(llvm::all_of(Mask, 1637 [&](int M) { return M < (NElts * 2) && M >= -1; }) && 1638 "Index out of range"); 1639 1640 // Copy the mask so we can do any needed cleanup. 1641 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end()); 1642 1643 // Canonicalize shuffle v, v -> v, undef 1644 if (N1 == N2) { 1645 N2 = getUNDEF(VT); 1646 for (int i = 0; i != NElts; ++i) 1647 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts; 1648 } 1649 1650 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. 1651 if (N1.isUndef()) 1652 commuteShuffle(N1, N2, MaskVec); 1653 1654 if (TLI->hasVectorBlend()) { 1655 // If shuffling a splat, try to blend the splat instead. We do this here so 1656 // that even when this arises during lowering we don't have to re-handle it. 1657 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) { 1658 BitVector UndefElements; 1659 SDValue Splat = BV->getSplatValue(&UndefElements); 1660 if (!Splat) 1661 return; 1662 1663 for (int i = 0; i < NElts; ++i) { 1664 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts)) 1665 continue; 1666 1667 // If this input comes from undef, mark it as such. 1668 if (UndefElements[MaskVec[i] - Offset]) { 1669 MaskVec[i] = -1; 1670 continue; 1671 } 1672 1673 // If we can blend a non-undef lane, use that instead. 1674 if (!UndefElements[i]) 1675 MaskVec[i] = i + Offset; 1676 } 1677 }; 1678 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1)) 1679 BlendSplat(N1BV, 0); 1680 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2)) 1681 BlendSplat(N2BV, NElts); 1682 } 1683 1684 // Canonicalize all index into lhs, -> shuffle lhs, undef 1685 // Canonicalize all index into rhs, -> shuffle rhs, undef 1686 bool AllLHS = true, AllRHS = true; 1687 bool N2Undef = N2.isUndef(); 1688 for (int i = 0; i != NElts; ++i) { 1689 if (MaskVec[i] >= NElts) { 1690 if (N2Undef) 1691 MaskVec[i] = -1; 1692 else 1693 AllLHS = false; 1694 } else if (MaskVec[i] >= 0) { 1695 AllRHS = false; 1696 } 1697 } 1698 if (AllLHS && AllRHS) 1699 return getUNDEF(VT); 1700 if (AllLHS && !N2Undef) 1701 N2 = getUNDEF(VT); 1702 if (AllRHS) { 1703 N1 = getUNDEF(VT); 1704 commuteShuffle(N1, N2, MaskVec); 1705 } 1706 // Reset our undef status after accounting for the mask. 1707 N2Undef = N2.isUndef(); 1708 // Re-check whether both sides ended up undef. 1709 if (N1.isUndef() && N2Undef) 1710 return getUNDEF(VT); 1711 1712 // If Identity shuffle return that node. 1713 bool Identity = true, AllSame = true; 1714 for (int i = 0; i != NElts; ++i) { 1715 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false; 1716 if (MaskVec[i] != MaskVec[0]) AllSame = false; 1717 } 1718 if (Identity && NElts) 1719 return N1; 1720 1721 // Shuffling a constant splat doesn't change the result. 
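  // For example (illustrative): any shuffle of a no-undef splat <x, x, x, x>
  // with an undef RHS still yields <x, x, x, x>, so the code below can return
  // N1 directly, or rebuild the splat when only the shuffle creates one.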
1722 if (N2Undef) { 1723 SDValue V = N1; 1724 1725 // Look through any bitcasts. We check that these don't change the number 1726 // (and size) of elements and just changes their types. 1727 while (V.getOpcode() == ISD::BITCAST) 1728 V = V->getOperand(0); 1729 1730 // A splat should always show up as a build vector node. 1731 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 1732 BitVector UndefElements; 1733 SDValue Splat = BV->getSplatValue(&UndefElements); 1734 // If this is a splat of an undef, shuffling it is also undef. 1735 if (Splat && Splat.isUndef()) 1736 return getUNDEF(VT); 1737 1738 bool SameNumElts = 1739 V.getValueType().getVectorNumElements() == VT.getVectorNumElements(); 1740 1741 // We only have a splat which can skip shuffles if there is a splatted 1742 // value and no undef lanes rearranged by the shuffle. 1743 if (Splat && UndefElements.none()) { 1744 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the 1745 // number of elements match or the value splatted is a zero constant. 1746 if (SameNumElts) 1747 return N1; 1748 if (auto *C = dyn_cast<ConstantSDNode>(Splat)) 1749 if (C->isNullValue()) 1750 return N1; 1751 } 1752 1753 // If the shuffle itself creates a splat, build the vector directly. 1754 if (AllSame && SameNumElts) { 1755 EVT BuildVT = BV->getValueType(0); 1756 const SDValue &Splatted = BV->getOperand(MaskVec[0]); 1757 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted); 1758 1759 // We may have jumped through bitcasts, so the type of the 1760 // BUILD_VECTOR may not match the type of the shuffle. 1761 if (BuildVT != VT) 1762 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV); 1763 return NewBV; 1764 } 1765 } 1766 } 1767 1768 FoldingSetNodeID ID; 1769 SDValue Ops[2] = { N1, N2 }; 1770 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops); 1771 for (int i = 0; i != NElts; ++i) 1772 ID.AddInteger(MaskVec[i]); 1773 1774 void* IP = nullptr; 1775 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1776 return SDValue(E, 0); 1777 1778 // Allocate the mask array for the node out of the BumpPtrAllocator, since 1779 // SDNode doesn't have access to it. This memory will be "leaked" when 1780 // the node is deallocated, but recovered when the NodeAllocator is released. 
1781 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1782 llvm::copy(MaskVec, MaskAlloc); 1783 1784 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1785 dl.getDebugLoc(), MaskAlloc); 1786 createOperands(N, Ops); 1787 1788 CSEMap.InsertNode(N, IP); 1789 InsertNode(N); 1790 SDValue V = SDValue(N, 0); 1791 NewSDValueDbgMsg(V, "Creating new node: ", this); 1792 return V; 1793 } 1794 1795 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1796 EVT VT = SV.getValueType(0); 1797 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1798 ShuffleVectorSDNode::commuteMask(MaskVec); 1799 1800 SDValue Op0 = SV.getOperand(0); 1801 SDValue Op1 = SV.getOperand(1); 1802 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1803 } 1804 1805 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1806 FoldingSetNodeID ID; 1807 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1808 ID.AddInteger(RegNo); 1809 void *IP = nullptr; 1810 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1811 return SDValue(E, 0); 1812 1813 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1814 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 1815 CSEMap.InsertNode(N, IP); 1816 InsertNode(N); 1817 return SDValue(N, 0); 1818 } 1819 1820 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1821 FoldingSetNodeID ID; 1822 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1823 ID.AddPointer(RegMask); 1824 void *IP = nullptr; 1825 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1826 return SDValue(E, 0); 1827 1828 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1829 CSEMap.InsertNode(N, IP); 1830 InsertNode(N); 1831 return SDValue(N, 0); 1832 } 1833 1834 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1835 MCSymbol *Label) { 1836 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1837 } 1838 1839 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1840 SDValue Root, MCSymbol *Label) { 1841 FoldingSetNodeID ID; 1842 SDValue Ops[] = { Root }; 1843 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1844 ID.AddPointer(Label); 1845 void *IP = nullptr; 1846 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1847 return SDValue(E, 0); 1848 1849 auto *N = 1850 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label); 1851 createOperands(N, Ops); 1852 1853 CSEMap.InsertNode(N, IP); 1854 InsertNode(N); 1855 return SDValue(N, 0); 1856 } 1857 1858 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1859 int64_t Offset, bool isTarget, 1860 unsigned TargetFlags) { 1861 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1862 1863 FoldingSetNodeID ID; 1864 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1865 ID.AddPointer(BA); 1866 ID.AddInteger(Offset); 1867 ID.AddInteger(TargetFlags); 1868 void *IP = nullptr; 1869 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1870 return SDValue(E, 0); 1871 1872 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1873 CSEMap.InsertNode(N, IP); 1874 InsertNode(N); 1875 return SDValue(N, 0); 1876 } 1877 1878 SDValue SelectionDAG::getSrcValue(const Value *V) { 1879 FoldingSetNodeID ID; 1880 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1881 ID.AddPointer(V); 1882 1883 void *IP = nullptr; 1884 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1885 return SDValue(E, 0); 1886 1887 auto *N = newSDNode<SrcValueSDNode>(V); 1888 CSEMap.InsertNode(N, IP); 1889 InsertNode(N); 1890 return SDValue(N, 0); 1891 } 1892 1893 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1894 FoldingSetNodeID ID; 1895 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1896 ID.AddPointer(MD); 1897 1898 void *IP = nullptr; 1899 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1900 return SDValue(E, 0); 1901 1902 auto *N = newSDNode<MDNodeSDNode>(MD); 1903 CSEMap.InsertNode(N, IP); 1904 InsertNode(N); 1905 return SDValue(N, 0); 1906 } 1907 1908 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1909 if (VT == V.getValueType()) 1910 return V; 1911 1912 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1913 } 1914 1915 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1916 unsigned SrcAS, unsigned DestAS) { 1917 SDValue Ops[] = {Ptr}; 1918 FoldingSetNodeID ID; 1919 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1920 ID.AddInteger(SrcAS); 1921 ID.AddInteger(DestAS); 1922 1923 void *IP = nullptr; 1924 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1925 return SDValue(E, 0); 1926 1927 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1928 VT, SrcAS, DestAS); 1929 createOperands(N, Ops); 1930 1931 CSEMap.InsertNode(N, IP); 1932 InsertNode(N); 1933 return SDValue(N, 0); 1934 } 1935 1936 SDValue SelectionDAG::getFreeze(SDValue V) { 1937 return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V); 1938 } 1939 1940 /// getShiftAmountOperand - Return the specified value casted to 1941 /// the target's desired shift amount type. 
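/// For example (illustrative): on a target whose shift-amount type for i64 is
/// i32, an i8 shift amount is zero-extended to i32 here, while an amount that
/// already matches the type, or is a vector, is returned unchanged.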
1942 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1943 EVT OpTy = Op.getValueType(); 1944 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1945 if (OpTy == ShTy || OpTy.isVector()) return Op; 1946 1947 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1948 } 1949 1950 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1951 SDLoc dl(Node); 1952 const TargetLowering &TLI = getTargetLoweringInfo(); 1953 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1954 EVT VT = Node->getValueType(0); 1955 SDValue Tmp1 = Node->getOperand(0); 1956 SDValue Tmp2 = Node->getOperand(1); 1957 const MaybeAlign MA(Node->getConstantOperandVal(3)); 1958 1959 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1960 Tmp2, MachinePointerInfo(V)); 1961 SDValue VAList = VAListLoad; 1962 1963 if (MA && *MA > TLI.getMinStackArgumentAlignment()) { 1964 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1965 getConstant(MA->value() - 1, dl, VAList.getValueType())); 1966 1967 VAList = 1968 getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1969 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType())); 1970 } 1971 1972 // Increment the pointer, VAList, to the next vaarg 1973 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1974 getConstant(getDataLayout().getTypeAllocSize( 1975 VT.getTypeForEVT(*getContext())), 1976 dl, VAList.getValueType())); 1977 // Store the incremented VAList to the legalized pointer 1978 Tmp1 = 1979 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1980 // Load the actual argument out of the pointer VAList 1981 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1982 } 1983 1984 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1985 SDLoc dl(Node); 1986 const TargetLowering &TLI = getTargetLoweringInfo(); 1987 // This defaults to loading a pointer from the input and storing it to the 1988 // output, returning the chain. 1989 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1990 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1991 SDValue Tmp1 = 1992 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1993 Node->getOperand(2), MachinePointerInfo(VS)); 1994 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1995 MachinePointerInfo(VD)); 1996 } 1997 1998 Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) { 1999 const DataLayout &DL = getDataLayout(); 2000 Type *Ty = VT.getTypeForEVT(*getContext()); 2001 Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty); 2002 2003 if (TLI->isTypeLegal(VT) || !VT.isVector()) 2004 return RedAlign; 2005 2006 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); 2007 const Align StackAlign = TFI->getStackAlign(); 2008 2009 // See if we can choose a smaller ABI alignment in cases where it's an 2010 // illegal vector type that will get broken down. 2011 if (RedAlign > StackAlign) { 2012 EVT IntermediateVT; 2013 MVT RegisterVT; 2014 unsigned NumIntermediates; 2015 TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT, 2016 NumIntermediates, RegisterVT); 2017 Ty = IntermediateVT.getTypeForEVT(*getContext()); 2018 Align RedAlign2 = UseABI ? 
DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty); 2019 if (RedAlign2 < RedAlign) 2020 RedAlign = RedAlign2; 2021 } 2022 2023 return RedAlign; 2024 } 2025 2026 SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) { 2027 MachineFrameInfo &MFI = MF->getFrameInfo(); 2028 int FrameIdx = MFI.CreateStackObject(Bytes, Alignment, false); 2029 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 2030 } 2031 2032 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 2033 Type *Ty = VT.getTypeForEVT(*getContext()); 2034 Align StackAlign = 2035 std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign)); 2036 return CreateStackTemporary(VT.getStoreSize(), StackAlign); 2037 } 2038 2039 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 2040 TypeSize Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 2041 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 2042 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 2043 const DataLayout &DL = getDataLayout(); 2044 Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2)); 2045 return CreateStackTemporary(Bytes, Align); 2046 } 2047 2048 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 2049 ISD::CondCode Cond, const SDLoc &dl) { 2050 EVT OpVT = N1.getValueType(); 2051 2052 // These setcc operations always fold. 2053 switch (Cond) { 2054 default: break; 2055 case ISD::SETFALSE: 2056 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); 2057 case ISD::SETTRUE: 2058 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); 2059 2060 case ISD::SETOEQ: 2061 case ISD::SETOGT: 2062 case ISD::SETOGE: 2063 case ISD::SETOLT: 2064 case ISD::SETOLE: 2065 case ISD::SETONE: 2066 case ISD::SETO: 2067 case ISD::SETUO: 2068 case ISD::SETUEQ: 2069 case ISD::SETUNE: 2070 assert(!OpVT.isInteger() && "Illegal setcc for integer!"); 2071 break; 2072 } 2073 2074 if (OpVT.isInteger()) { 2075 // For EQ and NE, we can always pick a value for the undef to make the 2076 // predicate pass or fail, so we can return undef. 2077 // Matches behavior in llvm::ConstantFoldCompareInstruction. 2078 // icmp eq/ne X, undef -> undef. 2079 if ((N1.isUndef() || N2.isUndef()) && 2080 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) 2081 return getUNDEF(VT); 2082 2083 // If both operands are undef, we can return undef for int comparison. 2084 // icmp undef, undef -> undef. 2085 if (N1.isUndef() && N2.isUndef()) 2086 return getUNDEF(VT); 2087 2088 // icmp X, X -> true/false 2089 // icmp X, undef -> true/false because undef could be X. 
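    // For example: with identical operands, (setcc X, X, setle) folds to true
    // and (setcc X, X, setlt) folds to false, following ISD::isTrueWhenEqual.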
2090 if (N1 == N2) 2091 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT); 2092 } 2093 2094 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 2095 const APInt &C2 = N2C->getAPIntValue(); 2096 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 2097 const APInt &C1 = N1C->getAPIntValue(); 2098 2099 switch (Cond) { 2100 default: llvm_unreachable("Unknown integer setcc!"); 2101 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT); 2102 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT); 2103 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT); 2104 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT); 2105 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT); 2106 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT); 2107 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT); 2108 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT); 2109 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT); 2110 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT); 2111 } 2112 } 2113 } 2114 2115 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 2116 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 2117 2118 if (N1CFP && N2CFP) { 2119 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF()); 2120 switch (Cond) { 2121 default: break; 2122 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 2123 return getUNDEF(VT); 2124 LLVM_FALLTHROUGH; 2125 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, 2126 OpVT); 2127 case ISD::SETNE: if (R==APFloat::cmpUnordered) 2128 return getUNDEF(VT); 2129 LLVM_FALLTHROUGH; 2130 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2131 R==APFloat::cmpLessThan, dl, VT, 2132 OpVT); 2133 case ISD::SETLT: if (R==APFloat::cmpUnordered) 2134 return getUNDEF(VT); 2135 LLVM_FALLTHROUGH; 2136 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, 2137 OpVT); 2138 case ISD::SETGT: if (R==APFloat::cmpUnordered) 2139 return getUNDEF(VT); 2140 LLVM_FALLTHROUGH; 2141 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, 2142 VT, OpVT); 2143 case ISD::SETLE: if (R==APFloat::cmpUnordered) 2144 return getUNDEF(VT); 2145 LLVM_FALLTHROUGH; 2146 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || 2147 R==APFloat::cmpEqual, dl, VT, 2148 OpVT); 2149 case ISD::SETGE: if (R==APFloat::cmpUnordered) 2150 return getUNDEF(VT); 2151 LLVM_FALLTHROUGH; 2152 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2153 R==APFloat::cmpEqual, dl, VT, OpVT); 2154 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, 2155 OpVT); 2156 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, 2157 OpVT); 2158 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || 2159 R==APFloat::cmpEqual, dl, VT, 2160 OpVT); 2161 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, 2162 OpVT); 2163 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || 2164 R==APFloat::cmpLessThan, dl, VT, 2165 OpVT); 2166 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || 2167 R==APFloat::cmpUnordered, dl, VT, 2168 OpVT); 2169 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, 2170 VT, OpVT); 2171 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, 2172 OpVT); 2173 } 2174 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) { 2175 // 
Ensure that the constant occurs on the RHS. 2176 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); 2177 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT())) 2178 return SDValue(); 2179 return getSetCC(dl, VT, N2, N1, SwappedCond); 2180 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) || 2181 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) { 2182 // If an operand is known to be a nan (or undef that could be a nan), we can 2183 // fold it. 2184 // Choosing NaN for the undef will always make unordered comparison succeed 2185 // and ordered comparison fails. 2186 // Matches behavior in llvm::ConstantFoldCompareInstruction. 2187 switch (ISD::getUnorderedFlavor(Cond)) { 2188 default: 2189 llvm_unreachable("Unknown flavor!"); 2190 case 0: // Known false. 2191 return getBoolConstant(false, dl, VT, OpVT); 2192 case 1: // Known true. 2193 return getBoolConstant(true, dl, VT, OpVT); 2194 case 2: // Undefined. 2195 return getUNDEF(VT); 2196 } 2197 } 2198 2199 // Could not fold it. 2200 return SDValue(); 2201 } 2202 2203 /// See if the specified operand can be simplified with the knowledge that only 2204 /// the bits specified by DemandedBits are used. 2205 /// TODO: really we should be making this into the DAG equivalent of 2206 /// SimplifyMultipleUseDemandedBits and not generate any new nodes. 2207 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) { 2208 EVT VT = V.getValueType(); 2209 APInt DemandedElts = VT.isVector() 2210 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2211 : APInt(1, 1); 2212 return GetDemandedBits(V, DemandedBits, DemandedElts); 2213 } 2214 2215 /// See if the specified operand can be simplified with the knowledge that only 2216 /// the bits specified by DemandedBits are used in the elements specified by 2217 /// DemandedElts. 2218 /// TODO: really we should be making this into the DAG equivalent of 2219 /// SimplifyMultipleUseDemandedBits and not generate any new nodes. 2220 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits, 2221 const APInt &DemandedElts) { 2222 switch (V.getOpcode()) { 2223 default: 2224 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts, 2225 *this, 0); 2226 case ISD::Constant: { 2227 const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue(); 2228 APInt NewVal = CVal & DemandedBits; 2229 if (NewVal != CVal) 2230 return getConstant(NewVal, SDLoc(V), V.getValueType()); 2231 break; 2232 } 2233 case ISD::SRL: 2234 // Only look at single-use SRLs. 2235 if (!V.getNode()->hasOneUse()) 2236 break; 2237 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 2238 // See if we can recursively simplify the LHS. 2239 unsigned Amt = RHSC->getZExtValue(); 2240 2241 // Watch out for shift count overflow though. 2242 if (Amt >= DemandedBits.getBitWidth()) 2243 break; 2244 APInt SrcDemandedBits = DemandedBits << Amt; 2245 if (SDValue SimplifyLHS = 2246 GetDemandedBits(V.getOperand(0), SrcDemandedBits)) 2247 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS, 2248 V.getOperand(1)); 2249 } 2250 break; 2251 } 2252 return SDValue(); 2253 } 2254 2255 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2256 /// use this predicate to simplify operations downstream. 
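/// A rough usage sketch (illustrative; DAG and N are assumed to be in scope):
///   if (DAG.SignBitIsZero(N))
///     ... N is provably non-negative, so an unsigned lowering is safe ...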
2257 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2258 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2259 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2260 } 2261 2262 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2263 /// this predicate to simplify operations downstream. Mask is known to be zero 2264 /// for bits that V cannot have. 2265 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2266 unsigned Depth) const { 2267 return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero); 2268 } 2269 2270 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in 2271 /// DemandedElts. We use this predicate to simplify operations downstream. 2272 /// Mask is known to be zero for bits that V cannot have. 2273 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2274 const APInt &DemandedElts, 2275 unsigned Depth) const { 2276 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero); 2277 } 2278 2279 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'. 2280 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask, 2281 unsigned Depth) const { 2282 return Mask.isSubsetOf(computeKnownBits(V, Depth).One); 2283 } 2284 2285 /// isSplatValue - Return true if the vector V has the same value 2286 /// across all DemandedElts. For scalable vectors it does not make 2287 /// sense to specify which elements are demanded or undefined, therefore 2288 /// they are simply ignored. 2289 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, 2290 APInt &UndefElts) { 2291 EVT VT = V.getValueType(); 2292 assert(VT.isVector() && "Vector type expected"); 2293 2294 if (!VT.isScalableVector() && !DemandedElts) 2295 return false; // No demanded elts, better to assume we don't know anything. 2296 2297 // Deal with some common cases here that work for both fixed and scalable 2298 // vector types. 2299 switch (V.getOpcode()) { 2300 case ISD::SPLAT_VECTOR: 2301 return true; 2302 case ISD::ADD: 2303 case ISD::SUB: 2304 case ISD::AND: { 2305 APInt UndefLHS, UndefRHS; 2306 SDValue LHS = V.getOperand(0); 2307 SDValue RHS = V.getOperand(1); 2308 if (isSplatValue(LHS, DemandedElts, UndefLHS) && 2309 isSplatValue(RHS, DemandedElts, UndefRHS)) { 2310 UndefElts = UndefLHS | UndefRHS; 2311 return true; 2312 } 2313 break; 2314 } 2315 case ISD::TRUNCATE: 2316 case ISD::SIGN_EXTEND: 2317 case ISD::ZERO_EXTEND: 2318 return isSplatValue(V.getOperand(0), DemandedElts, UndefElts); 2319 } 2320 2321 // We don't support other cases than those above for scalable vectors at 2322 // the moment. 2323 if (VT.isScalableVector()) 2324 return false; 2325 2326 unsigned NumElts = VT.getVectorNumElements(); 2327 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch"); 2328 UndefElts = APInt::getNullValue(NumElts); 2329 2330 switch (V.getOpcode()) { 2331 case ISD::BUILD_VECTOR: { 2332 SDValue Scl; 2333 for (unsigned i = 0; i != NumElts; ++i) { 2334 SDValue Op = V.getOperand(i); 2335 if (Op.isUndef()) { 2336 UndefElts.setBit(i); 2337 continue; 2338 } 2339 if (!DemandedElts[i]) 2340 continue; 2341 if (Scl && Scl != Op) 2342 return false; 2343 Scl = Op; 2344 } 2345 return true; 2346 } 2347 case ISD::VECTOR_SHUFFLE: { 2348 // Check if this is a shuffle node doing a splat. 2349 // TODO: Do we need to handle shuffle(splat, undef, mask)? 
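    // For example (illustrative): the mask <2, -1, 2, 2> splats lane 2 for
    // every demanded, non-undef position and is accepted below, whereas
    // <2, 3, 2, 2> is rejected as soon as the mismatching lane is demanded.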
2350 int SplatIndex = -1; 2351 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); 2352 for (int i = 0; i != (int)NumElts; ++i) { 2353 int M = Mask[i]; 2354 if (M < 0) { 2355 UndefElts.setBit(i); 2356 continue; 2357 } 2358 if (!DemandedElts[i]) 2359 continue; 2360 if (0 <= SplatIndex && SplatIndex != M) 2361 return false; 2362 SplatIndex = M; 2363 } 2364 return true; 2365 } 2366 case ISD::EXTRACT_SUBVECTOR: { 2367 // Offset the demanded elts by the subvector index. 2368 SDValue Src = V.getOperand(0); 2369 uint64_t Idx = V.getConstantOperandVal(1); 2370 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2371 APInt UndefSrcElts; 2372 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2373 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts)) { 2374 UndefElts = UndefSrcElts.extractBits(NumElts, Idx); 2375 return true; 2376 } 2377 break; 2378 } 2379 } 2380 2381 return false; 2382 } 2383 2384 /// Helper wrapper to main isSplatValue function. 2385 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) { 2386 EVT VT = V.getValueType(); 2387 assert(VT.isVector() && "Vector type expected"); 2388 2389 APInt UndefElts; 2390 APInt DemandedElts; 2391 2392 // For now we don't support this with scalable vectors. 2393 if (!VT.isScalableVector()) 2394 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); 2395 return isSplatValue(V, DemandedElts, UndefElts) && 2396 (AllowUndefs || !UndefElts); 2397 } 2398 2399 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) { 2400 V = peekThroughExtractSubvectors(V); 2401 2402 EVT VT = V.getValueType(); 2403 unsigned Opcode = V.getOpcode(); 2404 switch (Opcode) { 2405 default: { 2406 APInt UndefElts; 2407 APInt DemandedElts; 2408 2409 if (!VT.isScalableVector()) 2410 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); 2411 2412 if (isSplatValue(V, DemandedElts, UndefElts)) { 2413 if (VT.isScalableVector()) { 2414 // DemandedElts and UndefElts are ignored for scalable vectors, since 2415 // the only supported cases are SPLAT_VECTOR nodes. 2416 SplatIdx = 0; 2417 } else { 2418 // Handle case where all demanded elements are UNDEF. 2419 if (DemandedElts.isSubsetOf(UndefElts)) { 2420 SplatIdx = 0; 2421 return getUNDEF(VT); 2422 } 2423 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes(); 2424 } 2425 return V; 2426 } 2427 break; 2428 } 2429 case ISD::SPLAT_VECTOR: 2430 SplatIdx = 0; 2431 return V; 2432 case ISD::VECTOR_SHUFFLE: { 2433 if (VT.isScalableVector()) 2434 return SDValue(); 2435 2436 // Check if this is a shuffle node doing a splat. 2437 // TODO - remove this and rely purely on SelectionDAG::isSplatValue, 2438 // getTargetVShiftNode currently struggles without the splat source. 
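    // For example (illustrative): for shuffle(A, B, <5,5,5,5>) with 4-element
    // operands, getSplatIndex() is 5, so SplatIdx becomes 1 and the splat
    // source returned below is the second operand B.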
2439 auto *SVN = cast<ShuffleVectorSDNode>(V); 2440 if (!SVN->isSplat()) 2441 break; 2442 int Idx = SVN->getSplatIndex(); 2443 int NumElts = V.getValueType().getVectorNumElements(); 2444 SplatIdx = Idx % NumElts; 2445 return V.getOperand(Idx / NumElts); 2446 } 2447 } 2448 2449 return SDValue(); 2450 } 2451 2452 SDValue SelectionDAG::getSplatValue(SDValue V) { 2453 int SplatIdx; 2454 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) 2455 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), 2456 SrcVector.getValueType().getScalarType(), SrcVector, 2457 getVectorIdxConstant(SplatIdx, SDLoc(V))); 2458 return SDValue(); 2459 } 2460 2461 const APInt * 2462 SelectionDAG::getValidShiftAmountConstant(SDValue V, 2463 const APInt &DemandedElts) const { 2464 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || 2465 V.getOpcode() == ISD::SRA) && 2466 "Unknown shift node"); 2467 unsigned BitWidth = V.getScalarValueSizeInBits(); 2468 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) { 2469 // Shifting more than the bitwidth is not valid. 2470 const APInt &ShAmt = SA->getAPIntValue(); 2471 if (ShAmt.ult(BitWidth)) 2472 return &ShAmt; 2473 } 2474 return nullptr; 2475 } 2476 2477 const APInt *SelectionDAG::getValidMinimumShiftAmountConstant( 2478 SDValue V, const APInt &DemandedElts) const { 2479 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || 2480 V.getOpcode() == ISD::SRA) && 2481 "Unknown shift node"); 2482 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts)) 2483 return ValidAmt; 2484 unsigned BitWidth = V.getScalarValueSizeInBits(); 2485 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); 2486 if (!BV) 2487 return nullptr; 2488 const APInt *MinShAmt = nullptr; 2489 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2490 if (!DemandedElts[i]) 2491 continue; 2492 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); 2493 if (!SA) 2494 return nullptr; 2495 // Shifting more than the bitwidth is not valid. 2496 const APInt &ShAmt = SA->getAPIntValue(); 2497 if (ShAmt.uge(BitWidth)) 2498 return nullptr; 2499 if (MinShAmt && MinShAmt->ule(ShAmt)) 2500 continue; 2501 MinShAmt = &ShAmt; 2502 } 2503 return MinShAmt; 2504 } 2505 2506 const APInt *SelectionDAG::getValidMaximumShiftAmountConstant( 2507 SDValue V, const APInt &DemandedElts) const { 2508 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || 2509 V.getOpcode() == ISD::SRA) && 2510 "Unknown shift node"); 2511 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts)) 2512 return ValidAmt; 2513 unsigned BitWidth = V.getScalarValueSizeInBits(); 2514 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); 2515 if (!BV) 2516 return nullptr; 2517 const APInt *MaxShAmt = nullptr; 2518 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2519 if (!DemandedElts[i]) 2520 continue; 2521 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); 2522 if (!SA) 2523 return nullptr; 2524 // Shifting more than the bitwidth is not valid. 2525 const APInt &ShAmt = SA->getAPIntValue(); 2526 if (ShAmt.uge(BitWidth)) 2527 return nullptr; 2528 if (MaxShAmt && MaxShAmt->uge(ShAmt)) 2529 continue; 2530 MaxShAmt = &ShAmt; 2531 } 2532 return MaxShAmt; 2533 } 2534 2535 /// Determine which bits of Op are known to be either zero or one and return 2536 /// them in Known. For vectors, the known bits are those that are shared by 2537 /// every vector element. 
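/// A rough usage sketch (illustrative; DAG and Op are assumed to be in scope):
///   KnownBits Known = DAG.computeKnownBits(Op);
///   if (Known.isNonNegative())
///     ... the sign bit is known zero, so a cheaper unsigned form is legal ...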
2538 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2539 EVT VT = Op.getValueType();
2540
2541 // TODO: Until we have a plan for how to represent demanded elements for
2542 // scalable vectors, we can just bail out for now.
2543 if (Op.getValueType().isScalableVector()) {
2544 unsigned BitWidth = Op.getScalarValueSizeInBits();
2545 return KnownBits(BitWidth);
2546 }
2547
2548 APInt DemandedElts = VT.isVector()
2549 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2550 : APInt(1, 1);
2551 return computeKnownBits(Op, DemandedElts, Depth);
2552 }
2553
2554 /// Determine which bits of Op are known to be either zero or one and return
2555 /// them in Known. The DemandedElts argument allows us to only collect the known
2556 /// bits that are shared by the requested vector elements.
2557 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2558 unsigned Depth) const {
2559 unsigned BitWidth = Op.getScalarValueSizeInBits();
2560
2561 KnownBits Known(BitWidth); // Don't know anything.
2562
2563 // TODO: Until we have a plan for how to represent demanded elements for
2564 // scalable vectors, we can just bail out for now.
2565 if (Op.getValueType().isScalableVector())
2566 return Known;
2567
2568 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2569 // We know all of the bits for a constant!
2570 Known.One = C->getAPIntValue();
2571 Known.Zero = ~Known.One;
2572 return Known;
2573 }
2574 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2575 // We know all of the bits for a constant fp!
2576 Known.One = C->getValueAPF().bitcastToAPInt();
2577 Known.Zero = ~Known.One;
2578 return Known;
2579 }
2580
2581 if (Depth >= MaxRecursionDepth)
2582 return Known; // Limit search depth.
2583
2584 KnownBits Known2;
2585 unsigned NumElts = DemandedElts.getBitWidth();
2586 assert((!Op.getValueType().isVector() ||
2587 NumElts == Op.getValueType().getVectorNumElements()) &&
2588 "Unexpected vector size");
2589
2590 if (!DemandedElts)
2591 return Known; // No demanded elts, better to assume we don't know anything.
2592
2593 unsigned Opcode = Op.getOpcode();
2594 switch (Opcode) {
2595 case ISD::BUILD_VECTOR:
2596 // Collect the known bits that are shared by every demanded vector element.
2597 Known.Zero.setAllBits(); Known.One.setAllBits();
2598 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2599 if (!DemandedElts[i])
2600 continue;
2601
2602 SDValue SrcOp = Op.getOperand(i);
2603 Known2 = computeKnownBits(SrcOp, Depth + 1);
2604
2605 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
2606 if (SrcOp.getValueSizeInBits() != BitWidth) {
2607 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2608 "Expected BUILD_VECTOR implicit truncation");
2609 Known2 = Known2.trunc(BitWidth);
2610 }
2611
2612 // Known bits are the values that are shared by every demanded element.
2613 Known.One &= Known2.One;
2614 Known.Zero &= Known2.Zero;
2615
2616 // If we don't know any bits, early out.
2617 if (Known.isUnknown())
2618 break;
2619 }
2620 break;
2621 case ISD::VECTOR_SHUFFLE: {
2622 // Collect the known bits that are shared by every vector element referenced
2623 // by the shuffle.
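    // For example (illustrative): with NumElts == 4 and mask <0, 5, 1, 4>,
    // demanded result lanes {0, 2} map to LHS lanes {0, 1} and demanded result
    // lanes {1, 3} map to RHS lanes {1, 0}; the loop below builds exactly
    // those DemandedLHS/DemandedRHS masks.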
2624 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2625 Known.Zero.setAllBits(); Known.One.setAllBits(); 2626 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2627 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2628 for (unsigned i = 0; i != NumElts; ++i) { 2629 if (!DemandedElts[i]) 2630 continue; 2631 2632 int M = SVN->getMaskElt(i); 2633 if (M < 0) { 2634 // For UNDEF elements, we don't know anything about the common state of 2635 // the shuffle result. 2636 Known.resetAll(); 2637 DemandedLHS.clearAllBits(); 2638 DemandedRHS.clearAllBits(); 2639 break; 2640 } 2641 2642 if ((unsigned)M < NumElts) 2643 DemandedLHS.setBit((unsigned)M % NumElts); 2644 else 2645 DemandedRHS.setBit((unsigned)M % NumElts); 2646 } 2647 // Known bits are the values that are shared by every demanded element. 2648 if (!!DemandedLHS) { 2649 SDValue LHS = Op.getOperand(0); 2650 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1); 2651 Known.One &= Known2.One; 2652 Known.Zero &= Known2.Zero; 2653 } 2654 // If we don't know any bits, early out. 2655 if (Known.isUnknown()) 2656 break; 2657 if (!!DemandedRHS) { 2658 SDValue RHS = Op.getOperand(1); 2659 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1); 2660 Known.One &= Known2.One; 2661 Known.Zero &= Known2.Zero; 2662 } 2663 break; 2664 } 2665 case ISD::CONCAT_VECTORS: { 2666 // Split DemandedElts and test each of the demanded subvectors. 2667 Known.Zero.setAllBits(); Known.One.setAllBits(); 2668 EVT SubVectorVT = Op.getOperand(0).getValueType(); 2669 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 2670 unsigned NumSubVectors = Op.getNumOperands(); 2671 for (unsigned i = 0; i != NumSubVectors; ++i) { 2672 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 2673 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 2674 if (!!DemandedSub) { 2675 SDValue Sub = Op.getOperand(i); 2676 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1); 2677 Known.One &= Known2.One; 2678 Known.Zero &= Known2.Zero; 2679 } 2680 // If we don't know any bits, early out. 2681 if (Known.isUnknown()) 2682 break; 2683 } 2684 break; 2685 } 2686 case ISD::INSERT_SUBVECTOR: { 2687 // Demand any elements from the subvector and the remainder from the src its 2688 // inserted into. 2689 SDValue Src = Op.getOperand(0); 2690 SDValue Sub = Op.getOperand(1); 2691 uint64_t Idx = Op.getConstantOperandVal(2); 2692 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 2693 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 2694 APInt DemandedSrcElts = DemandedElts; 2695 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx); 2696 2697 Known.One.setAllBits(); 2698 Known.Zero.setAllBits(); 2699 if (!!DemandedSubElts) { 2700 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1); 2701 if (Known.isUnknown()) 2702 break; // early-out. 2703 } 2704 if (!!DemandedSrcElts) { 2705 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1); 2706 Known.One &= Known2.One; 2707 Known.Zero &= Known2.Zero; 2708 } 2709 break; 2710 } 2711 case ISD::EXTRACT_SUBVECTOR: { 2712 // Offset the demanded elts by the subvector index. 2713 SDValue Src = Op.getOperand(0); 2714 // Bail until we can represent demanded elements for scalable vectors. 
2715 if (Src.getValueType().isScalableVector()) 2716 break; 2717 uint64_t Idx = Op.getConstantOperandVal(1); 2718 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2719 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2720 Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1); 2721 break; 2722 } 2723 case ISD::SCALAR_TO_VECTOR: { 2724 // We know about scalar_to_vector as much as we know about it source, 2725 // which becomes the first element of otherwise unknown vector. 2726 if (DemandedElts != 1) 2727 break; 2728 2729 SDValue N0 = Op.getOperand(0); 2730 Known = computeKnownBits(N0, Depth + 1); 2731 if (N0.getValueSizeInBits() != BitWidth) 2732 Known = Known.trunc(BitWidth); 2733 2734 break; 2735 } 2736 case ISD::BITCAST: { 2737 SDValue N0 = Op.getOperand(0); 2738 EVT SubVT = N0.getValueType(); 2739 unsigned SubBitWidth = SubVT.getScalarSizeInBits(); 2740 2741 // Ignore bitcasts from unsupported types. 2742 if (!(SubVT.isInteger() || SubVT.isFloatingPoint())) 2743 break; 2744 2745 // Fast handling of 'identity' bitcasts. 2746 if (BitWidth == SubBitWidth) { 2747 Known = computeKnownBits(N0, DemandedElts, Depth + 1); 2748 break; 2749 } 2750 2751 bool IsLE = getDataLayout().isLittleEndian(); 2752 2753 // Bitcast 'small element' vector to 'large element' scalar/vector. 2754 if ((BitWidth % SubBitWidth) == 0) { 2755 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2756 2757 // Collect known bits for the (larger) output by collecting the known 2758 // bits from each set of sub elements and shift these into place. 2759 // We need to separately call computeKnownBits for each set of 2760 // sub elements as the knownbits for each is likely to be different. 2761 unsigned SubScale = BitWidth / SubBitWidth; 2762 APInt SubDemandedElts(NumElts * SubScale, 0); 2763 for (unsigned i = 0; i != NumElts; ++i) 2764 if (DemandedElts[i]) 2765 SubDemandedElts.setBit(i * SubScale); 2766 2767 for (unsigned i = 0; i != SubScale; ++i) { 2768 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i), 2769 Depth + 1); 2770 unsigned Shifts = IsLE ? i : SubScale - 1 - i; 2771 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts); 2772 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts); 2773 } 2774 } 2775 2776 // Bitcast 'large element' scalar/vector to 'small element' vector. 2777 if ((SubBitWidth % BitWidth) == 0) { 2778 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 2779 2780 // Collect known bits for the (smaller) output by collecting the known 2781 // bits from the overlapping larger input elements and extracting the 2782 // sub sections we actually care about. 2783 unsigned SubScale = SubBitWidth / BitWidth; 2784 APInt SubDemandedElts(NumElts / SubScale, 0); 2785 for (unsigned i = 0; i != NumElts; ++i) 2786 if (DemandedElts[i]) 2787 SubDemandedElts.setBit(i / SubScale); 2788 2789 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1); 2790 2791 Known.Zero.setAllBits(); Known.One.setAllBits(); 2792 for (unsigned i = 0; i != NumElts; ++i) 2793 if (DemandedElts[i]) { 2794 unsigned Shifts = IsLE ? i : NumElts - 1 - i; 2795 unsigned Offset = (Shifts % SubScale) * BitWidth; 2796 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth); 2797 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth); 2798 // If we don't know any bits, early out. 
2799 if (Known.isUnknown()) 2800 break; 2801 } 2802 } 2803 break; 2804 } 2805 case ISD::AND: 2806 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2807 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2808 2809 Known &= Known2; 2810 break; 2811 case ISD::OR: 2812 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2813 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2814 2815 Known |= Known2; 2816 break; 2817 case ISD::XOR: 2818 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2819 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2820 2821 Known ^= Known2; 2822 break; 2823 case ISD::MUL: { 2824 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2825 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2826 2827 // If low bits are zero in either operand, output low known-0 bits. 2828 // Also compute a conservative estimate for high known-0 bits. 2829 // More trickiness is possible, but this is sufficient for the 2830 // interesting case of alignment computation. 2831 unsigned TrailZ = Known.countMinTrailingZeros() + 2832 Known2.countMinTrailingZeros(); 2833 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 2834 Known2.countMinLeadingZeros(), 2835 BitWidth) - BitWidth; 2836 2837 Known.resetAll(); 2838 Known.Zero.setLowBits(std::min(TrailZ, BitWidth)); 2839 Known.Zero.setHighBits(std::min(LeadZ, BitWidth)); 2840 break; 2841 } 2842 case ISD::UDIV: { 2843 // For the purposes of computing leading zeros we can conservatively 2844 // treat a udiv as a logical right shift by the power of 2 known to 2845 // be less than the denominator. 2846 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2847 unsigned LeadZ = Known2.countMinLeadingZeros(); 2848 2849 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2850 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2851 if (RHSMaxLeadingZeros != BitWidth) 2852 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2853 2854 Known.Zero.setHighBits(LeadZ); 2855 break; 2856 } 2857 case ISD::SELECT: 2858 case ISD::VSELECT: 2859 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2860 // If we don't know any bits, early out. 2861 if (Known.isUnknown()) 2862 break; 2863 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1); 2864 2865 // Only known if known in both the LHS and RHS. 2866 Known.One &= Known2.One; 2867 Known.Zero &= Known2.Zero; 2868 break; 2869 case ISD::SELECT_CC: 2870 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1); 2871 // If we don't know any bits, early out. 2872 if (Known.isUnknown()) 2873 break; 2874 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2875 2876 // Only known if known in both the LHS and RHS. 2877 Known.One &= Known2.One; 2878 Known.Zero &= Known2.Zero; 2879 break; 2880 case ISD::SMULO: 2881 case ISD::UMULO: 2882 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 2883 if (Op.getResNo() != 1) 2884 break; 2885 // The boolean result conforms to getBooleanContents. 2886 // If we know the result of a setcc has the top bits zero, use this info. 2887 // We know that we have an integer-based boolean since these operations 2888 // are only available for integer. 
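      // For example (illustrative): under ZeroOrOneBooleanContent the overflow
      // or success result is either 0 or 1, so every bit above bit 0 is known
      // zero and setBitsFrom(1) records exactly that.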
2889 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2890 TargetLowering::ZeroOrOneBooleanContent && 2891 BitWidth > 1) 2892 Known.Zero.setBitsFrom(1); 2893 break; 2894 case ISD::SETCC: 2895 case ISD::STRICT_FSETCC: 2896 case ISD::STRICT_FSETCCS: { 2897 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 2898 // If we know the result of a setcc has the top bits zero, use this info. 2899 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 2900 TargetLowering::ZeroOrOneBooleanContent && 2901 BitWidth > 1) 2902 Known.Zero.setBitsFrom(1); 2903 break; 2904 } 2905 case ISD::SHL: 2906 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2907 2908 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 2909 unsigned Shift = ShAmt->getZExtValue(); 2910 Known.Zero <<= Shift; 2911 Known.One <<= Shift; 2912 // Low bits are known zero. 2913 Known.Zero.setLowBits(Shift); 2914 break; 2915 } 2916 2917 // No matter the shift amount, the trailing zeros will stay zero. 2918 Known.Zero = APInt::getLowBitsSet(BitWidth, Known.countMinTrailingZeros()); 2919 Known.One.clearAllBits(); 2920 2921 // Minimum shift low bits are known zero. 2922 if (const APInt *ShMinAmt = 2923 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 2924 Known.Zero.setLowBits(ShMinAmt->getZExtValue()); 2925 break; 2926 case ISD::SRL: 2927 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2928 2929 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 2930 unsigned Shift = ShAmt->getZExtValue(); 2931 Known.Zero.lshrInPlace(Shift); 2932 Known.One.lshrInPlace(Shift); 2933 // High bits are known zero. 2934 Known.Zero.setHighBits(Shift); 2935 break; 2936 } 2937 2938 // No matter the shift amount, the leading zeros will stay zero. 2939 Known.Zero = APInt::getHighBitsSet(BitWidth, Known.countMinLeadingZeros()); 2940 Known.One.clearAllBits(); 2941 2942 // Minimum shift high bits are known zero. 2943 if (const APInt *ShMinAmt = 2944 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 2945 Known.Zero.setHighBits(ShMinAmt->getZExtValue()); 2946 break; 2947 case ISD::SRA: 2948 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) { 2949 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2950 unsigned Shift = ShAmt->getZExtValue(); 2951 // Sign extend known zero/one bit (else is unknown). 2952 Known.Zero.ashrInPlace(Shift); 2953 Known.One.ashrInPlace(Shift); 2954 } 2955 break; 2956 case ISD::FSHL: 2957 case ISD::FSHR: 2958 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) { 2959 unsigned Amt = C->getAPIntValue().urem(BitWidth); 2960 2961 // For fshl, 0-shift returns the 1st arg. 2962 // For fshr, 0-shift returns the 2nd arg. 2963 if (Amt == 0) { 2964 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 
0 : 1), 2965 DemandedElts, Depth + 1); 2966 break; 2967 } 2968 2969 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 2970 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 2971 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2972 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2973 if (Opcode == ISD::FSHL) { 2974 Known.One <<= Amt; 2975 Known.Zero <<= Amt; 2976 Known2.One.lshrInPlace(BitWidth - Amt); 2977 Known2.Zero.lshrInPlace(BitWidth - Amt); 2978 } else { 2979 Known.One <<= BitWidth - Amt; 2980 Known.Zero <<= BitWidth - Amt; 2981 Known2.One.lshrInPlace(Amt); 2982 Known2.Zero.lshrInPlace(Amt); 2983 } 2984 Known.One |= Known2.One; 2985 Known.Zero |= Known2.Zero; 2986 } 2987 break; 2988 case ISD::SIGN_EXTEND_INREG: { 2989 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2990 unsigned EBits = EVT.getScalarSizeInBits(); 2991 2992 // Sign extension. Compute the demanded bits in the result that are not 2993 // present in the input. 2994 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2995 2996 APInt InSignMask = APInt::getSignMask(EBits); 2997 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2998 2999 // If the sign extended bits are demanded, we know that the sign 3000 // bit is demanded. 3001 InSignMask = InSignMask.zext(BitWidth); 3002 if (NewBits.getBoolValue()) 3003 InputDemandedBits |= InSignMask; 3004 3005 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3006 Known.One &= InputDemandedBits; 3007 Known.Zero &= InputDemandedBits; 3008 3009 // If the sign bit of the input is known set or clear, then we know the 3010 // top bits of the result. 3011 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear 3012 Known.Zero |= NewBits; 3013 Known.One &= ~NewBits; 3014 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set 3015 Known.One |= NewBits; 3016 Known.Zero &= ~NewBits; 3017 } else { // Input sign bit unknown 3018 Known.Zero &= ~NewBits; 3019 Known.One &= ~NewBits; 3020 } 3021 break; 3022 } 3023 case ISD::CTTZ: 3024 case ISD::CTTZ_ZERO_UNDEF: { 3025 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3026 // If we have a known 1, its position is our upper bound. 3027 unsigned PossibleTZ = Known2.countMaxTrailingZeros(); 3028 unsigned LowBits = Log2_32(PossibleTZ) + 1; 3029 Known.Zero.setBitsFrom(LowBits); 3030 break; 3031 } 3032 case ISD::CTLZ: 3033 case ISD::CTLZ_ZERO_UNDEF: { 3034 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3035 // If we have a known 1, its position is our upper bound. 3036 unsigned PossibleLZ = Known2.countMaxLeadingZeros(); 3037 unsigned LowBits = Log2_32(PossibleLZ) + 1; 3038 Known.Zero.setBitsFrom(LowBits); 3039 break; 3040 } 3041 case ISD::CTPOP: { 3042 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3043 // If we know some of the bits are zero, they can't be one. 3044 unsigned PossibleOnes = Known2.countMaxPopulation(); 3045 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1); 3046 break; 3047 } 3048 case ISD::LOAD: { 3049 LoadSDNode *LD = cast<LoadSDNode>(Op); 3050 const Constant *Cst = TLI->getTargetConstantFromLoad(LD); 3051 if (ISD::isNON_EXTLoad(LD) && Cst) { 3052 // Determine any common known bits from the loaded constant pool value. 3053 Type *CstTy = Cst->getType(); 3054 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) { 3055 // If its a vector splat, then we can (quickly) reuse the scalar path. 
3056 // NOTE: We assume all elements match and none are UNDEF. 3057 if (CstTy->isVectorTy()) { 3058 if (const Constant *Splat = Cst->getSplatValue()) { 3059 Cst = Splat; 3060 CstTy = Cst->getType(); 3061 } 3062 } 3063 // TODO - do we need to handle different bitwidths? 3064 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) { 3065 // Iterate across all vector elements finding common known bits. 3066 Known.One.setAllBits(); 3067 Known.Zero.setAllBits(); 3068 for (unsigned i = 0; i != NumElts; ++i) { 3069 if (!DemandedElts[i]) 3070 continue; 3071 if (Constant *Elt = Cst->getAggregateElement(i)) { 3072 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 3073 const APInt &Value = CInt->getValue(); 3074 Known.One &= Value; 3075 Known.Zero &= ~Value; 3076 continue; 3077 } 3078 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 3079 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 3080 Known.One &= Value; 3081 Known.Zero &= ~Value; 3082 continue; 3083 } 3084 } 3085 Known.One.clearAllBits(); 3086 Known.Zero.clearAllBits(); 3087 break; 3088 } 3089 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) { 3090 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) { 3091 const APInt &Value = CInt->getValue(); 3092 Known.One = Value; 3093 Known.Zero = ~Value; 3094 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) { 3095 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 3096 Known.One = Value; 3097 Known.Zero = ~Value; 3098 } 3099 } 3100 } 3101 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { 3102 // If this is a ZEXTLoad and we are looking at the loaded value. 3103 EVT VT = LD->getMemoryVT(); 3104 unsigned MemBits = VT.getScalarSizeInBits(); 3105 Known.Zero.setBitsFrom(MemBits); 3106 } else if (const MDNode *Ranges = LD->getRanges()) { 3107 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 3108 computeKnownBitsFromRangeMetadata(*Ranges, Known); 3109 } 3110 break; 3111 } 3112 case ISD::ZERO_EXTEND_VECTOR_INREG: { 3113 EVT InVT = Op.getOperand(0).getValueType(); 3114 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3115 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3116 Known = Known.zext(BitWidth); 3117 break; 3118 } 3119 case ISD::ZERO_EXTEND: { 3120 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3121 Known = Known.zext(BitWidth); 3122 break; 3123 } 3124 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3125 EVT InVT = Op.getOperand(0).getValueType(); 3126 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3127 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3128 // If the sign bit is known to be zero or one, then sext will extend 3129 // it to the top bits, else it will just zext. 3130 Known = Known.sext(BitWidth); 3131 break; 3132 } 3133 case ISD::SIGN_EXTEND: { 3134 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3135 // If the sign bit is known to be zero or one, then sext will extend 3136 // it to the top bits, else it will just zext. 
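    // For example (illustrative): sign-extending an i8 whose sign bit is known
    // zero to i32 makes bits 31..7 known zero; a known-negative source makes
    // them known one; an unknown sign bit leaves the extended bits unknown.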
3137 Known = Known.sext(BitWidth); 3138 break; 3139 } 3140 case ISD::ANY_EXTEND_VECTOR_INREG: { 3141 EVT InVT = Op.getOperand(0).getValueType(); 3142 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3143 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3144 Known = Known.anyext(BitWidth); 3145 break; 3146 } 3147 case ISD::ANY_EXTEND: { 3148 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3149 Known = Known.anyext(BitWidth); 3150 break; 3151 } 3152 case ISD::TRUNCATE: { 3153 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3154 Known = Known.trunc(BitWidth); 3155 break; 3156 } 3157 case ISD::AssertZext: { 3158 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 3159 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 3160 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3161 Known.Zero |= (~InMask); 3162 Known.One &= (~Known.Zero); 3163 break; 3164 } 3165 case ISD::AssertAlign: { 3166 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign()); 3167 assert(LogOfAlign != 0); 3168 // If a node is guaranteed to be aligned, set low zero bits accordingly as 3169 // well as clearing one bits. 3170 Known.Zero.setLowBits(LogOfAlign); 3171 Known.One.clearLowBits(LogOfAlign); 3172 break; 3173 } 3174 case ISD::FGETSIGN: 3175 // All bits are zero except the low bit. 3176 Known.Zero.setBitsFrom(1); 3177 break; 3178 case ISD::USUBO: 3179 case ISD::SSUBO: 3180 if (Op.getResNo() == 1) { 3181 // If we know the result of a setcc has the top bits zero, use this info. 3182 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3183 TargetLowering::ZeroOrOneBooleanContent && 3184 BitWidth > 1) 3185 Known.Zero.setBitsFrom(1); 3186 break; 3187 } 3188 LLVM_FALLTHROUGH; 3189 case ISD::SUB: 3190 case ISD::SUBC: { 3191 assert(Op.getResNo() == 0 && 3192 "We only compute knownbits for the difference here."); 3193 3194 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3195 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3196 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false, 3197 Known, Known2); 3198 break; 3199 } 3200 case ISD::UADDO: 3201 case ISD::SADDO: 3202 case ISD::ADDCARRY: 3203 if (Op.getResNo() == 1) { 3204 // If we know the result of a setcc has the top bits zero, use this info. 3205 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3206 TargetLowering::ZeroOrOneBooleanContent && 3207 BitWidth > 1) 3208 Known.Zero.setBitsFrom(1); 3209 break; 3210 } 3211 LLVM_FALLTHROUGH; 3212 case ISD::ADD: 3213 case ISD::ADDC: 3214 case ISD::ADDE: { 3215 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here."); 3216 3217 // With ADDE and ADDCARRY, a carry bit may be added in. 3218 KnownBits Carry(1); 3219 if (Opcode == ISD::ADDE) 3220 // Can't track carry from glue, set carry to unknown. 3221 Carry.resetAll(); 3222 else if (Opcode == ISD::ADDCARRY) 3223 // TODO: Compute known bits for the carry operand. Not sure if it is worth 3224 // the trouble (how often will we find a known carry bit). 
And I haven't 3225 // tested this very much yet, but something like this might work: 3226 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1); 3227 // Carry = Carry.zextOrTrunc(1, false); 3228 Carry.resetAll(); 3229 else 3230 Carry.setAllZero(); 3231 3232 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3233 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3234 Known = KnownBits::computeForAddCarry(Known, Known2, Carry); 3235 break; 3236 } 3237 case ISD::SREM: 3238 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 3239 const APInt &RA = Rem->getAPIntValue().abs(); 3240 if (RA.isPowerOf2()) { 3241 APInt LowBits = RA - 1; 3242 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3243 3244 // The low bits of the first operand are unchanged by the srem. 3245 Known.Zero = Known2.Zero & LowBits; 3246 Known.One = Known2.One & LowBits; 3247 3248 // If the first operand is non-negative or has all low bits zero, then 3249 // the upper bits are all zero. 3250 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero)) 3251 Known.Zero |= ~LowBits; 3252 3253 // If the first operand is negative and not all low bits are zero, then 3254 // the upper bits are all one. 3255 if (Known2.isNegative() && LowBits.intersects(Known2.One)) 3256 Known.One |= ~LowBits; 3257 assert((Known.Zero & Known.One) == 0&&"Bits known to be one AND zero?"); 3258 } 3259 } 3260 break; 3261 case ISD::UREM: { 3262 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 3263 const APInt &RA = Rem->getAPIntValue(); 3264 if (RA.isPowerOf2()) { 3265 APInt LowBits = (RA - 1); 3266 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3267 3268 // The upper bits are all zero, the lower ones are unchanged. 3269 Known.Zero = Known2.Zero | ~LowBits; 3270 Known.One = Known2.One & LowBits; 3271 break; 3272 } 3273 } 3274 3275 // Since the result is less than or equal to either operand, any leading 3276 // zero bits in either operand must also exist in the result. 3277 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3278 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3279 3280 uint32_t Leaders = 3281 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 3282 Known.resetAll(); 3283 Known.Zero.setHighBits(Leaders); 3284 break; 3285 } 3286 case ISD::EXTRACT_ELEMENT: { 3287 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3288 const unsigned Index = Op.getConstantOperandVal(1); 3289 const unsigned EltBitWidth = Op.getValueSizeInBits(); 3290 3291 // Remove low part of known bits mask 3292 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth); 3293 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth); 3294 3295 // Remove high part of known bit mask 3296 Known = Known.trunc(EltBitWidth); 3297 break; 3298 } 3299 case ISD::EXTRACT_VECTOR_ELT: { 3300 SDValue InVec = Op.getOperand(0); 3301 SDValue EltNo = Op.getOperand(1); 3302 EVT VecVT = InVec.getValueType(); 3303 const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); 3304 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3305 3306 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 3307 // anything about the extended bits. 
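// For example, extracting an i8 element into an i32 result: only the low 8
// bits can be known, and the anyext below re-adds the upper 24 bits as
// unknown.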
3308 if (BitWidth > EltBitWidth)
3309 Known = Known.trunc(EltBitWidth);
3310
3311 // If we know the element index, just demand that vector element, else for
3312 // an unknown element index, ignore DemandedElts and demand them all.
3313 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3314 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3315 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3316 DemandedSrcElts =
3317 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3318
3319 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
3320 if (BitWidth > EltBitWidth)
3321 Known = Known.anyext(BitWidth);
3322 break;
3323 }
3324 case ISD::INSERT_VECTOR_ELT: {
3325 // If we know the element index, split the demand between the
3326 // source vector and the inserted element, otherwise assume we need
3327 // the original demanded vector elements and the value.
3328 SDValue InVec = Op.getOperand(0);
3329 SDValue InVal = Op.getOperand(1);
3330 SDValue EltNo = Op.getOperand(2);
3331 bool DemandedVal = true;
3332 APInt DemandedVecElts = DemandedElts;
3333 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3334 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3335 unsigned EltIdx = CEltNo->getZExtValue();
3336 DemandedVal = !!DemandedElts[EltIdx];
3337 DemandedVecElts.clearBit(EltIdx);
3338 }
3339 Known.One.setAllBits();
3340 Known.Zero.setAllBits();
3341 if (DemandedVal) {
3342 Known2 = computeKnownBits(InVal, Depth + 1);
3343 Known.One &= Known2.One.zextOrTrunc(BitWidth);
3344 Known.Zero &= Known2.Zero.zextOrTrunc(BitWidth);
3345 }
3346 if (!!DemandedVecElts) {
3347 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
3348 Known.One &= Known2.One;
3349 Known.Zero &= Known2.Zero;
3350 }
3351 break;
3352 }
3353 case ISD::BITREVERSE: {
3354 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3355 Known.Zero = Known2.Zero.reverseBits();
3356 Known.One = Known2.One.reverseBits();
3357 break;
3358 }
3359 case ISD::BSWAP: {
3360 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3361 Known.Zero = Known2.Zero.byteSwap();
3362 Known.One = Known2.One.byteSwap();
3363 break;
3364 }
3365 case ISD::ABS: {
3366 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3367
3368 // If the source's MSB is zero then we know the rest of the bits already.
3369 if (Known2.isNonNegative()) {
3370 Known.Zero = Known2.Zero;
3371 Known.One = Known2.One;
3372 break;
3373 }
3374
3375 // We only know that the absolute value's MSB will be zero if there is
3376 // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
3377 Known2.One.clearSignBit();
3378 if (Known2.One.getBoolValue()) {
3379 Known.Zero = APInt::getSignMask(BitWidth);
3380 break;
3381 }
3382 break;
3383 }
3384 case ISD::UMIN: {
3385 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3386 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3387
3388 // UMIN - we know that the result will have the maximum of the
3389 // known zero leading bits of the inputs.
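// e.g. if one operand has at least 4 known leading zero bits and the other
// at least 8, the unsigned minimum is bounded by the smaller operand and so
// has at least 8 leading zero bits.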
3390 unsigned LeadZero = Known.countMinLeadingZeros(); 3391 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros()); 3392 3393 Known.Zero &= Known2.Zero; 3394 Known.One &= Known2.One; 3395 Known.Zero.setHighBits(LeadZero); 3396 break; 3397 } 3398 case ISD::UMAX: { 3399 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3400 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3401 3402 // UMAX - we know that the result will have the maximum of the 3403 // known one leading bits of the inputs. 3404 unsigned LeadOne = Known.countMinLeadingOnes(); 3405 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes()); 3406 3407 Known.Zero &= Known2.Zero; 3408 Known.One &= Known2.One; 3409 Known.One.setHighBits(LeadOne); 3410 break; 3411 } 3412 case ISD::SMIN: 3413 case ISD::SMAX: { 3414 // If we have a clamp pattern, we know that the number of sign bits will be 3415 // the minimum of the clamp min/max range. 3416 bool IsMax = (Opcode == ISD::SMAX); 3417 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3418 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3419 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3420 CstHigh = 3421 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3422 if (CstLow && CstHigh) { 3423 if (!IsMax) 3424 std::swap(CstLow, CstHigh); 3425 3426 const APInt &ValueLow = CstLow->getAPIntValue(); 3427 const APInt &ValueHigh = CstHigh->getAPIntValue(); 3428 if (ValueLow.sle(ValueHigh)) { 3429 unsigned LowSignBits = ValueLow.getNumSignBits(); 3430 unsigned HighSignBits = ValueHigh.getNumSignBits(); 3431 unsigned MinSignBits = std::min(LowSignBits, HighSignBits); 3432 if (ValueLow.isNegative() && ValueHigh.isNegative()) { 3433 Known.One.setHighBits(MinSignBits); 3434 break; 3435 } 3436 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) { 3437 Known.Zero.setHighBits(MinSignBits); 3438 break; 3439 } 3440 } 3441 } 3442 3443 // Fallback - just get the shared known bits of the operands. 3444 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3445 if (Known.isUnknown()) break; // Early-out 3446 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3447 Known.Zero &= Known2.Zero; 3448 Known.One &= Known2.One; 3449 break; 3450 } 3451 case ISD::FrameIndex: 3452 case ISD::TargetFrameIndex: 3453 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(), 3454 Known, getMachineFunction()); 3455 break; 3456 3457 default: 3458 if (Opcode < ISD::BUILTIN_OP_END) 3459 break; 3460 LLVM_FALLTHROUGH; 3461 case ISD::INTRINSIC_WO_CHAIN: 3462 case ISD::INTRINSIC_W_CHAIN: 3463 case ISD::INTRINSIC_VOID: 3464 // Allow the target to implement this method for its nodes. 
3465 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3466 break;
3467 }
3468
3469 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3470 return Known;
3471 }
3472
3473 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3474 SDValue N1) const {
3475 // X + 0 never overflows
3476 if (isNullConstant(N1))
3477 return OFK_Never;
3478
3479 KnownBits N1Known = computeKnownBits(N1);
3480 if (N1Known.Zero.getBoolValue()) {
3481 KnownBits N0Known = computeKnownBits(N0);
3482
3483 bool overflow;
3484 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3485 if (!overflow)
3486 return OFK_Never;
3487 }
3488
3489 // mulhi + 1 never overflows
3490 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3491 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3492 return OFK_Never;
3493
3494 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3495 KnownBits N0Known = computeKnownBits(N0);
3496
3497 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3498 return OFK_Never;
3499 }
3500
3501 return OFK_Sometime;
3502 }
3503
3504 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3505 EVT OpVT = Val.getValueType();
3506 unsigned BitWidth = OpVT.getScalarSizeInBits();
3507
3508 // Is the constant a known power of 2?
3509 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3510 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3511
3512 // A left-shift of a constant one will have exactly one bit set because
3513 // shifting the bit off the end is undefined.
3514 if (Val.getOpcode() == ISD::SHL) {
3515 auto *C = isConstOrConstSplat(Val.getOperand(0));
3516 if (C && C->getAPIntValue() == 1)
3517 return true;
3518 }
3519
3520 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3521 // one bit set.
3522 if (Val.getOpcode() == ISD::SRL) {
3523 auto *C = isConstOrConstSplat(Val.getOperand(0));
3524 if (C && C->getAPIntValue().isSignMask())
3525 return true;
3526 }
3527
3528 // Are all operands of a build vector constant powers of two?
3529 if (Val.getOpcode() == ISD::BUILD_VECTOR)
3530 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3531 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3532 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3533 return false;
3534 }))
3535 return true;
3536
3537 // More could be done here, though the above checks are enough
3538 // to handle some common cases.
3539
3540 // Fall back to computeKnownBits to catch other known cases.
3541 KnownBits Known = computeKnownBits(Val);
3542 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3543 }
3544
3545 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3546 EVT VT = Op.getValueType();
3547
3548 // TODO: Assume we don't know anything for now.
3549 if (VT.isScalableVector())
3550 return 1;
3551
3552 APInt DemandedElts = VT.isVector()
3553 ?
APInt::getAllOnesValue(VT.getVectorNumElements()) 3554 : APInt(1, 1); 3555 return ComputeNumSignBits(Op, DemandedElts, Depth); 3556 } 3557 3558 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3559 unsigned Depth) const { 3560 EVT VT = Op.getValueType(); 3561 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3562 unsigned VTBits = VT.getScalarSizeInBits(); 3563 unsigned NumElts = DemandedElts.getBitWidth(); 3564 unsigned Tmp, Tmp2; 3565 unsigned FirstAnswer = 1; 3566 3567 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3568 const APInt &Val = C->getAPIntValue(); 3569 return Val.getNumSignBits(); 3570 } 3571 3572 if (Depth >= MaxRecursionDepth) 3573 return 1; // Limit search depth. 3574 3575 if (!DemandedElts || VT.isScalableVector()) 3576 return 1; // No demanded elts, better to assume we don't know anything. 3577 3578 unsigned Opcode = Op.getOpcode(); 3579 switch (Opcode) { 3580 default: break; 3581 case ISD::AssertSext: 3582 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3583 return VTBits-Tmp+1; 3584 case ISD::AssertZext: 3585 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3586 return VTBits-Tmp; 3587 3588 case ISD::BUILD_VECTOR: 3589 Tmp = VTBits; 3590 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3591 if (!DemandedElts[i]) 3592 continue; 3593 3594 SDValue SrcOp = Op.getOperand(i); 3595 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1); 3596 3597 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3598 if (SrcOp.getValueSizeInBits() != VTBits) { 3599 assert(SrcOp.getValueSizeInBits() > VTBits && 3600 "Expected BUILD_VECTOR implicit truncation"); 3601 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3602 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3603 } 3604 Tmp = std::min(Tmp, Tmp2); 3605 } 3606 return Tmp; 3607 3608 case ISD::VECTOR_SHUFFLE: { 3609 // Collect the minimum number of sign bits that are shared by every vector 3610 // element referenced by the shuffle. 3611 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3612 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3613 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3614 for (unsigned i = 0; i != NumElts; ++i) { 3615 int M = SVN->getMaskElt(i); 3616 if (!DemandedElts[i]) 3617 continue; 3618 // For UNDEF elements, we don't know anything about the common state of 3619 // the shuffle result. 3620 if (M < 0) 3621 return 1; 3622 if ((unsigned)M < NumElts) 3623 DemandedLHS.setBit((unsigned)M % NumElts); 3624 else 3625 DemandedRHS.setBit((unsigned)M % NumElts); 3626 } 3627 Tmp = std::numeric_limits<unsigned>::max(); 3628 if (!!DemandedLHS) 3629 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3630 if (!!DemandedRHS) { 3631 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3632 Tmp = std::min(Tmp, Tmp2); 3633 } 3634 // If we don't know anything, early out and try computeKnownBits fall-back. 3635 if (Tmp == 1) 3636 break; 3637 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3638 return Tmp; 3639 } 3640 3641 case ISD::BITCAST: { 3642 SDValue N0 = Op.getOperand(0); 3643 EVT SrcVT = N0.getValueType(); 3644 unsigned SrcBits = SrcVT.getScalarSizeInBits(); 3645 3646 // Ignore bitcasts from unsupported types.. 3647 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) 3648 break; 3649 3650 // Fast handling of 'identity' bitcasts. 
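// e.g. v4f32 <-> v4i32: the per-element bit patterns are unchanged, so the
// sign-bit count of the source carries over directly.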
3651 if (VTBits == SrcBits) 3652 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3653 3654 bool IsLE = getDataLayout().isLittleEndian(); 3655 3656 // Bitcast 'large element' scalar/vector to 'small element' vector. 3657 if ((SrcBits % VTBits) == 0) { 3658 assert(VT.isVector() && "Expected bitcast to vector"); 3659 3660 unsigned Scale = SrcBits / VTBits; 3661 APInt SrcDemandedElts(NumElts / Scale, 0); 3662 for (unsigned i = 0; i != NumElts; ++i) 3663 if (DemandedElts[i]) 3664 SrcDemandedElts.setBit(i / Scale); 3665 3666 // Fast case - sign splat can be simply split across the small elements. 3667 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1); 3668 if (Tmp == SrcBits) 3669 return VTBits; 3670 3671 // Slow case - determine how far the sign extends into each sub-element. 3672 Tmp2 = VTBits; 3673 for (unsigned i = 0; i != NumElts; ++i) 3674 if (DemandedElts[i]) { 3675 unsigned SubOffset = i % Scale; 3676 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); 3677 SubOffset = SubOffset * VTBits; 3678 if (Tmp <= SubOffset) 3679 return 1; 3680 Tmp2 = std::min(Tmp2, Tmp - SubOffset); 3681 } 3682 return Tmp2; 3683 } 3684 break; 3685 } 3686 3687 case ISD::SIGN_EXTEND: 3688 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3689 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3690 case ISD::SIGN_EXTEND_INREG: 3691 // Max of the input and what this extends. 3692 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3693 Tmp = VTBits-Tmp+1; 3694 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3695 return std::max(Tmp, Tmp2); 3696 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3697 SDValue Src = Op.getOperand(0); 3698 EVT SrcVT = Src.getValueType(); 3699 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements()); 3700 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3701 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3702 } 3703 case ISD::SRA: 3704 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3705 // SRA X, C -> adds C sign bits. 3706 if (const APInt *ShAmt = 3707 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 3708 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits); 3709 return Tmp; 3710 case ISD::SHL: 3711 if (const APInt *ShAmt = 3712 getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 3713 // shl destroys sign bits, ensure it doesn't shift out all sign bits. 3714 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3715 if (ShAmt->ult(Tmp)) 3716 return Tmp - ShAmt->getZExtValue(); 3717 } 3718 break; 3719 case ISD::AND: 3720 case ISD::OR: 3721 case ISD::XOR: // NOT is handled here. 3722 // Logical binary ops preserve the number of sign bits at the worst. 3723 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3724 if (Tmp != 1) { 3725 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3726 FirstAnswer = std::min(Tmp, Tmp2); 3727 // We computed what we know about the sign bits as our first 3728 // answer. Now proceed to the generic code that uses 3729 // computeKnownBits, and pick whichever answer is better. 3730 } 3731 break; 3732 3733 case ISD::SELECT: 3734 case ISD::VSELECT: 3735 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3736 if (Tmp == 1) return 1; // Early out. 
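// A select produces one of its two value operands, so the result has at
// least min(signbits(TrueV), signbits(FalseV)) sign bits.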
3737 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3738 return std::min(Tmp, Tmp2); 3739 case ISD::SELECT_CC: 3740 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3741 if (Tmp == 1) return 1; // Early out. 3742 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3743 return std::min(Tmp, Tmp2); 3744 3745 case ISD::SMIN: 3746 case ISD::SMAX: { 3747 // If we have a clamp pattern, we know that the number of sign bits will be 3748 // the minimum of the clamp min/max range. 3749 bool IsMax = (Opcode == ISD::SMAX); 3750 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3751 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3752 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3753 CstHigh = 3754 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3755 if (CstLow && CstHigh) { 3756 if (!IsMax) 3757 std::swap(CstLow, CstHigh); 3758 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { 3759 Tmp = CstLow->getAPIntValue().getNumSignBits(); 3760 Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); 3761 return std::min(Tmp, Tmp2); 3762 } 3763 } 3764 3765 // Fallback - just get the minimum number of sign bits of the operands. 3766 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3767 if (Tmp == 1) 3768 return 1; // Early out. 3769 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3770 return std::min(Tmp, Tmp2); 3771 } 3772 case ISD::UMIN: 3773 case ISD::UMAX: 3774 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3775 if (Tmp == 1) 3776 return 1; // Early out. 3777 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3778 return std::min(Tmp, Tmp2); 3779 case ISD::SADDO: 3780 case ISD::UADDO: 3781 case ISD::SSUBO: 3782 case ISD::USUBO: 3783 case ISD::SMULO: 3784 case ISD::UMULO: 3785 if (Op.getResNo() != 1) 3786 break; 3787 // The boolean result conforms to getBooleanContents. Fall through. 3788 // If setcc returns 0/-1, all bits are sign bits. 3789 // We know that we have an integer-based boolean since these operations 3790 // are only available for integer. 3791 if (TLI->getBooleanContents(VT.isVector(), false) == 3792 TargetLowering::ZeroOrNegativeOneBooleanContent) 3793 return VTBits; 3794 break; 3795 case ISD::SETCC: 3796 case ISD::STRICT_FSETCC: 3797 case ISD::STRICT_FSETCCS: { 3798 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 3799 // If setcc returns 0/-1, all bits are sign bits. 3800 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 3801 TargetLowering::ZeroOrNegativeOneBooleanContent) 3802 return VTBits; 3803 break; 3804 } 3805 case ISD::ROTL: 3806 case ISD::ROTR: 3807 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3808 3809 // If we're rotating an 0/-1 value, then it stays an 0/-1 value. 3810 if (Tmp == VTBits) 3811 return VTBits; 3812 3813 if (ConstantSDNode *C = 3814 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) { 3815 unsigned RotAmt = C->getAPIntValue().urem(VTBits); 3816 3817 // Handle rotate right by N like a rotate left by 32-N. 3818 if (Opcode == ISD::ROTR) 3819 RotAmt = (VTBits - RotAmt) % VTBits; 3820 3821 // If we aren't rotating out all of the known-in sign bits, return the 3822 // number that are left. This handles rotl(sext(x), 1) for example. 3823 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt); 3824 } 3825 break; 3826 case ISD::ADD: 3827 case ISD::ADDC: 3828 // Add can have at most one carry bit. 
Thus we know that the output 3829 // is, at worst, one more bit than the inputs. 3830 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3831 if (Tmp == 1) return 1; // Early out. 3832 3833 // Special case decrementing a value (ADD X, -1): 3834 if (ConstantSDNode *CRHS = 3835 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) 3836 if (CRHS->isAllOnesValue()) { 3837 KnownBits Known = 3838 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3839 3840 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3841 // sign bits set. 3842 if ((Known.Zero | 1).isAllOnesValue()) 3843 return VTBits; 3844 3845 // If we are subtracting one from a positive number, there is no carry 3846 // out of the result. 3847 if (Known.isNonNegative()) 3848 return Tmp; 3849 } 3850 3851 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3852 if (Tmp2 == 1) return 1; // Early out. 3853 return std::min(Tmp, Tmp2) - 1; 3854 case ISD::SUB: 3855 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3856 if (Tmp2 == 1) return 1; // Early out. 3857 3858 // Handle NEG. 3859 if (ConstantSDNode *CLHS = 3860 isConstOrConstSplat(Op.getOperand(0), DemandedElts)) 3861 if (CLHS->isNullValue()) { 3862 KnownBits Known = 3863 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3864 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3865 // sign bits set. 3866 if ((Known.Zero | 1).isAllOnesValue()) 3867 return VTBits; 3868 3869 // If the input is known to be positive (the sign bit is known clear), 3870 // the output of the NEG has the same number of sign bits as the input. 3871 if (Known.isNonNegative()) 3872 return Tmp2; 3873 3874 // Otherwise, we treat this like a SUB. 3875 } 3876 3877 // Sub can have at most one carry bit. Thus we know that the output 3878 // is, at worst, one more bit than the inputs. 3879 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3880 if (Tmp == 1) return 1; // Early out. 3881 return std::min(Tmp, Tmp2) - 1; 3882 case ISD::MUL: { 3883 // The output of the Mul can be at most twice the valid bits in the inputs. 3884 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3885 if (SignBitsOp0 == 1) 3886 break; 3887 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3888 if (SignBitsOp1 == 1) 3889 break; 3890 unsigned OutValidBits = 3891 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1); 3892 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1; 3893 } 3894 case ISD::TRUNCATE: { 3895 // Check if the sign bits of source go down as far as the truncated value. 3896 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); 3897 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3898 if (NumSrcSignBits > (NumSrcBits - VTBits)) 3899 return NumSrcSignBits - (NumSrcBits - VTBits); 3900 break; 3901 } 3902 case ISD::EXTRACT_ELEMENT: { 3903 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3904 const int BitWidth = Op.getValueSizeInBits(); 3905 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; 3906 3907 // Get reverse index (starting from 1), Op1 value indexes elements from 3908 // little end. Sign starts at big end. 3909 const int rIndex = Items - 1 - Op.getConstantOperandVal(1); 3910 3911 // If the sign portion ends in our element the subtraction gives correct 3912 // result. 
Otherwise it gives either negative or > bitwidth result 3913 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); 3914 } 3915 case ISD::INSERT_VECTOR_ELT: { 3916 // If we know the element index, split the demand between the 3917 // source vector and the inserted element, otherwise assume we need 3918 // the original demanded vector elements and the value. 3919 SDValue InVec = Op.getOperand(0); 3920 SDValue InVal = Op.getOperand(1); 3921 SDValue EltNo = Op.getOperand(2); 3922 bool DemandedVal = true; 3923 APInt DemandedVecElts = DemandedElts; 3924 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3925 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3926 unsigned EltIdx = CEltNo->getZExtValue(); 3927 DemandedVal = !!DemandedElts[EltIdx]; 3928 DemandedVecElts.clearBit(EltIdx); 3929 } 3930 Tmp = std::numeric_limits<unsigned>::max(); 3931 if (DemandedVal) { 3932 // TODO - handle implicit truncation of inserted elements. 3933 if (InVal.getScalarValueSizeInBits() != VTBits) 3934 break; 3935 Tmp2 = ComputeNumSignBits(InVal, Depth + 1); 3936 Tmp = std::min(Tmp, Tmp2); 3937 } 3938 if (!!DemandedVecElts) { 3939 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1); 3940 Tmp = std::min(Tmp, Tmp2); 3941 } 3942 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3943 return Tmp; 3944 } 3945 case ISD::EXTRACT_VECTOR_ELT: { 3946 SDValue InVec = Op.getOperand(0); 3947 SDValue EltNo = Op.getOperand(1); 3948 EVT VecVT = InVec.getValueType(); 3949 const unsigned BitWidth = Op.getValueSizeInBits(); 3950 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); 3951 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3952 3953 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know 3954 // anything about sign bits. But if the sizes match we can derive knowledge 3955 // about sign bits from the vector operand. 3956 if (BitWidth != EltBitWidth) 3957 break; 3958 3959 // If we know the element index, just demand that vector element, else for 3960 // an unknown element index, ignore DemandedElts and demand them all. 3961 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 3962 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3963 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) 3964 DemandedSrcElts = 3965 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); 3966 3967 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1); 3968 } 3969 case ISD::EXTRACT_SUBVECTOR: { 3970 // Offset the demanded elts by the subvector index. 3971 SDValue Src = Op.getOperand(0); 3972 // Bail until we can represent demanded elements for scalable vectors. 3973 if (Src.getValueType().isScalableVector()) 3974 break; 3975 uint64_t Idx = Op.getConstantOperandVal(1); 3976 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3977 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 3978 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1); 3979 } 3980 case ISD::CONCAT_VECTORS: { 3981 // Determine the minimum number of sign bits across all demanded 3982 // elts of the input vectors. Early out if the result is already 1. 
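// e.g. for concat(v2i32 A, v2i32 B) the answer is the smaller of the counts
// computed over whichever elements of A and B are demanded.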
3983 Tmp = std::numeric_limits<unsigned>::max();
3984 EVT SubVectorVT = Op.getOperand(0).getValueType();
3985 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3986 unsigned NumSubVectors = Op.getNumOperands();
3987 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3988 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3989 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3990 if (!DemandedSub)
3991 continue;
3992 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3993 Tmp = std::min(Tmp, Tmp2);
3994 }
3995 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3996 return Tmp;
3997 }
3998 case ISD::INSERT_SUBVECTOR: {
3999 // Demand any elements from the subvector and the remainder from the src it's
4000 // inserted into.
4001 SDValue Src = Op.getOperand(0);
4002 SDValue Sub = Op.getOperand(1);
4003 uint64_t Idx = Op.getConstantOperandVal(2);
4004 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
4005 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
4006 APInt DemandedSrcElts = DemandedElts;
4007 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
4008
4009 Tmp = std::numeric_limits<unsigned>::max();
4010 if (!!DemandedSubElts) {
4011 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
4012 if (Tmp == 1)
4013 return 1; // early-out
4014 }
4015 if (!!DemandedSrcElts) {
4016 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4017 Tmp = std::min(Tmp, Tmp2);
4018 }
4019 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4020 return Tmp;
4021 }
4022 }
4023
4024 // If we are looking at the loaded value of the SDNode.
4025 if (Op.getResNo() == 0) {
4026 // Handle LOADX separately here. EXTLOAD case will fallthrough.
4027 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
4028 unsigned ExtType = LD->getExtensionType();
4029 switch (ExtType) {
4030 default: break;
4031 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
4032 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4033 return VTBits - Tmp + 1;
4034 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
4035 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4036 return VTBits - Tmp;
4037 case ISD::NON_EXTLOAD:
4038 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
4039 // We only need to handle vectors - computeKnownBits should handle
4040 // scalar cases.
4041 Type *CstTy = Cst->getType();
4042 if (CstTy->isVectorTy() &&
4043 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
4044 Tmp = VTBits;
4045 for (unsigned i = 0; i != NumElts; ++i) {
4046 if (!DemandedElts[i])
4047 continue;
4048 if (Constant *Elt = Cst->getAggregateElement(i)) {
4049 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4050 const APInt &Value = CInt->getValue();
4051 Tmp = std::min(Tmp, Value.getNumSignBits());
4052 continue;
4053 }
4054 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4055 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4056 Tmp = std::min(Tmp, Value.getNumSignBits());
4057 continue;
4058 }
4059 }
4060 // Unknown type. Conservatively assume no bits match sign bit.
4061 return 1;
4062 }
4063 return Tmp;
4064 }
4065 }
4066 break;
4067 }
4068 }
4069 }
4070
4071 // Allow the target to implement this method for its nodes.
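// Target-specific nodes and target intrinsics get a chance here to report
// more sign bits than the generic analysis above could prove.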
4072 if (Opcode >= ISD::BUILTIN_OP_END || 4073 Opcode == ISD::INTRINSIC_WO_CHAIN || 4074 Opcode == ISD::INTRINSIC_W_CHAIN || 4075 Opcode == ISD::INTRINSIC_VOID) { 4076 unsigned NumBits = 4077 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 4078 if (NumBits > 1) 4079 FirstAnswer = std::max(FirstAnswer, NumBits); 4080 } 4081 4082 // Finally, if we can prove that the top bits of the result are 0's or 1's, 4083 // use this information. 4084 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); 4085 4086 APInt Mask; 4087 if (Known.isNonNegative()) { // sign bit is 0 4088 Mask = Known.Zero; 4089 } else if (Known.isNegative()) { // sign bit is 1; 4090 Mask = Known.One; 4091 } else { 4092 // Nothing known. 4093 return FirstAnswer; 4094 } 4095 4096 // Okay, we know that the sign bit in Mask is set. Use CLO to determine 4097 // the number of identical bits in the top of the input value. 4098 Mask <<= Mask.getBitWidth()-VTBits; 4099 return std::max(FirstAnswer, Mask.countLeadingOnes()); 4100 } 4101 4102 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 4103 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 4104 !isa<ConstantSDNode>(Op.getOperand(1))) 4105 return false; 4106 4107 if (Op.getOpcode() == ISD::OR && 4108 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) 4109 return false; 4110 4111 return true; 4112 } 4113 4114 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { 4115 // If we're told that NaNs won't happen, assume they won't. 4116 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) 4117 return true; 4118 4119 if (Depth >= MaxRecursionDepth) 4120 return false; // Limit search depth. 4121 4122 // TODO: Handle vectors. 4123 // If the value is a constant, we can obviously see if it is a NaN or not. 
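// When SNaN is true we only have to rule out signaling NaNs, so a quiet NaN
// constant is still acceptable.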
4124 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4125 return !C->getValueAPF().isNaN() ||
4126 (SNaN && !C->getValueAPF().isSignaling());
4127 }
4128
4129 unsigned Opcode = Op.getOpcode();
4130 switch (Opcode) {
4131 case ISD::FADD:
4132 case ISD::FSUB:
4133 case ISD::FMUL:
4134 case ISD::FDIV:
4135 case ISD::FREM:
4136 case ISD::FSIN:
4137 case ISD::FCOS: {
4138 if (SNaN)
4139 return true;
4140 // TODO: Need isKnownNeverInfinity
4141 return false;
4142 }
4143 case ISD::FCANONICALIZE:
4144 case ISD::FEXP:
4145 case ISD::FEXP2:
4146 case ISD::FTRUNC:
4147 case ISD::FFLOOR:
4148 case ISD::FCEIL:
4149 case ISD::FROUND:
4150 case ISD::FROUNDEVEN:
4151 case ISD::FRINT:
4152 case ISD::FNEARBYINT: {
4153 if (SNaN)
4154 return true;
4155 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4156 }
4157 case ISD::FABS:
4158 case ISD::FNEG:
4159 case ISD::FCOPYSIGN: {
4160 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4161 }
4162 case ISD::SELECT:
4163 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4164 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4165 case ISD::FP_EXTEND:
4166 case ISD::FP_ROUND: {
4167 if (SNaN)
4168 return true;
4169 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4170 }
4171 case ISD::SINT_TO_FP:
4172 case ISD::UINT_TO_FP:
4173 return true;
4174 case ISD::FMA:
4175 case ISD::FMAD: {
4176 if (SNaN)
4177 return true;
4178 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4179 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4180 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4181 }
4182 case ISD::FSQRT: // Need to know the operand is non-negative.
4183 case ISD::FLOG:
4184 case ISD::FLOG2:
4185 case ISD::FLOG10:
4186 case ISD::FPOWI:
4187 case ISD::FPOW: {
4188 if (SNaN)
4189 return true;
4190 // TODO: Refine on operand
4191 return false;
4192 }
4193 case ISD::FMINNUM:
4194 case ISD::FMAXNUM: {
4195 // Only one needs to be known not-nan, since it will be returned if the
4196 // other ends up being one.
4197 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4198 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4199 }
4200 case ISD::FMINNUM_IEEE:
4201 case ISD::FMAXNUM_IEEE: {
4202 if (SNaN)
4203 return true;
4204 // This can return a NaN if either operand is an sNaN, or if both operands
4205 // are NaN.
4206 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4207 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4208 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4209 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4210 }
4211 case ISD::FMINIMUM:
4212 case ISD::FMAXIMUM: {
4213 // TODO: Does this quiet or return the original NaN as-is?
4214 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4215 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4216 }
4217 case ISD::EXTRACT_VECTOR_ELT: {
4218 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4219 }
4220 default:
4221 if (Opcode >= ISD::BUILTIN_OP_END ||
4222 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4223 Opcode == ISD::INTRINSIC_W_CHAIN ||
4224 Opcode == ISD::INTRINSIC_VOID) {
4225 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4226 }
4227
4228 return false;
4229 }
4230 }
4231
4232 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4233 assert(Op.getValueType().isFloatingPoint() &&
4234 "Floating point type expected");
4235
4236 // If the value is a constant, we can obviously see if it is a zero or not.
4237 // TODO: Add BuildVector support.
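// Note that both +0.0 and -0.0 count as zero here, so either one makes this
// return false.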
4238 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4239 return !C->isZero();
4240 return false;
4241 }
4242
4243 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4244 assert(!Op.getValueType().isFloatingPoint() &&
4245 "Floating point types unsupported - use isKnownNeverZeroFloat");
4246
4247 // If the value is a constant, we can obviously see if it is a zero or not.
4248 if (ISD::matchUnaryPredicate(
4249 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4250 return true;
4251
4252 // TODO: Recognize more cases here.
4253 switch (Op.getOpcode()) {
4254 default: break;
4255 case ISD::OR:
4256 if (isKnownNeverZero(Op.getOperand(1)) ||
4257 isKnownNeverZero(Op.getOperand(0)))
4258 return true;
4259 break;
4260 }
4261
4262 return false;
4263 }
4264
4265 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4266 // Check the obvious case.
4267 if (A == B) return true;
4268
4269 // Check for negative and positive zero.
4270 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4271 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4272 if (CA->isZero() && CB->isZero()) return true;
4273
4274 // Otherwise they may not be equal.
4275 return false;
4276 }
4277
4278 // FIXME: unify with llvm::haveNoCommonBitsSet.
4279 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4280 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4281 assert(A.getValueType() == B.getValueType() &&
4282 "Values must have the same type");
4283 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4284 }
4285
4286 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4287 ArrayRef<SDValue> Ops,
4288 SelectionDAG &DAG) {
4289 int NumOps = Ops.size();
4290 assert(NumOps != 0 && "Can't build an empty vector!");
4291 assert(!VT.isScalableVector() &&
4292 "BUILD_VECTOR cannot be used with scalable types");
4293 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4294 "Incorrect element count in BUILD_VECTOR!");
4295
4296 // BUILD_VECTOR of UNDEFs is UNDEF.
4297 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4298 return DAG.getUNDEF(VT);
4299
4300 // BUILD_VECTOR of seq extract/insert from the same vector + type is Identity.
4301 SDValue IdentitySrc;
4302 bool IsIdentity = true;
4303 for (int i = 0; i != NumOps; ++i) {
4304 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4305 Ops[i].getOperand(0).getValueType() != VT ||
4306 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4307 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4308 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4309 IsIdentity = false;
4310 break;
4311 }
4312 IdentitySrc = Ops[i].getOperand(0);
4313 }
4314 if (IsIdentity)
4315 return IdentitySrc;
4316
4317 return SDValue();
4318 }
4319
4320 /// Try to simplify vector concatenation to an input value, undef, or build
4321 /// vector.
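/// For example, a concatenation of extract_subvector(X, 0) and
/// extract_subvector(X, N), where N is the number of elements in each half
/// of X, simply yields X.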
4322 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, 4323 ArrayRef<SDValue> Ops, 4324 SelectionDAG &DAG) { 4325 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!"); 4326 assert(llvm::all_of(Ops, 4327 [Ops](SDValue Op) { 4328 return Ops[0].getValueType() == Op.getValueType(); 4329 }) && 4330 "Concatenation of vectors with inconsistent value types!"); 4331 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) == 4332 VT.getVectorElementCount() && 4333 "Incorrect element count in vector concatenation!"); 4334 4335 if (Ops.size() == 1) 4336 return Ops[0]; 4337 4338 // Concat of UNDEFs is UNDEF. 4339 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 4340 return DAG.getUNDEF(VT); 4341 4342 // Scan the operands and look for extract operations from a single source 4343 // that correspond to insertion at the same location via this concatenation: 4344 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ... 4345 SDValue IdentitySrc; 4346 bool IsIdentity = true; 4347 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 4348 SDValue Op = Ops[i]; 4349 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements(); 4350 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR || 4351 Op.getOperand(0).getValueType() != VT || 4352 (IdentitySrc && Op.getOperand(0) != IdentitySrc) || 4353 Op.getConstantOperandVal(1) != IdentityIndex) { 4354 IsIdentity = false; 4355 break; 4356 } 4357 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) && 4358 "Unexpected identity source vector for concat of extracts"); 4359 IdentitySrc = Op.getOperand(0); 4360 } 4361 if (IsIdentity) { 4362 assert(IdentitySrc && "Failed to set source vector of extracts"); 4363 return IdentitySrc; 4364 } 4365 4366 // The code below this point is only designed to work for fixed width 4367 // vectors, so we bail out for now. 4368 if (VT.isScalableVector()) 4369 return SDValue(); 4370 4371 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be 4372 // simplified to one big BUILD_VECTOR. 4373 // FIXME: Add support for SCALAR_TO_VECTOR as well. 4374 EVT SVT = VT.getScalarType(); 4375 SmallVector<SDValue, 16> Elts; 4376 for (SDValue Op : Ops) { 4377 EVT OpVT = Op.getValueType(); 4378 if (Op.isUndef()) 4379 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); 4380 else if (Op.getOpcode() == ISD::BUILD_VECTOR) 4381 Elts.append(Op->op_begin(), Op->op_end()); 4382 else 4383 return SDValue(); 4384 } 4385 4386 // BUILD_VECTOR requires all inputs to be of the same type, find the 4387 // maximum type and extend them all. 4388 for (SDValue Op : Elts) 4389 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 4390 4391 if (SVT.bitsGT(VT.getScalarType())) 4392 for (SDValue &Op : Elts) 4393 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) 4394 ? DAG.getZExtOrTrunc(Op, DL, SVT) 4395 : DAG.getSExtOrTrunc(Op, DL, SVT); 4396 4397 SDValue V = DAG.getBuildVector(VT, DL, Elts); 4398 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG); 4399 return V; 4400 } 4401 4402 /// Gets or creates the specified node. 
4403 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { 4404 FoldingSetNodeID ID; 4405 AddNodeIDNode(ID, Opcode, getVTList(VT), None); 4406 void *IP = nullptr; 4407 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4408 return SDValue(E, 0); 4409 4410 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4411 getVTList(VT)); 4412 CSEMap.InsertNode(N, IP); 4413 4414 InsertNode(N); 4415 SDValue V = SDValue(N, 0); 4416 NewSDValueDbgMsg(V, "Creating new node: ", this); 4417 return V; 4418 } 4419 4420 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4421 SDValue Operand, const SDNodeFlags Flags) { 4422 // Constant fold unary operations with an integer constant operand. Even 4423 // opaque constant will be folded, because the folding of unary operations 4424 // doesn't create new constants with different values. Nevertheless, the 4425 // opaque flag is preserved during folding to prevent future folding with 4426 // other constants. 4427 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 4428 const APInt &Val = C->getAPIntValue(); 4429 switch (Opcode) { 4430 default: break; 4431 case ISD::SIGN_EXTEND: 4432 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 4433 C->isTargetOpcode(), C->isOpaque()); 4434 case ISD::TRUNCATE: 4435 if (C->isOpaque()) 4436 break; 4437 LLVM_FALLTHROUGH; 4438 case ISD::ANY_EXTEND: 4439 case ISD::ZERO_EXTEND: 4440 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 4441 C->isTargetOpcode(), C->isOpaque()); 4442 case ISD::UINT_TO_FP: 4443 case ISD::SINT_TO_FP: { 4444 APFloat apf(EVTToAPFloatSemantics(VT), 4445 APInt::getNullValue(VT.getSizeInBits())); 4446 (void)apf.convertFromAPInt(Val, 4447 Opcode==ISD::SINT_TO_FP, 4448 APFloat::rmNearestTiesToEven); 4449 return getConstantFP(apf, DL, VT); 4450 } 4451 case ISD::BITCAST: 4452 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 4453 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 4454 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 4455 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 4456 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 4457 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 4458 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 4459 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 4460 break; 4461 case ISD::ABS: 4462 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 4463 C->isOpaque()); 4464 case ISD::BITREVERSE: 4465 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 4466 C->isOpaque()); 4467 case ISD::BSWAP: 4468 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 4469 C->isOpaque()); 4470 case ISD::CTPOP: 4471 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 4472 C->isOpaque()); 4473 case ISD::CTLZ: 4474 case ISD::CTLZ_ZERO_UNDEF: 4475 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 4476 C->isOpaque()); 4477 case ISD::CTTZ: 4478 case ISD::CTTZ_ZERO_UNDEF: 4479 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 4480 C->isOpaque()); 4481 case ISD::FP16_TO_FP: { 4482 bool Ignored; 4483 APFloat FPV(APFloat::IEEEhalf(), 4484 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 4485 4486 // This can return overflow, underflow, or inexact; we don't care. 4487 // FIXME need to be more flexible about rounding mode. 
4488 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4489 APFloat::rmNearestTiesToEven, &Ignored); 4490 return getConstantFP(FPV, DL, VT); 4491 } 4492 } 4493 } 4494 4495 // Constant fold unary operations with a floating point constant operand. 4496 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4497 APFloat V = C->getValueAPF(); // make copy 4498 switch (Opcode) { 4499 case ISD::FNEG: 4500 V.changeSign(); 4501 return getConstantFP(V, DL, VT); 4502 case ISD::FABS: 4503 V.clearSign(); 4504 return getConstantFP(V, DL, VT); 4505 case ISD::FCEIL: { 4506 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4507 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4508 return getConstantFP(V, DL, VT); 4509 break; 4510 } 4511 case ISD::FTRUNC: { 4512 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4513 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4514 return getConstantFP(V, DL, VT); 4515 break; 4516 } 4517 case ISD::FFLOOR: { 4518 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4519 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4520 return getConstantFP(V, DL, VT); 4521 break; 4522 } 4523 case ISD::FP_EXTEND: { 4524 bool ignored; 4525 // This can return overflow, underflow, or inexact; we don't care. 4526 // FIXME need to be more flexible about rounding mode. 4527 (void)V.convert(EVTToAPFloatSemantics(VT), 4528 APFloat::rmNearestTiesToEven, &ignored); 4529 return getConstantFP(V, DL, VT); 4530 } 4531 case ISD::FP_TO_SINT: 4532 case ISD::FP_TO_UINT: { 4533 bool ignored; 4534 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4535 // FIXME need to be more flexible about rounding mode. 4536 APFloat::opStatus s = 4537 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4538 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4539 break; 4540 return getConstant(IntVal, DL, VT); 4541 } 4542 case ISD::BITCAST: 4543 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4544 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4545 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4546 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4547 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4548 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4549 break; 4550 case ISD::FP_TO_FP16: { 4551 bool Ignored; 4552 // This can return overflow, underflow, or inexact; we don't care. 4553 // FIXME need to be more flexible about rounding mode. 4554 (void)V.convert(APFloat::IEEEhalf(), 4555 APFloat::rmNearestTiesToEven, &Ignored); 4556 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4557 } 4558 } 4559 } 4560 4561 // Constant fold unary operations with a vector integer or float operand. 4562 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4563 if (BV->isConstant()) { 4564 switch (Opcode) { 4565 default: 4566 // FIXME: Entirely reasonable to perform folding of other unary 4567 // operations here as the need arises. 
4568 break;
4569 case ISD::FNEG:
4570 case ISD::FABS:
4571 case ISD::FCEIL:
4572 case ISD::FTRUNC:
4573 case ISD::FFLOOR:
4574 case ISD::FP_EXTEND:
4575 case ISD::FP_TO_SINT:
4576 case ISD::FP_TO_UINT:
4577 case ISD::TRUNCATE:
4578 case ISD::ANY_EXTEND:
4579 case ISD::ZERO_EXTEND:
4580 case ISD::SIGN_EXTEND:
4581 case ISD::UINT_TO_FP:
4582 case ISD::SINT_TO_FP:
4583 case ISD::ABS:
4584 case ISD::BITREVERSE:
4585 case ISD::BSWAP:
4586 case ISD::CTLZ:
4587 case ISD::CTLZ_ZERO_UNDEF:
4588 case ISD::CTTZ:
4589 case ISD::CTTZ_ZERO_UNDEF:
4590 case ISD::CTPOP: {
4591 SDValue Ops = { Operand };
4592 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4593 return Fold;
4594 }
4595 }
4596 }
4597 }
4598
4599 unsigned OpOpcode = Operand.getNode()->getOpcode();
4600 switch (Opcode) {
4601 case ISD::FREEZE:
4602 assert(VT == Operand.getValueType() && "Unexpected VT!");
4603 break;
4604 case ISD::TokenFactor:
4605 case ISD::MERGE_VALUES:
4606 case ISD::CONCAT_VECTORS:
4607 return Operand; // Factor, merge or concat of one node? No need.
4608 case ISD::BUILD_VECTOR: {
4609 // Attempt to simplify BUILD_VECTOR.
4610 SDValue Ops[] = {Operand};
4611 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4612 return V;
4613 break;
4614 }
4615 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4616 case ISD::FP_EXTEND:
4617 assert(VT.isFloatingPoint() &&
4618 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4619 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4620 assert((!VT.isVector() ||
4621 VT.getVectorNumElements() ==
4622 Operand.getValueType().getVectorNumElements()) &&
4623 "Vector element count mismatch!");
4624 assert(Operand.getValueType().bitsLT(VT) &&
4625 "Invalid fpext node, dst < src!");
4626 if (Operand.isUndef())
4627 return getUNDEF(VT);
4628 break;
4629 case ISD::FP_TO_SINT:
4630 case ISD::FP_TO_UINT:
4631 if (Operand.isUndef())
4632 return getUNDEF(VT);
4633 break;
4634 case ISD::SINT_TO_FP:
4635 case ISD::UINT_TO_FP:
4636 // [us]itofp(undef) = 0, because the result value is bounded.
4637 if (Operand.isUndef())
4638 return getConstantFP(0.0, DL, VT);
4639 break;
4640 case ISD::SIGN_EXTEND:
4641 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4642 "Invalid SIGN_EXTEND!");
4643 assert(VT.isVector() == Operand.getValueType().isVector() &&
4644 "SIGN_EXTEND result type should be vector iff the operand "
4645 "type is vector!");
4646 if (Operand.getValueType() == VT) return Operand; // noop extension
4647 assert((!VT.isVector() ||
4648 VT.getVectorElementCount() ==
4649 Operand.getValueType().getVectorElementCount()) &&
4650 "Vector element count mismatch!");
4651 assert(Operand.getValueType().bitsLT(VT) &&
4652 "Invalid sext node, dst < src!");
4653 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4654 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4655 else if (OpOpcode == ISD::UNDEF)
4656 // sext(undef) = 0, because the top bits will all be the same.
4657 return getConstant(0, DL, VT);
4658 break;
4659 case ISD::ZERO_EXTEND:
4660 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4661 "Invalid ZERO_EXTEND!");
4662 assert(VT.isVector() == Operand.getValueType().isVector() &&
4663 "ZERO_EXTEND result type should be vector iff the operand "
4664 "type is vector!");
4665 if (Operand.getValueType() == VT) return Operand; // noop extension
4666 assert((!VT.isVector() ||
4667 VT.getVectorElementCount() ==
4668 Operand.getValueType().getVectorElementCount()) &&
4669 "Vector element count mismatch!");
4670 assert(Operand.getValueType().bitsLT(VT) &&
4671 "Invalid zext node, dst < src!");
4672 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4673 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4674 else if (OpOpcode == ISD::UNDEF)
4675 // zext(undef) = 0, because the top bits will be zero.
4676 return getConstant(0, DL, VT);
4677 break;
4678 case ISD::ANY_EXTEND:
4679 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4680 "Invalid ANY_EXTEND!");
4681 assert(VT.isVector() == Operand.getValueType().isVector() &&
4682 "ANY_EXTEND result type should be vector iff the operand "
4683 "type is vector!");
4684 if (Operand.getValueType() == VT) return Operand; // noop extension
4685 assert((!VT.isVector() ||
4686 VT.getVectorElementCount() ==
4687 Operand.getValueType().getVectorElementCount()) &&
4688 "Vector element count mismatch!");
4689 assert(Operand.getValueType().bitsLT(VT) &&
4690 "Invalid anyext node, dst < src!");
4691
4692 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4693 OpOpcode == ISD::ANY_EXTEND)
4694 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4695 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4696 else if (OpOpcode == ISD::UNDEF)
4697 return getUNDEF(VT);
4698
4699 // (ext (trunc x)) -> x
4700 if (OpOpcode == ISD::TRUNCATE) {
4701 SDValue OpOp = Operand.getOperand(0);
4702 if (OpOp.getValueType() == VT) {
4703 transferDbgValues(Operand, OpOp);
4704 return OpOp;
4705 }
4706 }
4707 break;
4708 case ISD::TRUNCATE:
4709 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4710 "Invalid TRUNCATE!");
4711 assert(VT.isVector() == Operand.getValueType().isVector() &&
4712 "TRUNCATE result type should be vector iff the operand "
4713 "type is vector!");
4714 if (Operand.getValueType() == VT) return Operand; // noop truncate
4715 assert((!VT.isVector() ||
4716 VT.getVectorElementCount() ==
4717 Operand.getValueType().getVectorElementCount()) &&
4718 "Vector element count mismatch!");
4719 assert(Operand.getValueType().bitsGT(VT) &&
4720 "Invalid truncate node, src < dst!");
4721 if (OpOpcode == ISD::TRUNCATE)
4722 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4723 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4724 OpOpcode == ISD::ANY_EXTEND) {
4725 // If the source is smaller than the dest, we still need an extend.
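// e.g. truncating (sext i8 X to i64) down to i32 becomes (sext i8 X to i32),
// truncating (zext i48 X to i64) down to i32 becomes a truncate of the i48
// value, and if the widths already match we can return that value directly.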
4726 if (Operand.getOperand(0).getValueType().getScalarType() 4727 .bitsLT(VT.getScalarType())) 4728 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4729 if (Operand.getOperand(0).getValueType().bitsGT(VT)) 4730 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4731 return Operand.getOperand(0); 4732 } 4733 if (OpOpcode == ISD::UNDEF) 4734 return getUNDEF(VT); 4735 break; 4736 case ISD::ANY_EXTEND_VECTOR_INREG: 4737 case ISD::ZERO_EXTEND_VECTOR_INREG: 4738 case ISD::SIGN_EXTEND_VECTOR_INREG: 4739 assert(VT.isVector() && "This DAG node is restricted to vector types."); 4740 assert(Operand.getValueType().bitsLE(VT) && 4741 "The input must be the same size or smaller than the result."); 4742 assert(VT.getVectorNumElements() < 4743 Operand.getValueType().getVectorNumElements() && 4744 "The destination vector type must have fewer lanes than the input."); 4745 break; 4746 case ISD::ABS: 4747 assert(VT.isInteger() && VT == Operand.getValueType() && 4748 "Invalid ABS!"); 4749 if (OpOpcode == ISD::UNDEF) 4750 return getUNDEF(VT); 4751 break; 4752 case ISD::BSWAP: 4753 assert(VT.isInteger() && VT == Operand.getValueType() && 4754 "Invalid BSWAP!"); 4755 assert((VT.getScalarSizeInBits() % 16 == 0) && 4756 "BSWAP types must be a multiple of 16 bits!"); 4757 if (OpOpcode == ISD::UNDEF) 4758 return getUNDEF(VT); 4759 break; 4760 case ISD::BITREVERSE: 4761 assert(VT.isInteger() && VT == Operand.getValueType() && 4762 "Invalid BITREVERSE!"); 4763 if (OpOpcode == ISD::UNDEF) 4764 return getUNDEF(VT); 4765 break; 4766 case ISD::BITCAST: 4767 // Basic sanity checking. 4768 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 4769 "Cannot BITCAST between types of different sizes!"); 4770 if (VT == Operand.getValueType()) return Operand; // noop conversion. 4771 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4772 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4773 if (OpOpcode == ISD::UNDEF) 4774 return getUNDEF(VT); 4775 break; 4776 case ISD::SCALAR_TO_VECTOR: 4777 assert(VT.isVector() && !Operand.getValueType().isVector() && 4778 (VT.getVectorElementType() == Operand.getValueType() || 4779 (VT.getVectorElementType().isInteger() && 4780 Operand.getValueType().isInteger() && 4781 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4782 "Illegal SCALAR_TO_VECTOR node!"); 4783 if (OpOpcode == ISD::UNDEF) 4784 return getUNDEF(VT); 4785 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4786 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4787 isa<ConstantSDNode>(Operand.getOperand(1)) && 4788 Operand.getConstantOperandVal(1) == 0 && 4789 Operand.getOperand(0).getValueType() == VT) 4790 return Operand.getOperand(0); 4791 break; 4792 case ISD::FNEG: 4793 // Negation of an unknown bag of bits is still completely undefined. 
4794 if (OpOpcode == ISD::UNDEF) 4795 return getUNDEF(VT); 4796 4797 if (OpOpcode == ISD::FNEG) // --X -> X 4798 return Operand.getOperand(0); 4799 break; 4800 case ISD::FABS: 4801 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4802 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4803 break; 4804 case ISD::VSCALE: 4805 assert(VT == Operand.getValueType() && "Unexpected VT!"); 4806 break; 4807 } 4808 4809 SDNode *N; 4810 SDVTList VTs = getVTList(VT); 4811 SDValue Ops[] = {Operand}; 4812 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4813 FoldingSetNodeID ID; 4814 AddNodeIDNode(ID, Opcode, VTs, Ops); 4815 void *IP = nullptr; 4816 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4817 E->intersectFlagsWith(Flags); 4818 return SDValue(E, 0); 4819 } 4820 4821 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4822 N->setFlags(Flags); 4823 createOperands(N, Ops); 4824 CSEMap.InsertNode(N, IP); 4825 } else { 4826 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4827 createOperands(N, Ops); 4828 } 4829 4830 InsertNode(N); 4831 SDValue V = SDValue(N, 0); 4832 NewSDValueDbgMsg(V, "Creating new node: ", this); 4833 return V; 4834 } 4835 4836 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, 4837 const APInt &C2) { 4838 switch (Opcode) { 4839 case ISD::ADD: return C1 + C2; 4840 case ISD::SUB: return C1 - C2; 4841 case ISD::MUL: return C1 * C2; 4842 case ISD::AND: return C1 & C2; 4843 case ISD::OR: return C1 | C2; 4844 case ISD::XOR: return C1 ^ C2; 4845 case ISD::SHL: return C1 << C2; 4846 case ISD::SRL: return C1.lshr(C2); 4847 case ISD::SRA: return C1.ashr(C2); 4848 case ISD::ROTL: return C1.rotl(C2); 4849 case ISD::ROTR: return C1.rotr(C2); 4850 case ISD::SMIN: return C1.sle(C2) ? C1 : C2; 4851 case ISD::SMAX: return C1.sge(C2) ? C1 : C2; 4852 case ISD::UMIN: return C1.ule(C2) ? C1 : C2; 4853 case ISD::UMAX: return C1.uge(C2) ? C1 : C2; 4854 case ISD::SADDSAT: return C1.sadd_sat(C2); 4855 case ISD::UADDSAT: return C1.uadd_sat(C2); 4856 case ISD::SSUBSAT: return C1.ssub_sat(C2); 4857 case ISD::USUBSAT: return C1.usub_sat(C2); 4858 case ISD::UDIV: 4859 if (!C2.getBoolValue()) 4860 break; 4861 return C1.udiv(C2); 4862 case ISD::UREM: 4863 if (!C2.getBoolValue()) 4864 break; 4865 return C1.urem(C2); 4866 case ISD::SDIV: 4867 if (!C2.getBoolValue()) 4868 break; 4869 return C1.sdiv(C2); 4870 case ISD::SREM: 4871 if (!C2.getBoolValue()) 4872 break; 4873 return C1.srem(C2); 4874 } 4875 return llvm::None; 4876 } 4877 4878 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4879 const GlobalAddressSDNode *GA, 4880 const SDNode *N2) { 4881 if (GA->getOpcode() != ISD::GlobalAddress) 4882 return SDValue(); 4883 if (!TLI->isOffsetFoldingLegal(GA)) 4884 return SDValue(); 4885 auto *C2 = dyn_cast<ConstantSDNode>(N2); 4886 if (!C2) 4887 return SDValue(); 4888 int64_t Offset = C2->getSExtValue(); 4889 switch (Opcode) { 4890 case ISD::ADD: break; 4891 case ISD::SUB: Offset = -uint64_t(Offset); break; 4892 default: return SDValue(); 4893 } 4894 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, 4895 GA->getOffset() + uint64_t(Offset)); 4896 } 4897 4898 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4899 switch (Opcode) { 4900 case ISD::SDIV: 4901 case ISD::UDIV: 4902 case ISD::SREM: 4903 case ISD::UREM: { 4904 // If a divisor is zero/undef or any element of a divisor vector is 4905 // zero/undef, the whole op is undef. 
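// For example, (udiv X, 0), (srem X, undef) and
// (sdiv X, (build_vector 1, undef, 2, 3)) are all reported as undef here.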
4906 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4907 SDValue Divisor = Ops[1]; 4908 if (Divisor.isUndef() || isNullConstant(Divisor)) 4909 return true; 4910 4911 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4912 llvm::any_of(Divisor->op_values(), 4913 [](SDValue V) { return V.isUndef() || 4914 isNullConstant(V); }); 4915 // TODO: Handle signed overflow. 4916 } 4917 // TODO: Handle oversized shifts. 4918 default: 4919 return false; 4920 } 4921 } 4922 4923 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4924 EVT VT, ArrayRef<SDValue> Ops) { 4925 // If the opcode is a target-specific ISD node, there's nothing we can 4926 // do here and the operand rules may not line up with the below, so 4927 // bail early. 4928 if (Opcode >= ISD::BUILTIN_OP_END) 4929 return SDValue(); 4930 4931 // For now, the array Ops should only contain two values. 4932 // This enforcement will be removed once this function is merged with 4933 // FoldConstantVectorArithmetic 4934 if (Ops.size() != 2) 4935 return SDValue(); 4936 4937 if (isUndef(Opcode, Ops)) 4938 return getUNDEF(VT); 4939 4940 SDNode *N1 = Ops[0].getNode(); 4941 SDNode *N2 = Ops[1].getNode(); 4942 4943 // Handle the case of two scalars. 4944 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) { 4945 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) { 4946 if (C1->isOpaque() || C2->isOpaque()) 4947 return SDValue(); 4948 4949 Optional<APInt> FoldAttempt = 4950 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()); 4951 if (!FoldAttempt) 4952 return SDValue(); 4953 4954 SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT); 4955 assert((!Folded || !VT.isVector()) && 4956 "Can't fold vectors ops with scalar operands"); 4957 return Folded; 4958 } 4959 } 4960 4961 // fold (add Sym, c) -> Sym+c 4962 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1)) 4963 return FoldSymbolOffset(Opcode, VT, GA, N2); 4964 if (TLI->isCommutativeBinOp(Opcode)) 4965 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2)) 4966 return FoldSymbolOffset(Opcode, VT, GA, N1); 4967 4968 // TODO: All the folds below are performed lane-by-lane and assume a fixed 4969 // vector width, however we should be able to do constant folds involving 4970 // splat vector nodes too. 4971 if (VT.isScalableVector()) 4972 return SDValue(); 4973 4974 // For fixed width vectors, extract each constant element and fold them 4975 // individually. Either input may be an undef value. 4976 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1); 4977 if (!BV1 && !N1->isUndef()) 4978 return SDValue(); 4979 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2); 4980 if (!BV2 && !N2->isUndef()) 4981 return SDValue(); 4982 // If both operands are undef, that's handled the same way as scalars. 4983 if (!BV1 && !BV2) 4984 return SDValue(); 4985 4986 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) && 4987 "Vector binop with different number of elements in operands?"); 4988 4989 EVT SVT = VT.getScalarType(); 4990 EVT LegalSVT = SVT; 4991 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4992 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4993 if (LegalSVT.bitsLT(SVT)) 4994 return SDValue(); 4995 } 4996 SmallVector<SDValue, 4> Outputs; 4997 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands(); 4998 for (unsigned I = 0; I != NumOps; ++I) { 4999 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT); 5000 SDValue V2 = BV2 ? 
BV2->getOperand(I) : getUNDEF(SVT); 5001 if (SVT.isInteger()) { 5002 if (V1->getValueType(0).bitsGT(SVT)) 5003 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1); 5004 if (V2->getValueType(0).bitsGT(SVT)) 5005 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2); 5006 } 5007 5008 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT) 5009 return SDValue(); 5010 5011 // Fold one vector element. 5012 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2); 5013 if (LegalSVT != SVT) 5014 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 5015 5016 // Scalar folding only succeeded if the result is a constant or UNDEF. 5017 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 5018 ScalarResult.getOpcode() != ISD::ConstantFP) 5019 return SDValue(); 5020 Outputs.push_back(ScalarResult); 5021 } 5022 5023 assert(VT.getVectorNumElements() == Outputs.size() && 5024 "Vector size mismatch!"); 5025 5026 // We may have a vector type but a scalar result. Create a splat. 5027 Outputs.resize(VT.getVectorNumElements(), Outputs.back()); 5028 5029 // Build a big vector out of the scalar elements we generated. 5030 return getBuildVector(VT, SDLoc(), Outputs); 5031 } 5032 5033 // TODO: Merge with FoldConstantArithmetic 5034 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, 5035 const SDLoc &DL, EVT VT, 5036 ArrayRef<SDValue> Ops, 5037 const SDNodeFlags Flags) { 5038 // If the opcode is a target-specific ISD node, there's nothing we can 5039 // do here and the operand rules may not line up with the below, so 5040 // bail early. 5041 if (Opcode >= ISD::BUILTIN_OP_END) 5042 return SDValue(); 5043 5044 if (isUndef(Opcode, Ops)) 5045 return getUNDEF(VT); 5046 5047 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday? 5048 if (!VT.isVector()) 5049 return SDValue(); 5050 5051 // TODO: All the folds below are performed lane-by-lane and assume a fixed 5052 // vector width, however we should be able to do constant folds involving 5053 // splat vector nodes too. 5054 if (VT.isScalableVector()) 5055 return SDValue(); 5056 5057 // From this point onwards all vectors are assumed to be fixed width. 5058 unsigned NumElts = VT.getVectorNumElements(); 5059 5060 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) { 5061 return !Op.getValueType().isVector() || 5062 Op.getValueType().getVectorNumElements() == NumElts; 5063 }; 5064 5065 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { 5066 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); 5067 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || 5068 (BV && BV->isConstant()); 5069 }; 5070 5071 // All operands must be vector types with the same number of elements as 5072 // the result type and must be either UNDEF or a build vector of constant 5073 // or UNDEF scalars. 5074 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) || 5075 !llvm::all_of(Ops, IsScalarOrSameVectorSize)) 5076 return SDValue(); 5077 5078 // If we are comparing vectors, then the result needs to be a i1 boolean 5079 // that is then sign-extended back to the legal result type. 5080 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); 5081 5082 // Find legal integer scalar type for constant promotion and 5083 // ensure that its scalar size is at least as large as source. 
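// For example, for a SETCC that produces a v4i32 result, each lane is folded
// as an i1 and then sign-extended to i32 below, yielding all-zeros or
// all-ones lanes in the rebuilt vector.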
5084 EVT LegalSVT = VT.getScalarType(); 5085 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 5086 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 5087 if (LegalSVT.bitsLT(VT.getScalarType())) 5088 return SDValue(); 5089 } 5090 5091 // Constant fold each scalar lane separately. 5092 SmallVector<SDValue, 4> ScalarResults; 5093 for (unsigned i = 0; i != NumElts; i++) { 5094 SmallVector<SDValue, 4> ScalarOps; 5095 for (SDValue Op : Ops) { 5096 EVT InSVT = Op.getValueType().getScalarType(); 5097 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op); 5098 if (!InBV) { 5099 // We've checked that this is UNDEF or a constant of some kind. 5100 if (Op.isUndef()) 5101 ScalarOps.push_back(getUNDEF(InSVT)); 5102 else 5103 ScalarOps.push_back(Op); 5104 continue; 5105 } 5106 5107 SDValue ScalarOp = InBV->getOperand(i); 5108 EVT ScalarVT = ScalarOp.getValueType(); 5109 5110 // Build vector (integer) scalar operands may need implicit 5111 // truncation - do this before constant folding. 5112 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 5113 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 5114 5115 ScalarOps.push_back(ScalarOp); 5116 } 5117 5118 // Constant fold the scalar operands. 5119 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 5120 5121 // Legalize the (integer) scalar constant if necessary. 5122 if (LegalSVT != SVT) 5123 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 5124 5125 // Scalar folding only succeeded if the result is a constant or UNDEF. 5126 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 5127 ScalarResult.getOpcode() != ISD::ConstantFP) 5128 return SDValue(); 5129 ScalarResults.push_back(ScalarResult); 5130 } 5131 5132 SDValue V = getBuildVector(VT, DL, ScalarResults); 5133 NewSDValueDbgMsg(V, "New node fold constant vector: ", this); 5134 return V; 5135 } 5136 5137 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL, 5138 EVT VT, SDValue N1, SDValue N2) { 5139 // TODO: We don't do any constant folding for strict FP opcodes here, but we 5140 // should. That will require dealing with a potentially non-default 5141 // rounding mode, checking the "opStatus" return value from the APFloat 5142 // math calculations, and possibly other variations. 5143 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 5144 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 5145 if (N1CFP && N2CFP) { 5146 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF(); 5147 switch (Opcode) { 5148 case ISD::FADD: 5149 C1.add(C2, APFloat::rmNearestTiesToEven); 5150 return getConstantFP(C1, DL, VT); 5151 case ISD::FSUB: 5152 C1.subtract(C2, APFloat::rmNearestTiesToEven); 5153 return getConstantFP(C1, DL, VT); 5154 case ISD::FMUL: 5155 C1.multiply(C2, APFloat::rmNearestTiesToEven); 5156 return getConstantFP(C1, DL, VT); 5157 case ISD::FDIV: 5158 C1.divide(C2, APFloat::rmNearestTiesToEven); 5159 return getConstantFP(C1, DL, VT); 5160 case ISD::FREM: 5161 C1.mod(C2); 5162 return getConstantFP(C1, DL, VT); 5163 case ISD::FCOPYSIGN: 5164 C1.copySign(C2); 5165 return getConstantFP(C1, DL, VT); 5166 default: break; 5167 } 5168 } 5169 if (N1CFP && Opcode == ISD::FP_ROUND) { 5170 APFloat C1 = N1CFP->getValueAPF(); // make copy 5171 bool Unused; 5172 // This can return overflow, underflow, or inexact; we don't care. 5173 // FIXME need to be more flexible about rounding mode. 
5174 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, 5175 &Unused); 5176 return getConstantFP(C1, DL, VT); 5177 } 5178 5179 switch (Opcode) { 5180 case ISD::FSUB: 5181 // -0.0 - undef --> undef (consistent with "fneg undef") 5182 if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef()) 5183 return getUNDEF(VT); 5184 LLVM_FALLTHROUGH; 5185 5186 case ISD::FADD: 5187 case ISD::FMUL: 5188 case ISD::FDIV: 5189 case ISD::FREM: 5190 // If both operands are undef, the result is undef. If 1 operand is undef, 5191 // the result is NaN. This should match the behavior of the IR optimizer. 5192 if (N1.isUndef() && N2.isUndef()) 5193 return getUNDEF(VT); 5194 if (N1.isUndef() || N2.isUndef()) 5195 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); 5196 } 5197 return SDValue(); 5198 } 5199 5200 SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) { 5201 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!"); 5202 5203 // There's no need to assert on a byte-aligned pointer. All pointers are at 5204 // least byte aligned. 5205 if (A == Align(1)) 5206 return Val; 5207 5208 FoldingSetNodeID ID; 5209 AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val}); 5210 ID.AddInteger(A.value()); 5211 5212 void *IP = nullptr; 5213 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 5214 return SDValue(E, 0); 5215 5216 auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(), 5217 Val.getValueType(), A); 5218 createOperands(N, {Val}); 5219 5220 CSEMap.InsertNode(N, IP); 5221 InsertNode(N); 5222 5223 SDValue V(N, 0); 5224 NewSDValueDbgMsg(V, "Creating new node: ", this); 5225 return V; 5226 } 5227 5228 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5229 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 5230 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5231 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 5232 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5233 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5234 5235 // Canonicalize constant to RHS if commutative. 5236 if (TLI->isCommutativeBinOp(Opcode)) { 5237 if (N1C && !N2C) { 5238 std::swap(N1C, N2C); 5239 std::swap(N1, N2); 5240 } else if (N1CFP && !N2CFP) { 5241 std::swap(N1CFP, N2CFP); 5242 std::swap(N1, N2); 5243 } 5244 } 5245 5246 switch (Opcode) { 5247 default: break; 5248 case ISD::TokenFactor: 5249 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 5250 N2.getValueType() == MVT::Other && "Invalid token factor!"); 5251 // Fold trivial token factors. 5252 if (N1.getOpcode() == ISD::EntryToken) return N2; 5253 if (N2.getOpcode() == ISD::EntryToken) return N1; 5254 if (N1 == N2) return N1; 5255 break; 5256 case ISD::BUILD_VECTOR: { 5257 // Attempt to simplify BUILD_VECTOR. 5258 SDValue Ops[] = {N1, N2}; 5259 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5260 return V; 5261 break; 5262 } 5263 case ISD::CONCAT_VECTORS: { 5264 SDValue Ops[] = {N1, N2}; 5265 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5266 return V; 5267 break; 5268 } 5269 case ISD::AND: 5270 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5271 assert(N1.getValueType() == N2.getValueType() && 5272 N1.getValueType() == VT && "Binary operator types must match!"); 5273 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 5274 // worth handling here. 
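// For example, when a 64-bit AND with the mask 0x00000000FFFFFFFF is split
// into two i32 halves, the high half becomes (and hi, 0) and the low half
// becomes (and lo, -1); both are folded away right here.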
5275 if (N2C && N2C->isNullValue())
5276 return N2;
5277 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
5278 return N1;
5279 break;
5280 case ISD::OR:
5281 case ISD::XOR:
5282 case ISD::ADD:
5283 case ISD::SUB:
5284 assert(VT.isInteger() && "This operator does not apply to FP types!");
5285 assert(N1.getValueType() == N2.getValueType() &&
5286 N1.getValueType() == VT && "Binary operator types must match!");
5287 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5288 // it's worth handling here.
5289 if (N2C && N2C->isNullValue())
5290 return N1;
5291 break;
5292 case ISD::MUL:
5293 assert(VT.isInteger() && "This operator does not apply to FP types!");
5294 assert(N1.getValueType() == N2.getValueType() &&
5295 N1.getValueType() == VT && "Binary operator types must match!");
5296 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5297 APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue();
5298 APInt N2CImm = N2C->getAPIntValue();
5299 return getVScale(DL, VT, MulImm * N2CImm);
5300 }
5301 break;
5302 case ISD::UDIV:
5303 case ISD::UREM:
5304 case ISD::MULHU:
5305 case ISD::MULHS:
5306 case ISD::SDIV:
5307 case ISD::SREM:
5308 case ISD::SMIN:
5309 case ISD::SMAX:
5310 case ISD::UMIN:
5311 case ISD::UMAX:
5312 case ISD::SADDSAT:
5313 case ISD::SSUBSAT:
5314 case ISD::UADDSAT:
5315 case ISD::USUBSAT:
5316 assert(VT.isInteger() && "This operator does not apply to FP types!");
5317 assert(N1.getValueType() == N2.getValueType() &&
5318 N1.getValueType() == VT && "Binary operator types must match!");
5319 break;
5320 case ISD::FADD:
5321 case ISD::FSUB:
5322 case ISD::FMUL:
5323 case ISD::FDIV:
5324 case ISD::FREM:
5325 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5326 assert(N1.getValueType() == N2.getValueType() &&
5327 N1.getValueType() == VT && "Binary operator types must match!");
5328 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
5329 return V;
5330 break;
5331 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
5332 assert(N1.getValueType() == VT &&
5333 N1.getValueType().isFloatingPoint() &&
5334 N2.getValueType().isFloatingPoint() &&
5335 "Invalid FCOPYSIGN!");
5336 break;
5337 case ISD::SHL:
5338 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5339 APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue();
5340 APInt ShiftImm = N2C->getAPIntValue();
5341 return getVScale(DL, VT, MulImm << ShiftImm);
5342 }
5343 LLVM_FALLTHROUGH;
5344 case ISD::SRA:
5345 case ISD::SRL:
5346 if (SDValue V = simplifyShift(N1, N2))
5347 return V;
5348 LLVM_FALLTHROUGH;
5349 case ISD::ROTL:
5350 case ISD::ROTR:
5351 assert(VT == N1.getValueType() &&
5352 "Shift operators' return type must be the same as their first arg");
5353 assert(VT.isInteger() && N2.getValueType().isInteger() &&
5354 "Shifts only work on integers");
5355 assert((!VT.isVector() || VT == N2.getValueType()) &&
5356 "Vector shift amounts must have the same type as their first arg");
5357 // Verify that the shift amount VT is big enough to hold valid shift
5358 // amounts. This catches things like trying to shift an i1024 value by an
5359 // i8, which is easy to fall into in generic code that uses
5360 // TLI.getShiftAmount().
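// For example, an i1024 value admits shift amounts up to 1023, which needs a
// shift-amount type of at least 10 bits, so an i8 amount would trip the
// assert below.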
5361 assert(N2.getValueType().getScalarSizeInBits().getFixedSize() >= 5362 Log2_32_Ceil(VT.getScalarSizeInBits().getFixedSize()) && 5363 "Invalid use of small shift amount with oversized value!"); 5364 5365 // Always fold shifts of i1 values so the code generator doesn't need to 5366 // handle them. Since we know the size of the shift has to be less than the 5367 // size of the value, the shift/rotate count is guaranteed to be zero. 5368 if (VT == MVT::i1) 5369 return N1; 5370 if (N2C && N2C->isNullValue()) 5371 return N1; 5372 break; 5373 case ISD::FP_ROUND: 5374 assert(VT.isFloatingPoint() && 5375 N1.getValueType().isFloatingPoint() && 5376 VT.bitsLE(N1.getValueType()) && 5377 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && 5378 "Invalid FP_ROUND!"); 5379 if (N1.getValueType() == VT) return N1; // noop conversion. 5380 break; 5381 case ISD::AssertSext: 5382 case ISD::AssertZext: { 5383 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5384 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5385 assert(VT.isInteger() && EVT.isInteger() && 5386 "Cannot *_EXTEND_INREG FP types"); 5387 assert(!EVT.isVector() && 5388 "AssertSExt/AssertZExt type should be the vector element type " 5389 "rather than the vector type!"); 5390 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!"); 5391 if (VT.getScalarType() == EVT) return N1; // noop assertion. 5392 break; 5393 } 5394 case ISD::SIGN_EXTEND_INREG: { 5395 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5396 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5397 assert(VT.isInteger() && EVT.isInteger() && 5398 "Cannot *_EXTEND_INREG FP types"); 5399 assert(EVT.isVector() == VT.isVector() && 5400 "SIGN_EXTEND_INREG type should be vector iff the operand " 5401 "type is vector!"); 5402 assert((!EVT.isVector() || 5403 EVT.getVectorElementCount() == VT.getVectorElementCount()) && 5404 "Vector element counts must match in SIGN_EXTEND_INREG"); 5405 assert(EVT.bitsLE(VT) && "Not extending!"); 5406 if (EVT == VT) return N1; // Not actually extending 5407 5408 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 5409 unsigned FromBits = EVT.getScalarSizeInBits(); 5410 Val <<= Val.getBitWidth() - FromBits; 5411 Val.ashrInPlace(Val.getBitWidth() - FromBits); 5412 return getConstant(Val, DL, ConstantVT); 5413 }; 5414 5415 if (N1C) { 5416 const APInt &Val = N1C->getAPIntValue(); 5417 return SignExtendInReg(Val, VT); 5418 } 5419 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 5420 SmallVector<SDValue, 8> Ops; 5421 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 5422 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5423 SDValue Op = N1.getOperand(i); 5424 if (Op.isUndef()) { 5425 Ops.push_back(getUNDEF(OpVT)); 5426 continue; 5427 } 5428 ConstantSDNode *C = cast<ConstantSDNode>(Op); 5429 APInt Val = C->getAPIntValue(); 5430 Ops.push_back(SignExtendInReg(Val, OpVT)); 5431 } 5432 return getBuildVector(VT, DL, Ops); 5433 } 5434 break; 5435 } 5436 case ISD::EXTRACT_VECTOR_ELT: 5437 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() && 5438 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \ 5439 element type of the vector."); 5440 5441 // Extract from an undefined value or using an undefined index is undefined. 5442 if (N1.isUndef() || N2.isUndef()) 5443 return getUNDEF(VT); 5444 5445 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length 5446 // vectors. For scalable vectors we will provide appropriate support for 5447 // dealing with arbitrary indices. 
5448 if (N2C && N1.getValueType().isFixedLengthVector() && 5449 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) 5450 return getUNDEF(VT); 5451 5452 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 5453 // expanding copies of large vectors from registers. This only works for 5454 // fixed length vectors, since we need to know the exact number of 5455 // elements. 5456 if (N2C && N1.getOperand(0).getValueType().isFixedLengthVector() && 5457 N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0) { 5458 unsigned Factor = 5459 N1.getOperand(0).getValueType().getVectorNumElements(); 5460 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 5461 N1.getOperand(N2C->getZExtValue() / Factor), 5462 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL)); 5463 } 5464 5465 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while 5466 // lowering is expanding large vector constants. 5467 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR || 5468 N1.getOpcode() == ISD::SPLAT_VECTOR)) { 5469 assert((N1.getOpcode() != ISD::BUILD_VECTOR || 5470 N1.getValueType().isFixedLengthVector()) && 5471 "BUILD_VECTOR used for scalable vectors"); 5472 unsigned Index = 5473 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0; 5474 SDValue Elt = N1.getOperand(Index); 5475 5476 if (VT != Elt.getValueType()) 5477 // If the vector element type is not legal, the BUILD_VECTOR operands 5478 // are promoted and implicitly truncated, and the result implicitly 5479 // extended. Make that explicit here. 5480 Elt = getAnyExtOrTrunc(Elt, DL, VT); 5481 5482 return Elt; 5483 } 5484 5485 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 5486 // operations are lowered to scalars. 5487 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 5488 // If the indices are the same, return the inserted element else 5489 // if the indices are known different, extract the element from 5490 // the original vector. 5491 SDValue N1Op2 = N1.getOperand(2); 5492 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); 5493 5494 if (N1Op2C && N2C) { 5495 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 5496 if (VT == N1.getOperand(1).getValueType()) 5497 return N1.getOperand(1); 5498 else 5499 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 5500 } 5501 5502 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 5503 } 5504 } 5505 5506 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed 5507 // when vector types are scalarized and v1iX is legal. 5508 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx). 5509 // Here we are completely ignoring the extract element index (N2), 5510 // which is fine for fixed width vectors, since any index other than 0 5511 // is undefined anyway. However, this cannot be ignored for scalable 5512 // vectors - in theory we could support this, but we don't want to do this 5513 // without a profitability check. 
5514 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5515 N1.getValueType().isFixedLengthVector() && 5516 N1.getValueType().getVectorNumElements() == 1) { 5517 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), 5518 N1.getOperand(1)); 5519 } 5520 break; 5521 case ISD::EXTRACT_ELEMENT: 5522 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 5523 assert(!N1.getValueType().isVector() && !VT.isVector() && 5524 (N1.getValueType().isInteger() == VT.isInteger()) && 5525 N1.getValueType() != VT && 5526 "Wrong types for EXTRACT_ELEMENT!"); 5527 5528 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 5529 // 64-bit integers into 32-bit parts. Instead of building the extract of 5530 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 5531 if (N1.getOpcode() == ISD::BUILD_PAIR) 5532 return N1.getOperand(N2C->getZExtValue()); 5533 5534 // EXTRACT_ELEMENT of a constant int is also very common. 5535 if (N1C) { 5536 unsigned ElementSize = VT.getSizeInBits(); 5537 unsigned Shift = ElementSize * N2C->getZExtValue(); 5538 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift); 5539 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT); 5540 } 5541 break; 5542 case ISD::EXTRACT_SUBVECTOR: 5543 EVT N1VT = N1.getValueType(); 5544 assert(VT.isVector() && N1VT.isVector() && 5545 "Extract subvector VTs must be vectors!"); 5546 assert(VT.getVectorElementType() == N1VT.getVectorElementType() && 5547 "Extract subvector VTs must have the same element type!"); 5548 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) && 5549 "Cannot extract a scalable vector from a fixed length vector!"); 5550 assert((VT.isScalableVector() != N1VT.isScalableVector() || 5551 VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) && 5552 "Extract subvector must be from larger vector to smaller vector!"); 5553 assert(N2C && "Extract subvector index must be a constant"); 5554 assert((VT.isScalableVector() != N1VT.isScalableVector() || 5555 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <= 5556 N1VT.getVectorMinNumElements()) && 5557 "Extract subvector overflow!"); 5558 5559 // Trivial extraction. 5560 if (VT == N1VT) 5561 return N1; 5562 5563 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. 5564 if (N1.isUndef()) 5565 return getUNDEF(VT); 5566 5567 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of 5568 // the concat have the same type as the extract. 5569 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && 5570 N1.getNumOperands() > 0 && VT == N1.getOperand(0).getValueType()) { 5571 unsigned Factor = VT.getVectorMinNumElements(); 5572 return N1.getOperand(N2C->getZExtValue() / Factor); 5573 } 5574 5575 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created 5576 // during shuffle legalization. 5577 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 5578 VT == N1.getOperand(1).getValueType()) 5579 return N1.getOperand(1); 5580 break; 5581 } 5582 5583 // Perform trivial constant folding. 5584 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2})) 5585 return SV; 5586 5587 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2)) 5588 return V; 5589 5590 // Canonicalize an UNDEF to the RHS, even over a constant. 
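// For example, (add undef, C) is handled below as (add C, undef) because ADD
// is commutative, whereas a non-commutative opcode such as SUB takes the
// explicit undef folds in the switch instead.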
5591 if (N1.isUndef()) { 5592 if (TLI->isCommutativeBinOp(Opcode)) { 5593 std::swap(N1, N2); 5594 } else { 5595 switch (Opcode) { 5596 case ISD::SIGN_EXTEND_INREG: 5597 case ISD::SUB: 5598 return getUNDEF(VT); // fold op(undef, arg2) -> undef 5599 case ISD::UDIV: 5600 case ISD::SDIV: 5601 case ISD::UREM: 5602 case ISD::SREM: 5603 case ISD::SSUBSAT: 5604 case ISD::USUBSAT: 5605 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 5606 } 5607 } 5608 } 5609 5610 // Fold a bunch of operators when the RHS is undef. 5611 if (N2.isUndef()) { 5612 switch (Opcode) { 5613 case ISD::XOR: 5614 if (N1.isUndef()) 5615 // Handle undef ^ undef -> 0 special case. This is a common 5616 // idiom (misuse). 5617 return getConstant(0, DL, VT); 5618 LLVM_FALLTHROUGH; 5619 case ISD::ADD: 5620 case ISD::SUB: 5621 case ISD::UDIV: 5622 case ISD::SDIV: 5623 case ISD::UREM: 5624 case ISD::SREM: 5625 return getUNDEF(VT); // fold op(arg1, undef) -> undef 5626 case ISD::MUL: 5627 case ISD::AND: 5628 case ISD::SSUBSAT: 5629 case ISD::USUBSAT: 5630 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 5631 case ISD::OR: 5632 case ISD::SADDSAT: 5633 case ISD::UADDSAT: 5634 return getAllOnesConstant(DL, VT); 5635 } 5636 } 5637 5638 // Memoize this node if possible. 5639 SDNode *N; 5640 SDVTList VTs = getVTList(VT); 5641 SDValue Ops[] = {N1, N2}; 5642 if (VT != MVT::Glue) { 5643 FoldingSetNodeID ID; 5644 AddNodeIDNode(ID, Opcode, VTs, Ops); 5645 void *IP = nullptr; 5646 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5647 E->intersectFlagsWith(Flags); 5648 return SDValue(E, 0); 5649 } 5650 5651 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5652 N->setFlags(Flags); 5653 createOperands(N, Ops); 5654 CSEMap.InsertNode(N, IP); 5655 } else { 5656 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5657 createOperands(N, Ops); 5658 } 5659 5660 InsertNode(N); 5661 SDValue V = SDValue(N, 0); 5662 NewSDValueDbgMsg(V, "Creating new node: ", this); 5663 return V; 5664 } 5665 5666 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5667 SDValue N1, SDValue N2, SDValue N3, 5668 const SDNodeFlags Flags) { 5669 // Perform various simplifications. 5670 switch (Opcode) { 5671 case ISD::FMA: { 5672 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5673 assert(N1.getValueType() == VT && N2.getValueType() == VT && 5674 N3.getValueType() == VT && "FMA types must match!"); 5675 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5676 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5677 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 5678 if (N1CFP && N2CFP && N3CFP) { 5679 APFloat V1 = N1CFP->getValueAPF(); 5680 const APFloat &V2 = N2CFP->getValueAPF(); 5681 const APFloat &V3 = N3CFP->getValueAPF(); 5682 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 5683 return getConstantFP(V1, DL, VT); 5684 } 5685 break; 5686 } 5687 case ISD::BUILD_VECTOR: { 5688 // Attempt to simplify BUILD_VECTOR. 
5689 SDValue Ops[] = {N1, N2, N3}; 5690 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5691 return V; 5692 break; 5693 } 5694 case ISD::CONCAT_VECTORS: { 5695 SDValue Ops[] = {N1, N2, N3}; 5696 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5697 return V; 5698 break; 5699 } 5700 case ISD::SETCC: { 5701 assert(VT.isInteger() && "SETCC result type must be an integer!"); 5702 assert(N1.getValueType() == N2.getValueType() && 5703 "SETCC operands must have the same type!"); 5704 assert(VT.isVector() == N1.getValueType().isVector() && 5705 "SETCC type should be vector iff the operand type is vector!"); 5706 assert((!VT.isVector() || VT.getVectorElementCount() == 5707 N1.getValueType().getVectorElementCount()) && 5708 "SETCC vector element counts must match!"); 5709 // Use FoldSetCC to simplify SETCC's. 5710 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 5711 return V; 5712 // Vector constant folding. 5713 SDValue Ops[] = {N1, N2, N3}; 5714 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 5715 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 5716 return V; 5717 } 5718 break; 5719 } 5720 case ISD::SELECT: 5721 case ISD::VSELECT: 5722 if (SDValue V = simplifySelect(N1, N2, N3)) 5723 return V; 5724 break; 5725 case ISD::VECTOR_SHUFFLE: 5726 llvm_unreachable("should use getVectorShuffle constructor!"); 5727 case ISD::INSERT_VECTOR_ELT: { 5728 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 5729 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except 5730 // for scalable vectors where we will generate appropriate code to 5731 // deal with out-of-bounds cases correctly. 5732 if (N3C && N1.getValueType().isFixedLengthVector() && 5733 N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 5734 return getUNDEF(VT); 5735 5736 // Undefined index can be assumed out-of-bounds, so that's UNDEF too. 5737 if (N3.isUndef()) 5738 return getUNDEF(VT); 5739 5740 // If the inserted element is an UNDEF, just use the input vector. 5741 if (N2.isUndef()) 5742 return N1; 5743 5744 break; 5745 } 5746 case ISD::INSERT_SUBVECTOR: { 5747 // Inserting undef into undef is still undef. 5748 if (N1.isUndef() && N2.isUndef()) 5749 return getUNDEF(VT); 5750 5751 EVT N2VT = N2.getValueType(); 5752 assert(VT == N1.getValueType() && 5753 "Dest and insert subvector source types must match!"); 5754 assert(VT.isVector() && N2VT.isVector() && 5755 "Insert subvector VTs must be vectors!"); 5756 assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) && 5757 "Cannot insert a scalable vector into a fixed length vector!"); 5758 assert((VT.isScalableVector() != N2VT.isScalableVector() || 5759 VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) && 5760 "Insert subvector must be from smaller vector to larger vector!"); 5761 assert(isa<ConstantSDNode>(N3) && 5762 "Insert subvector index must be constant"); 5763 assert((VT.isScalableVector() != N2VT.isScalableVector() || 5764 (N2VT.getVectorMinNumElements() + 5765 cast<ConstantSDNode>(N3)->getZExtValue()) <= 5766 VT.getVectorMinNumElements()) && 5767 "Insert subvector overflow!"); 5768 5769 // Trivial insertion. 5770 if (VT == N2VT) 5771 return N2; 5772 5773 // If this is an insert of an extracted vector into an undef vector, we 5774 // can just use the input to the extract. 
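// For example, (insert_subvector undef, (extract_subvector X, Idx), Idx)
// folds to X below when X already has the result type.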
5775 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5776 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT) 5777 return N2.getOperand(0); 5778 break; 5779 } 5780 case ISD::BITCAST: 5781 // Fold bit_convert nodes from a type to themselves. 5782 if (N1.getValueType() == VT) 5783 return N1; 5784 break; 5785 } 5786 5787 // Memoize node if it doesn't produce a flag. 5788 SDNode *N; 5789 SDVTList VTs = getVTList(VT); 5790 SDValue Ops[] = {N1, N2, N3}; 5791 if (VT != MVT::Glue) { 5792 FoldingSetNodeID ID; 5793 AddNodeIDNode(ID, Opcode, VTs, Ops); 5794 void *IP = nullptr; 5795 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5796 E->intersectFlagsWith(Flags); 5797 return SDValue(E, 0); 5798 } 5799 5800 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5801 N->setFlags(Flags); 5802 createOperands(N, Ops); 5803 CSEMap.InsertNode(N, IP); 5804 } else { 5805 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5806 createOperands(N, Ops); 5807 } 5808 5809 InsertNode(N); 5810 SDValue V = SDValue(N, 0); 5811 NewSDValueDbgMsg(V, "Creating new node: ", this); 5812 return V; 5813 } 5814 5815 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5816 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 5817 SDValue Ops[] = { N1, N2, N3, N4 }; 5818 return getNode(Opcode, DL, VT, Ops); 5819 } 5820 5821 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5822 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 5823 SDValue N5) { 5824 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5825 return getNode(Opcode, DL, VT, Ops); 5826 } 5827 5828 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 5829 /// the incoming stack arguments to be loaded from the stack. 5830 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 5831 SmallVector<SDValue, 8> ArgChains; 5832 5833 // Include the original chain at the beginning of the list. When this is 5834 // used by target LowerCall hooks, this helps legalize find the 5835 // CALLSEQ_BEGIN node. 5836 ArgChains.push_back(Chain); 5837 5838 // Add a chain value for each stack argument. 5839 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 5840 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 5841 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 5842 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 5843 if (FI->getIndex() < 0) 5844 ArgChains.push_back(SDValue(L, 1)); 5845 5846 // Build a tokenfactor for all the chains. 5847 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 5848 } 5849 5850 /// getMemsetValue - Vectorized representation of the memset value 5851 /// operand. 
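/// For example, a constant fill byte of 0xAB widened to an i32 store type
/// becomes the splat constant 0xABABABAB; a non-constant byte is widened by
/// multiplying with the 0x01010101... pattern below.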
5852 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 5853 const SDLoc &dl) { 5854 assert(!Value.isUndef()); 5855 5856 unsigned NumBits = VT.getScalarSizeInBits(); 5857 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 5858 assert(C->getAPIntValue().getBitWidth() == 8); 5859 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 5860 if (VT.isInteger()) { 5861 bool IsOpaque = VT.getSizeInBits() > 64 || 5862 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); 5863 return DAG.getConstant(Val, dl, VT, false, IsOpaque); 5864 } 5865 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 5866 VT); 5867 } 5868 5869 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 5870 EVT IntVT = VT.getScalarType(); 5871 if (!IntVT.isInteger()) 5872 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 5873 5874 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 5875 if (NumBits > 8) { 5876 // Use a multiplication with 0x010101... to extend the input to the 5877 // required length. 5878 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 5879 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 5880 DAG.getConstant(Magic, dl, IntVT)); 5881 } 5882 5883 if (VT != Value.getValueType() && !VT.isInteger()) 5884 Value = DAG.getBitcast(VT.getScalarType(), Value); 5885 if (VT != Value.getValueType()) 5886 Value = DAG.getSplatBuildVector(VT, dl, Value); 5887 5888 return Value; 5889 } 5890 5891 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 5892 /// used when a memcpy is turned into a memset when the source is a constant 5893 /// string ptr. 5894 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 5895 const TargetLowering &TLI, 5896 const ConstantDataArraySlice &Slice) { 5897 // Handle vector with all elements zero. 5898 if (Slice.Array == nullptr) { 5899 if (VT.isInteger()) 5900 return DAG.getConstant(0, dl, VT); 5901 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 5902 return DAG.getConstantFP(0.0, dl, VT); 5903 else if (VT.isVector()) { 5904 unsigned NumElts = VT.getVectorNumElements(); 5905 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; 5906 return DAG.getNode(ISD::BITCAST, dl, VT, 5907 DAG.getConstant(0, dl, 5908 EVT::getVectorVT(*DAG.getContext(), 5909 EltVT, NumElts))); 5910 } else 5911 llvm_unreachable("Expected type!"); 5912 } 5913 5914 assert(!VT.isVector() && "Can't handle vector type here!"); 5915 unsigned NumVTBits = VT.getSizeInBits(); 5916 unsigned NumVTBytes = NumVTBits / 8; 5917 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 5918 5919 APInt Val(NumVTBits, 0); 5920 if (DAG.getDataLayout().isLittleEndian()) { 5921 for (unsigned i = 0; i != NumBytes; ++i) 5922 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 5923 } else { 5924 for (unsigned i = 0; i != NumBytes; ++i) 5925 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 5926 } 5927 5928 // If the "cost" of materializing the integer immediate is less than the cost 5929 // of a load, then it is cost effective to turn the load into the immediate. 
5930 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 5931 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 5932 return DAG.getConstant(Val, dl, VT); 5933 return SDValue(nullptr, 0); 5934 } 5935 5936 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, int64_t Offset, 5937 const SDLoc &DL, 5938 const SDNodeFlags Flags) { 5939 EVT VT = Base.getValueType(); 5940 return getMemBasePlusOffset(Base, getConstant(Offset, DL, VT), DL, Flags); 5941 } 5942 5943 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset, 5944 const SDLoc &DL, 5945 const SDNodeFlags Flags) { 5946 assert(Offset.getValueType().isInteger()); 5947 EVT BasePtrVT = Ptr.getValueType(); 5948 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags); 5949 } 5950 5951 /// Returns true if memcpy source is constant data. 5952 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 5953 uint64_t SrcDelta = 0; 5954 GlobalAddressSDNode *G = nullptr; 5955 if (Src.getOpcode() == ISD::GlobalAddress) 5956 G = cast<GlobalAddressSDNode>(Src); 5957 else if (Src.getOpcode() == ISD::ADD && 5958 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 5959 Src.getOperand(1).getOpcode() == ISD::Constant) { 5960 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 5961 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 5962 } 5963 if (!G) 5964 return false; 5965 5966 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 5967 SrcDelta + G->getOffset()); 5968 } 5969 5970 static bool shouldLowerMemFuncForSize(const MachineFunction &MF, 5971 SelectionDAG &DAG) { 5972 // On Darwin, -Os means optimize for size without hurting performance, so 5973 // only really optimize for size when -Oz (MinSize) is used. 5974 if (MF.getTarget().getTargetTriple().isOSDarwin()) 5975 return MF.getFunction().hasMinSize(); 5976 return DAG.shouldOptForSize(); 5977 } 5978 5979 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, 5980 SmallVector<SDValue, 32> &OutChains, unsigned From, 5981 unsigned To, SmallVector<SDValue, 16> &OutLoadChains, 5982 SmallVector<SDValue, 16> &OutStoreChains) { 5983 assert(OutLoadChains.size() && "Missing loads in memcpy inlining"); 5984 assert(OutStoreChains.size() && "Missing stores in memcpy inlining"); 5985 SmallVector<SDValue, 16> GluedLoadChains; 5986 for (unsigned i = From; i < To; ++i) { 5987 OutChains.push_back(OutLoadChains[i]); 5988 GluedLoadChains.push_back(OutLoadChains[i]); 5989 } 5990 5991 // Chain for all loads. 5992 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 5993 GluedLoadChains); 5994 5995 for (unsigned i = From; i < To; ++i) { 5996 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]); 5997 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(), 5998 ST->getBasePtr(), ST->getMemoryVT(), 5999 ST->getMemOperand()); 6000 OutChains.push_back(NewStore); 6001 } 6002 } 6003 6004 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 6005 SDValue Chain, SDValue Dst, SDValue Src, 6006 uint64_t Size, Align Alignment, 6007 bool isVol, bool AlwaysInline, 6008 MachinePointerInfo DstPtrInfo, 6009 MachinePointerInfo SrcPtrInfo) { 6010 // Turn a memcpy of undef to nop. 6011 // FIXME: We need to honor volatile even is Src is undef. 6012 if (Src.isUndef()) 6013 return Chain; 6014 6015 // Expand memcpy to a series of load and store ops if the size operand falls 6016 // below a certain threshold. 
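// For example, an 11-byte copy may become an 8-byte load/store plus a 4-byte
// pair that overlaps the tail by one byte; the exact type sequence is chosen
// by findOptimalMemOpLowering and is target-dependent.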
6017 // TODO: In the AlwaysInline case, if the size is big then generate a loop 6018 // rather than maybe a humongous number of loads and stores. 6019 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6020 const DataLayout &DL = DAG.getDataLayout(); 6021 LLVMContext &C = *DAG.getContext(); 6022 std::vector<EVT> MemOps; 6023 bool DstAlignCanChange = false; 6024 MachineFunction &MF = DAG.getMachineFunction(); 6025 MachineFrameInfo &MFI = MF.getFrameInfo(); 6026 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6027 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6028 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6029 DstAlignCanChange = true; 6030 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src); 6031 if (!SrcAlign || Alignment > *SrcAlign) 6032 SrcAlign = Alignment; 6033 assert(SrcAlign && "SrcAlign must be set"); 6034 ConstantDataArraySlice Slice; 6035 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); 6036 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 6037 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 6038 const MemOp Op = isZeroConstant 6039 ? MemOp::Set(Size, DstAlignCanChange, Alignment, 6040 /*IsZeroMemset*/ true, isVol) 6041 : MemOp::Copy(Size, DstAlignCanChange, Alignment, 6042 *SrcAlign, isVol, CopyFromConstant); 6043 if (!TLI.findOptimalMemOpLowering( 6044 MemOps, Limit, Op, DstPtrInfo.getAddrSpace(), 6045 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes())) 6046 return SDValue(); 6047 6048 if (DstAlignCanChange) { 6049 Type *Ty = MemOps[0].getTypeForEVT(C); 6050 Align NewAlign = DL.getABITypeAlign(Ty); 6051 6052 // Don't promote to an alignment that would require dynamic stack 6053 // realignment. 6054 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 6055 if (!TRI->needsStackRealignment(MF)) 6056 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) 6057 NewAlign = NewAlign / 2; 6058 6059 if (NewAlign > Alignment) { 6060 // Give the stack frame object a larger alignment if needed. 6061 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 6062 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6063 Alignment = NewAlign; 6064 } 6065 } 6066 6067 MachineMemOperand::Flags MMOFlags = 6068 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 6069 SmallVector<SDValue, 16> OutLoadChains; 6070 SmallVector<SDValue, 16> OutStoreChains; 6071 SmallVector<SDValue, 32> OutChains; 6072 unsigned NumMemOps = MemOps.size(); 6073 uint64_t SrcOff = 0, DstOff = 0; 6074 for (unsigned i = 0; i != NumMemOps; ++i) { 6075 EVT VT = MemOps[i]; 6076 unsigned VTSize = VT.getSizeInBits() / 8; 6077 SDValue Value, Store; 6078 6079 if (VTSize > Size) { 6080 // Issuing an unaligned load / store pair that overlaps with the previous 6081 // pair. Adjust the offset accordingly. 6082 assert(i == NumMemOps-1 && i != 0); 6083 SrcOff -= VTSize - Size; 6084 DstOff -= VTSize - Size; 6085 } 6086 6087 if (CopyFromConstant && 6088 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 6089 // It's unlikely a store of a vector immediate can be done in a single 6090 // instruction. It would require a load from a constantpool first. 6091 // We only handle zero vectors here. 6092 // FIXME: Handle other cases where store of vector immediate is done in 6093 // a single instruction. 6094 ConstantDataArraySlice SubSlice; 6095 if (SrcOff < Slice.Length) { 6096 SubSlice = Slice; 6097 SubSlice.move(SrcOff); 6098 } else { 6099 // This is an out-of-bounds access and hence UB. Pretend we read zero. 
6100 SubSlice.Array = nullptr; 6101 SubSlice.Offset = 0; 6102 SubSlice.Length = VTSize; 6103 } 6104 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); 6105 if (Value.getNode()) { 6106 Store = DAG.getStore( 6107 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6108 DstPtrInfo.getWithOffset(DstOff), Alignment.value(), MMOFlags); 6109 OutChains.push_back(Store); 6110 } 6111 } 6112 6113 if (!Store.getNode()) { 6114 // The type might not be legal for the target. This should only happen 6115 // if the type is smaller than a legal type, as on PPC, so the right 6116 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 6117 // to Load/Store if NVT==VT. 6118 // FIXME does the case above also need this? 6119 EVT NVT = TLI.getTypeToTransformTo(C, VT); 6120 assert(NVT.bitsGE(VT)); 6121 6122 bool isDereferenceable = 6123 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 6124 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 6125 if (isDereferenceable) 6126 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 6127 6128 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 6129 DAG.getMemBasePlusOffset(Src, SrcOff, dl), 6130 SrcPtrInfo.getWithOffset(SrcOff), VT, 6131 commonAlignment(*SrcAlign, SrcOff).value(), 6132 SrcMMOFlags); 6133 OutLoadChains.push_back(Value.getValue(1)); 6134 6135 Store = DAG.getTruncStore( 6136 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6137 DstPtrInfo.getWithOffset(DstOff), VT, Alignment.value(), MMOFlags); 6138 OutStoreChains.push_back(Store); 6139 } 6140 SrcOff += VTSize; 6141 DstOff += VTSize; 6142 Size -= VTSize; 6143 } 6144 6145 unsigned GluedLdStLimit = MaxLdStGlue == 0 ? 6146 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue; 6147 unsigned NumLdStInMemcpy = OutStoreChains.size(); 6148 6149 if (NumLdStInMemcpy) { 6150 // It may be that memcpy might be converted to memset if it's memcpy 6151 // of constants. In such a case, we won't have loads and stores, but 6152 // just stores. In the absence of loads, there is nothing to gang up. 6153 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) { 6154 // If target does not care, just leave as it. 6155 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) { 6156 OutChains.push_back(OutLoadChains[i]); 6157 OutChains.push_back(OutStoreChains[i]); 6158 } 6159 } else { 6160 // Ld/St less than/equal limit set by target. 6161 if (NumLdStInMemcpy <= GluedLdStLimit) { 6162 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 6163 NumLdStInMemcpy, OutLoadChains, 6164 OutStoreChains); 6165 } else { 6166 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit; 6167 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit; 6168 unsigned GlueIter = 0; 6169 6170 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) { 6171 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit; 6172 unsigned IndexTo = NumLdStInMemcpy - GlueIter; 6173 6174 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo, 6175 OutLoadChains, OutStoreChains); 6176 GlueIter += GluedLdStLimit; 6177 } 6178 6179 // Residual ld/st. 
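// For example, with 10 load/store pairs and a glue limit of 4, the chunks
// [6,10) and [2,6) are token-factored by the loop above and the remaining
// pairs [0,2) are chained here.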
6180 if (RemainingLdStInMemcpy) { 6181 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 6182 RemainingLdStInMemcpy, OutLoadChains, 6183 OutStoreChains); 6184 } 6185 } 6186 } 6187 } 6188 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6189 } 6190 6191 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 6192 SDValue Chain, SDValue Dst, SDValue Src, 6193 uint64_t Size, Align Alignment, 6194 bool isVol, bool AlwaysInline, 6195 MachinePointerInfo DstPtrInfo, 6196 MachinePointerInfo SrcPtrInfo) { 6197 // Turn a memmove of undef to nop. 6198 // FIXME: We need to honor volatile even is Src is undef. 6199 if (Src.isUndef()) 6200 return Chain; 6201 6202 // Expand memmove to a series of load and store ops if the size operand falls 6203 // below a certain threshold. 6204 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6205 const DataLayout &DL = DAG.getDataLayout(); 6206 LLVMContext &C = *DAG.getContext(); 6207 std::vector<EVT> MemOps; 6208 bool DstAlignCanChange = false; 6209 MachineFunction &MF = DAG.getMachineFunction(); 6210 MachineFrameInfo &MFI = MF.getFrameInfo(); 6211 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6212 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6213 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6214 DstAlignCanChange = true; 6215 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src); 6216 if (!SrcAlign || Alignment > *SrcAlign) 6217 SrcAlign = Alignment; 6218 assert(SrcAlign && "SrcAlign must be set"); 6219 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 6220 if (!TLI.findOptimalMemOpLowering( 6221 MemOps, Limit, 6222 MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign, 6223 /*IsVolatile*/ true), 6224 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), 6225 MF.getFunction().getAttributes())) 6226 return SDValue(); 6227 6228 if (DstAlignCanChange) { 6229 Type *Ty = MemOps[0].getTypeForEVT(C); 6230 Align NewAlign = DL.getABITypeAlign(Ty); 6231 if (NewAlign > Alignment) { 6232 // Give the stack frame object a larger alignment if needed. 6233 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 6234 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6235 Alignment = NewAlign; 6236 } 6237 } 6238 6239 MachineMemOperand::Flags MMOFlags = 6240 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 6241 uint64_t SrcOff = 0, DstOff = 0; 6242 SmallVector<SDValue, 8> LoadValues; 6243 SmallVector<SDValue, 8> LoadChains; 6244 SmallVector<SDValue, 8> OutChains; 6245 unsigned NumMemOps = MemOps.size(); 6246 for (unsigned i = 0; i < NumMemOps; i++) { 6247 EVT VT = MemOps[i]; 6248 unsigned VTSize = VT.getSizeInBits() / 8; 6249 SDValue Value; 6250 6251 bool isDereferenceable = 6252 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 6253 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 6254 if (isDereferenceable) 6255 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 6256 6257 Value = DAG.getLoad( 6258 VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), 6259 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign->value(), SrcMMOFlags); 6260 LoadValues.push_back(Value); 6261 LoadChains.push_back(Value.getValue(1)); 6262 SrcOff += VTSize; 6263 } 6264 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 6265 OutChains.clear(); 6266 for (unsigned i = 0; i < NumMemOps; i++) { 6267 EVT VT = MemOps[i]; 6268 unsigned VTSize = VT.getSizeInBits() / 8; 6269 SDValue Store; 6270 6271 Store = DAG.getStore( 6272 Chain, dl, LoadValues[i], DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6273 DstPtrInfo.getWithOffset(DstOff), Alignment.value(), MMOFlags); 6274 OutChains.push_back(Store); 6275 DstOff += VTSize; 6276 } 6277 6278 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6279 } 6280 6281 /// Lower the call to 'memset' intrinsic function into a series of store 6282 /// operations. 6283 /// 6284 /// \param DAG Selection DAG where lowered code is placed. 6285 /// \param dl Link to corresponding IR location. 6286 /// \param Chain Control flow dependency. 6287 /// \param Dst Pointer to destination memory location. 6288 /// \param Src Value of byte to write into the memory. 6289 /// \param Size Number of bytes to write. 6290 /// \param Alignment Alignment of the destination in bytes. 6291 /// \param isVol True if destination is volatile. 6292 /// \param DstPtrInfo IR information on the memory pointer. 6293 /// \returns New head in the control flow, if lowering was successful, empty 6294 /// SDValue otherwise. 6295 /// 6296 /// The function tries to replace 'llvm.memset' intrinsic with several store 6297 /// operations and value calculation code. This is usually profitable for small 6298 /// memory size. 6299 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, 6300 SDValue Chain, SDValue Dst, SDValue Src, 6301 uint64_t Size, Align Alignment, bool isVol, 6302 MachinePointerInfo DstPtrInfo) { 6303 // Turn a memset of undef to nop. 6304 // FIXME: We need to honor volatile even is Src is undef. 6305 if (Src.isUndef()) 6306 return Chain; 6307 6308 // Expand memset to a series of load/store ops if the size operand 6309 // falls below a certain threshold. 
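// For example, a 16-byte memset of the value 0 may become two i64 stores of
// zero on a 64-bit target, while a fill byte of 0xAB stores the splat
// pattern produced by getMemsetValue; the store types are target-dependent.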
6310 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6311 std::vector<EVT> MemOps; 6312 bool DstAlignCanChange = false; 6313 MachineFunction &MF = DAG.getMachineFunction(); 6314 MachineFrameInfo &MFI = MF.getFrameInfo(); 6315 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6316 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6317 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6318 DstAlignCanChange = true; 6319 bool IsZeroVal = 6320 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 6321 if (!TLI.findOptimalMemOpLowering( 6322 MemOps, TLI.getMaxStoresPerMemset(OptSize), 6323 MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol), 6324 DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes())) 6325 return SDValue(); 6326 6327 if (DstAlignCanChange) { 6328 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 6329 Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty); 6330 if (NewAlign > Alignment) { 6331 // Give the stack frame object a larger alignment if needed. 6332 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 6333 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6334 Alignment = NewAlign; 6335 } 6336 } 6337 6338 SmallVector<SDValue, 8> OutChains; 6339 uint64_t DstOff = 0; 6340 unsigned NumMemOps = MemOps.size(); 6341 6342 // Find the largest store and generate the bit pattern for it. 6343 EVT LargestVT = MemOps[0]; 6344 for (unsigned i = 1; i < NumMemOps; i++) 6345 if (MemOps[i].bitsGT(LargestVT)) 6346 LargestVT = MemOps[i]; 6347 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 6348 6349 for (unsigned i = 0; i < NumMemOps; i++) { 6350 EVT VT = MemOps[i]; 6351 unsigned VTSize = VT.getSizeInBits() / 8; 6352 if (VTSize > Size) { 6353 // Issuing an unaligned load / store pair that overlaps with the previous 6354 // pair. Adjust the offset accordingly. 6355 assert(i == NumMemOps-1 && i != 0); 6356 DstOff -= VTSize - Size; 6357 } 6358 6359 // If this store is smaller than the largest store see whether we can get 6360 // the smaller value for free with a truncate. 6361 SDValue Value = MemSetValue; 6362 if (VT.bitsLT(LargestVT)) { 6363 if (!LargestVT.isVector() && !VT.isVector() && 6364 TLI.isTruncateFree(LargestVT, VT)) 6365 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 6366 else 6367 Value = getMemsetValue(Src, VT, DAG, dl); 6368 } 6369 assert(Value.getValueType() == VT && "Value with wrong type."); 6370 SDValue Store = DAG.getStore( 6371 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6372 DstPtrInfo.getWithOffset(DstOff), Alignment.value(), 6373 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 6374 OutChains.push_back(Store); 6375 DstOff += VT.getSizeInBits() / 8; 6376 Size -= VTSize; 6377 } 6378 6379 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6380 } 6381 6382 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 6383 unsigned AS) { 6384 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 6385 // pointer operands can be losslessly bitcasted to pointers of address space 0 6386 if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) { 6387 report_fatal_error("cannot lower memory intrinsic in address space " + 6388 Twine(AS)); 6389 } 6390 } 6391 6392 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 6393 SDValue Src, SDValue Size, Align Alignment, 6394 bool isVol, bool AlwaysInline, bool isTailCall, 6395 MachinePointerInfo DstPtrInfo, 6396 MachinePointerInfo SrcPtrInfo) { 6397 // Check to see if we should lower the memcpy to loads and stores first. 6398 // For cases within the target-specified limits, this is the best choice. 6399 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6400 if (ConstantSize) { 6401 // Memcpy with size zero? Just return the original chain. 6402 if (ConstantSize->isNullValue()) 6403 return Chain; 6404 6405 SDValue Result = getMemcpyLoadsAndStores( 6406 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, 6407 isVol, false, DstPtrInfo, SrcPtrInfo); 6408 if (Result.getNode()) 6409 return Result; 6410 } 6411 6412 // Then check to see if we should lower the memcpy with target-specific 6413 // code. If the target chooses to do this, this is the next best. 6414 if (TSI) { 6415 SDValue Result = TSI->EmitTargetCodeForMemcpy( 6416 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline, 6417 DstPtrInfo, SrcPtrInfo); 6418 if (Result.getNode()) 6419 return Result; 6420 } 6421 6422 // If we really need inline code and the target declined to provide it, 6423 // use a (potentially long) sequence of loads and stores. 6424 if (AlwaysInline) { 6425 assert(ConstantSize && "AlwaysInline requires a constant size!"); 6426 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6427 ConstantSize->getZExtValue(), Alignment, 6428 isVol, true, DstPtrInfo, SrcPtrInfo); 6429 } 6430 6431 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6432 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6433 6434 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 6435 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 6436 // respect volatile, so they may do things like read or write memory 6437 // beyond the given memory regions. But fixing this isn't easy, and most 6438 // people don't care. 6439 6440 // Emit a library call. 
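  // The call assembled below corresponds to the C library prototype
  //   void *memcpy(void *dst, const void *src, size_t n);
  // so Dst and Src are passed as i8* and Size as the target's intptr type,
  // and the (dst) return value is ignored via setDiscardResult().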
6441 TargetLowering::ArgListTy Args; 6442 TargetLowering::ArgListEntry Entry; 6443 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6444 Entry.Node = Dst; Args.push_back(Entry); 6445 Entry.Node = Src; Args.push_back(Entry); 6446 6447 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6448 Entry.Node = Size; Args.push_back(Entry); 6449 // FIXME: pass in SDLoc 6450 TargetLowering::CallLoweringInfo CLI(*this); 6451 CLI.setDebugLoc(dl) 6452 .setChain(Chain) 6453 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 6454 Dst.getValueType().getTypeForEVT(*getContext()), 6455 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 6456 TLI->getPointerTy(getDataLayout())), 6457 std::move(Args)) 6458 .setDiscardResult() 6459 .setTailCall(isTailCall); 6460 6461 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6462 return CallResult.second; 6463 } 6464 6465 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, 6466 SDValue Dst, unsigned DstAlign, 6467 SDValue Src, unsigned SrcAlign, 6468 SDValue Size, Type *SizeTy, 6469 unsigned ElemSz, bool isTailCall, 6470 MachinePointerInfo DstPtrInfo, 6471 MachinePointerInfo SrcPtrInfo) { 6472 // Emit a library call. 6473 TargetLowering::ArgListTy Args; 6474 TargetLowering::ArgListEntry Entry; 6475 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6476 Entry.Node = Dst; 6477 Args.push_back(Entry); 6478 6479 Entry.Node = Src; 6480 Args.push_back(Entry); 6481 6482 Entry.Ty = SizeTy; 6483 Entry.Node = Size; 6484 Args.push_back(Entry); 6485 6486 RTLIB::Libcall LibraryCall = 6487 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6488 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6489 report_fatal_error("Unsupported element size"); 6490 6491 TargetLowering::CallLoweringInfo CLI(*this); 6492 CLI.setDebugLoc(dl) 6493 .setChain(Chain) 6494 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6495 Type::getVoidTy(*getContext()), 6496 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6497 TLI->getPointerTy(getDataLayout())), 6498 std::move(Args)) 6499 .setDiscardResult() 6500 .setTailCall(isTailCall); 6501 6502 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6503 return CallResult.second; 6504 } 6505 6506 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 6507 SDValue Src, SDValue Size, Align Alignment, 6508 bool isVol, bool isTailCall, 6509 MachinePointerInfo DstPtrInfo, 6510 MachinePointerInfo SrcPtrInfo) { 6511 // Check to see if we should lower the memmove to loads and stores first. 6512 // For cases within the target-specified limits, this is the best choice. 6513 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6514 if (ConstantSize) { 6515 // Memmove with size zero? Just return the original chain. 6516 if (ConstantSize->isNullValue()) 6517 return Chain; 6518 6519 SDValue Result = getMemmoveLoadsAndStores( 6520 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, 6521 isVol, false, DstPtrInfo, SrcPtrInfo); 6522 if (Result.getNode()) 6523 return Result; 6524 } 6525 6526 // Then check to see if we should lower the memmove with target-specific 6527 // code. If the target chooses to do this, this is the next best. 
6528 if (TSI) { 6529 SDValue Result = 6530 TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, 6531 Alignment, isVol, DstPtrInfo, SrcPtrInfo); 6532 if (Result.getNode()) 6533 return Result; 6534 } 6535 6536 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6537 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6538 6539 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 6540 // not be safe. See memcpy above for more details. 6541 6542 // Emit a library call. 6543 TargetLowering::ArgListTy Args; 6544 TargetLowering::ArgListEntry Entry; 6545 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6546 Entry.Node = Dst; Args.push_back(Entry); 6547 Entry.Node = Src; Args.push_back(Entry); 6548 6549 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6550 Entry.Node = Size; Args.push_back(Entry); 6551 // FIXME: pass in SDLoc 6552 TargetLowering::CallLoweringInfo CLI(*this); 6553 CLI.setDebugLoc(dl) 6554 .setChain(Chain) 6555 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 6556 Dst.getValueType().getTypeForEVT(*getContext()), 6557 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 6558 TLI->getPointerTy(getDataLayout())), 6559 std::move(Args)) 6560 .setDiscardResult() 6561 .setTailCall(isTailCall); 6562 6563 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6564 return CallResult.second; 6565 } 6566 6567 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl, 6568 SDValue Dst, unsigned DstAlign, 6569 SDValue Src, unsigned SrcAlign, 6570 SDValue Size, Type *SizeTy, 6571 unsigned ElemSz, bool isTailCall, 6572 MachinePointerInfo DstPtrInfo, 6573 MachinePointerInfo SrcPtrInfo) { 6574 // Emit a library call. 6575 TargetLowering::ArgListTy Args; 6576 TargetLowering::ArgListEntry Entry; 6577 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6578 Entry.Node = Dst; 6579 Args.push_back(Entry); 6580 6581 Entry.Node = Src; 6582 Args.push_back(Entry); 6583 6584 Entry.Ty = SizeTy; 6585 Entry.Node = Size; 6586 Args.push_back(Entry); 6587 6588 RTLIB::Libcall LibraryCall = 6589 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6590 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6591 report_fatal_error("Unsupported element size"); 6592 6593 TargetLowering::CallLoweringInfo CLI(*this); 6594 CLI.setDebugLoc(dl) 6595 .setChain(Chain) 6596 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6597 Type::getVoidTy(*getContext()), 6598 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6599 TLI->getPointerTy(getDataLayout())), 6600 std::move(Args)) 6601 .setDiscardResult() 6602 .setTailCall(isTailCall); 6603 6604 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6605 return CallResult.second; 6606 } 6607 6608 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 6609 SDValue Src, SDValue Size, Align Alignment, 6610 bool isVol, bool isTailCall, 6611 MachinePointerInfo DstPtrInfo) { 6612 // Check to see if we should lower the memset to stores first. 6613 // For cases within the target-specified limits, this is the best choice. 6614 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6615 if (ConstantSize) { 6616 // Memset with size zero? Just return the original chain. 
6617 if (ConstantSize->isNullValue()) 6618 return Chain; 6619 6620 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src, 6621 ConstantSize->getZExtValue(), Alignment, 6622 isVol, DstPtrInfo); 6623 6624 if (Result.getNode()) 6625 return Result; 6626 } 6627 6628 // Then check to see if we should lower the memset with target-specific 6629 // code. If the target chooses to do this, this is the next best. 6630 if (TSI) { 6631 SDValue Result = TSI->EmitTargetCodeForMemset( 6632 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo); 6633 if (Result.getNode()) 6634 return Result; 6635 } 6636 6637 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6638 6639 // Emit a library call. 6640 TargetLowering::ArgListTy Args; 6641 TargetLowering::ArgListEntry Entry; 6642 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext()); 6643 Args.push_back(Entry); 6644 Entry.Node = Src; 6645 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 6646 Args.push_back(Entry); 6647 Entry.Node = Size; 6648 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6649 Args.push_back(Entry); 6650 6651 // FIXME: pass in SDLoc 6652 TargetLowering::CallLoweringInfo CLI(*this); 6653 CLI.setDebugLoc(dl) 6654 .setChain(Chain) 6655 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 6656 Dst.getValueType().getTypeForEVT(*getContext()), 6657 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 6658 TLI->getPointerTy(getDataLayout())), 6659 std::move(Args)) 6660 .setDiscardResult() 6661 .setTailCall(isTailCall); 6662 6663 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6664 return CallResult.second; 6665 } 6666 6667 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl, 6668 SDValue Dst, unsigned DstAlign, 6669 SDValue Value, SDValue Size, Type *SizeTy, 6670 unsigned ElemSz, bool isTailCall, 6671 MachinePointerInfo DstPtrInfo) { 6672 // Emit a library call. 
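  // Sketch of the argument list built below (SizeTy is supplied by the
  // caller): Dst is passed with the target's intptr type, Value as an i8, and
  // Size with SizeTy. The callee is the element-size-specific
  // MEMSET_ELEMENT_UNORDERED_ATOMIC libcall; an unsupported ElemSz is
  // rejected below with a fatal error.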
6673 TargetLowering::ArgListTy Args; 6674 TargetLowering::ArgListEntry Entry; 6675 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6676 Entry.Node = Dst; 6677 Args.push_back(Entry); 6678 6679 Entry.Ty = Type::getInt8Ty(*getContext()); 6680 Entry.Node = Value; 6681 Args.push_back(Entry); 6682 6683 Entry.Ty = SizeTy; 6684 Entry.Node = Size; 6685 Args.push_back(Entry); 6686 6687 RTLIB::Libcall LibraryCall = 6688 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6689 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6690 report_fatal_error("Unsupported element size"); 6691 6692 TargetLowering::CallLoweringInfo CLI(*this); 6693 CLI.setDebugLoc(dl) 6694 .setChain(Chain) 6695 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6696 Type::getVoidTy(*getContext()), 6697 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6698 TLI->getPointerTy(getDataLayout())), 6699 std::move(Args)) 6700 .setDiscardResult() 6701 .setTailCall(isTailCall); 6702 6703 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6704 return CallResult.second; 6705 } 6706 6707 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6708 SDVTList VTList, ArrayRef<SDValue> Ops, 6709 MachineMemOperand *MMO) { 6710 FoldingSetNodeID ID; 6711 ID.AddInteger(MemVT.getRawBits()); 6712 AddNodeIDNode(ID, Opcode, VTList, Ops); 6713 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6714 void* IP = nullptr; 6715 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6716 cast<AtomicSDNode>(E)->refineAlignment(MMO); 6717 return SDValue(E, 0); 6718 } 6719 6720 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6721 VTList, MemVT, MMO); 6722 createOperands(N, Ops); 6723 6724 CSEMap.InsertNode(N, IP); 6725 InsertNode(N); 6726 return SDValue(N, 0); 6727 } 6728 6729 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 6730 EVT MemVT, SDVTList VTs, SDValue Chain, 6731 SDValue Ptr, SDValue Cmp, SDValue Swp, 6732 MachineMemOperand *MMO) { 6733 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 6734 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 6735 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 6736 6737 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 6738 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6739 } 6740 6741 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6742 SDValue Chain, SDValue Ptr, SDValue Val, 6743 MachineMemOperand *MMO) { 6744 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 6745 Opcode == ISD::ATOMIC_LOAD_SUB || 6746 Opcode == ISD::ATOMIC_LOAD_AND || 6747 Opcode == ISD::ATOMIC_LOAD_CLR || 6748 Opcode == ISD::ATOMIC_LOAD_OR || 6749 Opcode == ISD::ATOMIC_LOAD_XOR || 6750 Opcode == ISD::ATOMIC_LOAD_NAND || 6751 Opcode == ISD::ATOMIC_LOAD_MIN || 6752 Opcode == ISD::ATOMIC_LOAD_MAX || 6753 Opcode == ISD::ATOMIC_LOAD_UMIN || 6754 Opcode == ISD::ATOMIC_LOAD_UMAX || 6755 Opcode == ISD::ATOMIC_LOAD_FADD || 6756 Opcode == ISD::ATOMIC_LOAD_FSUB || 6757 Opcode == ISD::ATOMIC_SWAP || 6758 Opcode == ISD::ATOMIC_STORE) && 6759 "Invalid Atomic Op"); 6760 6761 EVT VT = Val.getValueType(); 6762 6763 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? 
getVTList(MVT::Other) : 6764 getVTList(VT, MVT::Other); 6765 SDValue Ops[] = {Chain, Ptr, Val}; 6766 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6767 } 6768 6769 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6770 EVT VT, SDValue Chain, SDValue Ptr, 6771 MachineMemOperand *MMO) { 6772 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 6773 6774 SDVTList VTs = getVTList(VT, MVT::Other); 6775 SDValue Ops[] = {Chain, Ptr}; 6776 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6777 } 6778 6779 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 6780 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 6781 if (Ops.size() == 1) 6782 return Ops[0]; 6783 6784 SmallVector<EVT, 4> VTs; 6785 VTs.reserve(Ops.size()); 6786 for (unsigned i = 0; i < Ops.size(); ++i) 6787 VTs.push_back(Ops[i].getValueType()); 6788 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 6789 } 6790 6791 SDValue SelectionDAG::getMemIntrinsicNode( 6792 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 6793 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, 6794 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) { 6795 if (!Size && MemVT.isScalableVector()) 6796 Size = MemoryLocation::UnknownSize; 6797 else if (!Size) 6798 Size = MemVT.getStoreSize(); 6799 6800 MachineFunction &MF = getMachineFunction(); 6801 MachineMemOperand *MMO = 6802 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo); 6803 6804 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 6805 } 6806 6807 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 6808 SDVTList VTList, 6809 ArrayRef<SDValue> Ops, EVT MemVT, 6810 MachineMemOperand *MMO) { 6811 assert((Opcode == ISD::INTRINSIC_VOID || 6812 Opcode == ISD::INTRINSIC_W_CHAIN || 6813 Opcode == ISD::PREFETCH || 6814 ((int)Opcode <= std::numeric_limits<int>::max() && 6815 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 6816 "Opcode is not a memory-accessing opcode!"); 6817 6818 // Memoize the node unless it returns a flag. 6819 MemIntrinsicSDNode *N; 6820 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6821 FoldingSetNodeID ID; 6822 AddNodeIDNode(ID, Opcode, VTList, Ops); 6823 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 6824 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 6825 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6826 void *IP = nullptr; 6827 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6828 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 6829 return SDValue(E, 0); 6830 } 6831 6832 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6833 VTList, MemVT, MMO); 6834 createOperands(N, Ops); 6835 6836 CSEMap.InsertNode(N, IP); 6837 } else { 6838 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6839 VTList, MemVT, MMO); 6840 createOperands(N, Ops); 6841 } 6842 InsertNode(N); 6843 SDValue V(N, 0); 6844 NewSDValueDbgMsg(V, "Creating new node: ", this); 6845 return V; 6846 } 6847 6848 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, 6849 SDValue Chain, int FrameIndex, 6850 int64_t Size, int64_t Offset) { 6851 const unsigned Opcode = IsStart ? 
ISD::LIFETIME_START : ISD::LIFETIME_END; 6852 const auto VTs = getVTList(MVT::Other); 6853 SDValue Ops[2] = { 6854 Chain, 6855 getFrameIndex(FrameIndex, 6856 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()), 6857 true)}; 6858 6859 FoldingSetNodeID ID; 6860 AddNodeIDNode(ID, Opcode, VTs, Ops); 6861 ID.AddInteger(FrameIndex); 6862 ID.AddInteger(Size); 6863 ID.AddInteger(Offset); 6864 void *IP = nullptr; 6865 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6866 return SDValue(E, 0); 6867 6868 LifetimeSDNode *N = newSDNode<LifetimeSDNode>( 6869 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset); 6870 createOperands(N, Ops); 6871 CSEMap.InsertNode(N, IP); 6872 InsertNode(N); 6873 SDValue V(N, 0); 6874 NewSDValueDbgMsg(V, "Creating new node: ", this); 6875 return V; 6876 } 6877 6878 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6879 /// MachinePointerInfo record from it. This is particularly useful because the 6880 /// code generator has many cases where it doesn't bother passing in a 6881 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6882 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6883 SelectionDAG &DAG, SDValue Ptr, 6884 int64_t Offset = 0) { 6885 // If this is FI+Offset, we can model it. 6886 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 6887 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 6888 FI->getIndex(), Offset); 6889 6890 // If this is (FI+Offset1)+Offset2, we can model it. 6891 if (Ptr.getOpcode() != ISD::ADD || 6892 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 6893 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 6894 return Info; 6895 6896 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 6897 return MachinePointerInfo::getFixedStack( 6898 DAG.getMachineFunction(), FI, 6899 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 6900 } 6901 6902 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6903 /// MachinePointerInfo record from it. This is particularly useful because the 6904 /// code generator has many cases where it doesn't bother passing in a 6905 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6906 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6907 SelectionDAG &DAG, SDValue Ptr, 6908 SDValue OffsetOp) { 6909 // If the 'Offset' value isn't a constant, we can't handle this. 6910 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 6911 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); 6912 if (OffsetOp.isUndef()) 6913 return InferPointerInfo(Info, DAG, Ptr); 6914 return Info; 6915 } 6916 6917 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6918 EVT VT, const SDLoc &dl, SDValue Chain, 6919 SDValue Ptr, SDValue Offset, 6920 MachinePointerInfo PtrInfo, EVT MemVT, 6921 Align Alignment, 6922 MachineMemOperand::Flags MMOFlags, 6923 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6924 assert(Chain.getValueType() == MVT::Other && 6925 "Invalid chain type"); 6926 6927 MMOFlags |= MachineMemOperand::MOLoad; 6928 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 6929 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 6930 // clients. 
6931 if (PtrInfo.V.isNull()) 6932 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); 6933 6934 uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize()); 6935 MachineFunction &MF = getMachineFunction(); 6936 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, 6937 Alignment, AAInfo, Ranges); 6938 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 6939 } 6940 6941 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6942 EVT VT, const SDLoc &dl, SDValue Chain, 6943 SDValue Ptr, SDValue Offset, EVT MemVT, 6944 MachineMemOperand *MMO) { 6945 if (VT == MemVT) { 6946 ExtType = ISD::NON_EXTLOAD; 6947 } else if (ExtType == ISD::NON_EXTLOAD) { 6948 assert(VT == MemVT && "Non-extending load from different memory type!"); 6949 } else { 6950 // Extending load. 6951 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 6952 "Should only be an extending load, not truncating!"); 6953 assert(VT.isInteger() == MemVT.isInteger() && 6954 "Cannot convert from FP to Int or Int -> FP!"); 6955 assert(VT.isVector() == MemVT.isVector() && 6956 "Cannot use an ext load to convert to or from a vector!"); 6957 assert((!VT.isVector() || 6958 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 6959 "Cannot use an ext load to change the number of vector elements!"); 6960 } 6961 6962 bool Indexed = AM != ISD::UNINDEXED; 6963 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 6964 6965 SDVTList VTs = Indexed ? 6966 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); 6967 SDValue Ops[] = { Chain, Ptr, Offset }; 6968 FoldingSetNodeID ID; 6969 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); 6970 ID.AddInteger(MemVT.getRawBits()); 6971 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( 6972 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); 6973 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6974 void *IP = nullptr; 6975 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6976 cast<LoadSDNode>(E)->refineAlignment(MMO); 6977 return SDValue(E, 0); 6978 } 6979 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6980 ExtType, MemVT, MMO); 6981 createOperands(N, Ops); 6982 6983 CSEMap.InsertNode(N, IP); 6984 InsertNode(N); 6985 SDValue V(N, 0); 6986 NewSDValueDbgMsg(V, "Creating new node: ", this); 6987 return V; 6988 } 6989 6990 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6991 SDValue Ptr, MachinePointerInfo PtrInfo, 6992 MaybeAlign Alignment, 6993 MachineMemOperand::Flags MMOFlags, 6994 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6995 SDValue Undef = getUNDEF(Ptr.getValueType()); 6996 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6997 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); 6998 } 6999 7000 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 7001 SDValue Ptr, MachineMemOperand *MMO) { 7002 SDValue Undef = getUNDEF(Ptr.getValueType()); 7003 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 7004 VT, MMO); 7005 } 7006 7007 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 7008 EVT VT, SDValue Chain, SDValue Ptr, 7009 MachinePointerInfo PtrInfo, EVT MemVT, 7010 MaybeAlign Alignment, 7011 MachineMemOperand::Flags MMOFlags, 7012 const AAMDNodes &AAInfo) { 7013 SDValue Undef = getUNDEF(Ptr.getValueType()); 7014 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, 7015 MemVT, Alignment, MMOFlags, AAInfo); 
7016 } 7017 7018 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 7019 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, 7020 MachineMemOperand *MMO) { 7021 SDValue Undef = getUNDEF(Ptr.getValueType()); 7022 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, 7023 MemVT, MMO); 7024 } 7025 7026 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, 7027 SDValue Base, SDValue Offset, 7028 ISD::MemIndexedMode AM) { 7029 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); 7030 assert(LD->getOffset().isUndef() && "Load is already a indexed load!"); 7031 // Don't propagate the invariant or dereferenceable flags. 7032 auto MMOFlags = 7033 LD->getMemOperand()->getFlags() & 7034 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 7035 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 7036 LD->getChain(), Base, Offset, LD->getPointerInfo(), 7037 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 7038 LD->getAAInfo()); 7039 } 7040 7041 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7042 SDValue Ptr, MachinePointerInfo PtrInfo, 7043 Align Alignment, 7044 MachineMemOperand::Flags MMOFlags, 7045 const AAMDNodes &AAInfo) { 7046 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 7047 7048 MMOFlags |= MachineMemOperand::MOStore; 7049 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 7050 7051 if (PtrInfo.V.isNull()) 7052 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 7053 7054 MachineFunction &MF = getMachineFunction(); 7055 uint64_t Size = 7056 MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize()); 7057 MachineMemOperand *MMO = 7058 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo); 7059 return getStore(Chain, dl, Val, Ptr, MMO); 7060 } 7061 7062 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7063 SDValue Ptr, MachineMemOperand *MMO) { 7064 assert(Chain.getValueType() == MVT::Other && 7065 "Invalid chain type"); 7066 EVT VT = Val.getValueType(); 7067 SDVTList VTs = getVTList(MVT::Other); 7068 SDValue Undef = getUNDEF(Ptr.getValueType()); 7069 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 7070 FoldingSetNodeID ID; 7071 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7072 ID.AddInteger(VT.getRawBits()); 7073 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 7074 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 7075 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7076 void *IP = nullptr; 7077 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7078 cast<StoreSDNode>(E)->refineAlignment(MMO); 7079 return SDValue(E, 0); 7080 } 7081 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7082 ISD::UNINDEXED, false, VT, MMO); 7083 createOperands(N, Ops); 7084 7085 CSEMap.InsertNode(N, IP); 7086 InsertNode(N); 7087 SDValue V(N, 0); 7088 NewSDValueDbgMsg(V, "Creating new node: ", this); 7089 return V; 7090 } 7091 7092 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7093 SDValue Ptr, MachinePointerInfo PtrInfo, 7094 EVT SVT, Align Alignment, 7095 MachineMemOperand::Flags MMOFlags, 7096 const AAMDNodes &AAInfo) { 7097 assert(Chain.getValueType() == MVT::Other && 7098 "Invalid chain type"); 7099 7100 MMOFlags |= MachineMemOperand::MOStore; 7101 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 7102 7103 if (PtrInfo.V.isNull()) 7104 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 7105 7106 MachineFunction &MF = getMachineFunction(); 7107 MachineMemOperand 
*MMO = MF.getMachineMemOperand( 7108 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 7109 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 7110 } 7111 7112 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7113 SDValue Ptr, EVT SVT, 7114 MachineMemOperand *MMO) { 7115 EVT VT = Val.getValueType(); 7116 7117 assert(Chain.getValueType() == MVT::Other && 7118 "Invalid chain type"); 7119 if (VT == SVT) 7120 return getStore(Chain, dl, Val, Ptr, MMO); 7121 7122 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 7123 "Should only be a truncating store, not extending!"); 7124 assert(VT.isInteger() == SVT.isInteger() && 7125 "Can't do FP-INT conversion!"); 7126 assert(VT.isVector() == SVT.isVector() && 7127 "Cannot use trunc store to convert to or from a vector!"); 7128 assert((!VT.isVector() || 7129 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 7130 "Cannot use trunc store to change the number of vector elements!"); 7131 7132 SDVTList VTs = getVTList(MVT::Other); 7133 SDValue Undef = getUNDEF(Ptr.getValueType()); 7134 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 7135 FoldingSetNodeID ID; 7136 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7137 ID.AddInteger(SVT.getRawBits()); 7138 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 7139 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 7140 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7141 void *IP = nullptr; 7142 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7143 cast<StoreSDNode>(E)->refineAlignment(MMO); 7144 return SDValue(E, 0); 7145 } 7146 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7147 ISD::UNINDEXED, true, SVT, MMO); 7148 createOperands(N, Ops); 7149 7150 CSEMap.InsertNode(N, IP); 7151 InsertNode(N); 7152 SDValue V(N, 0); 7153 NewSDValueDbgMsg(V, "Creating new node: ", this); 7154 return V; 7155 } 7156 7157 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 7158 SDValue Base, SDValue Offset, 7159 ISD::MemIndexedMode AM) { 7160 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 7161 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 7162 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 7163 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 7164 FoldingSetNodeID ID; 7165 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7166 ID.AddInteger(ST->getMemoryVT().getRawBits()); 7167 ID.AddInteger(ST->getRawSubclassData()); 7168 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 7169 void *IP = nullptr; 7170 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 7171 return SDValue(E, 0); 7172 7173 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7174 ST->isTruncatingStore(), ST->getMemoryVT(), 7175 ST->getMemOperand()); 7176 createOperands(N, Ops); 7177 7178 CSEMap.InsertNode(N, IP); 7179 InsertNode(N); 7180 SDValue V(N, 0); 7181 NewSDValueDbgMsg(V, "Creating new node: ", this); 7182 return V; 7183 } 7184 7185 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 7186 SDValue Base, SDValue Offset, SDValue Mask, 7187 SDValue PassThru, EVT MemVT, 7188 MachineMemOperand *MMO, 7189 ISD::MemIndexedMode AM, 7190 ISD::LoadExtType ExtTy, bool isExpanding) { 7191 bool Indexed = AM != ISD::UNINDEXED; 7192 assert((Indexed || Offset.isUndef()) && 7193 "Unindexed masked load with an offset!"); 7194 SDVTList VTs = Indexed ? 
getVTList(VT, Base.getValueType(), MVT::Other) 7195 : getVTList(VT, MVT::Other); 7196 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru}; 7197 FoldingSetNodeID ID; 7198 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 7199 ID.AddInteger(MemVT.getRawBits()); 7200 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 7201 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO)); 7202 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7203 void *IP = nullptr; 7204 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7205 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 7206 return SDValue(E, 0); 7207 } 7208 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7209 AM, ExtTy, isExpanding, MemVT, MMO); 7210 createOperands(N, Ops); 7211 7212 CSEMap.InsertNode(N, IP); 7213 InsertNode(N); 7214 SDValue V(N, 0); 7215 NewSDValueDbgMsg(V, "Creating new node: ", this); 7216 return V; 7217 } 7218 7219 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, 7220 SDValue Base, SDValue Offset, 7221 ISD::MemIndexedMode AM) { 7222 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad); 7223 assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!"); 7224 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base, 7225 Offset, LD->getMask(), LD->getPassThru(), 7226 LD->getMemoryVT(), LD->getMemOperand(), AM, 7227 LD->getExtensionType(), LD->isExpandingLoad()); 7228 } 7229 7230 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 7231 SDValue Val, SDValue Base, SDValue Offset, 7232 SDValue Mask, EVT MemVT, 7233 MachineMemOperand *MMO, 7234 ISD::MemIndexedMode AM, bool IsTruncating, 7235 bool IsCompressing) { 7236 assert(Chain.getValueType() == MVT::Other && 7237 "Invalid chain type"); 7238 bool Indexed = AM != ISD::UNINDEXED; 7239 assert((Indexed || Offset.isUndef()) && 7240 "Unindexed masked store with an offset!"); 7241 SDVTList VTs = Indexed ? 
getVTList(Base.getValueType(), MVT::Other) 7242 : getVTList(MVT::Other); 7243 SDValue Ops[] = {Chain, Val, Base, Offset, Mask}; 7244 FoldingSetNodeID ID; 7245 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 7246 ID.AddInteger(MemVT.getRawBits()); 7247 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 7248 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO)); 7249 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7250 void *IP = nullptr; 7251 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7252 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 7253 return SDValue(E, 0); 7254 } 7255 auto *N = 7256 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7257 IsTruncating, IsCompressing, MemVT, MMO); 7258 createOperands(N, Ops); 7259 7260 CSEMap.InsertNode(N, IP); 7261 InsertNode(N); 7262 SDValue V(N, 0); 7263 NewSDValueDbgMsg(V, "Creating new node: ", this); 7264 return V; 7265 } 7266 7267 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, 7268 SDValue Base, SDValue Offset, 7269 ISD::MemIndexedMode AM) { 7270 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore); 7271 assert(ST->getOffset().isUndef() && 7272 "Masked store is already a indexed store!"); 7273 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset, 7274 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(), 7275 AM, ST->isTruncatingStore(), ST->isCompressingStore()); 7276 } 7277 7278 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 7279 ArrayRef<SDValue> Ops, 7280 MachineMemOperand *MMO, 7281 ISD::MemIndexType IndexType) { 7282 assert(Ops.size() == 6 && "Incompatible number of operands"); 7283 7284 FoldingSetNodeID ID; 7285 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 7286 ID.AddInteger(VT.getRawBits()); 7287 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 7288 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7289 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7290 void *IP = nullptr; 7291 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7292 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 7293 return SDValue(E, 0); 7294 } 7295 7296 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7297 VTs, VT, MMO, IndexType); 7298 createOperands(N, Ops); 7299 7300 assert(N->getPassThru().getValueType() == N->getValueType(0) && 7301 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 7302 assert(N->getMask().getValueType().getVectorNumElements() == 7303 N->getValueType(0).getVectorNumElements() && 7304 "Vector width mismatch between mask and data"); 7305 assert(N->getIndex().getValueType().getVectorNumElements() >= 7306 N->getValueType(0).getVectorNumElements() && 7307 "Vector width mismatch between index and data"); 7308 assert(isa<ConstantSDNode>(N->getScale()) && 7309 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7310 "Scale should be a constant power of 2"); 7311 7312 CSEMap.InsertNode(N, IP); 7313 InsertNode(N); 7314 SDValue V(N, 0); 7315 NewSDValueDbgMsg(V, "Creating new node: ", this); 7316 return V; 7317 } 7318 7319 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 7320 ArrayRef<SDValue> Ops, 7321 MachineMemOperand *MMO, 7322 ISD::MemIndexType IndexType) { 7323 assert(Ops.size() == 6 && "Incompatible number of operands"); 7324 7325 FoldingSetNodeID ID; 7326 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 7327 ID.AddInteger(VT.getRawBits()); 7328 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 
7329 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7330 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7331 void *IP = nullptr; 7332 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7333 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 7334 return SDValue(E, 0); 7335 } 7336 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7337 VTs, VT, MMO, IndexType); 7338 createOperands(N, Ops); 7339 7340 assert(N->getMask().getValueType().getVectorNumElements() == 7341 N->getValue().getValueType().getVectorNumElements() && 7342 "Vector width mismatch between mask and data"); 7343 assert(N->getIndex().getValueType().getVectorNumElements() >= 7344 N->getValue().getValueType().getVectorNumElements() && 7345 "Vector width mismatch between index and data"); 7346 assert(isa<ConstantSDNode>(N->getScale()) && 7347 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7348 "Scale should be a constant power of 2"); 7349 7350 CSEMap.InsertNode(N, IP); 7351 InsertNode(N); 7352 SDValue V(N, 0); 7353 NewSDValueDbgMsg(V, "Creating new node: ", this); 7354 return V; 7355 } 7356 7357 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) { 7358 // select undef, T, F --> T (if T is a constant), otherwise F 7359 // select, ?, undef, F --> F 7360 // select, ?, T, undef --> T 7361 if (Cond.isUndef()) 7362 return isConstantValueOfAnyType(T) ? T : F; 7363 if (T.isUndef()) 7364 return F; 7365 if (F.isUndef()) 7366 return T; 7367 7368 // select true, T, F --> T 7369 // select false, T, F --> F 7370 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond)) 7371 return CondC->isNullValue() ? F : T; 7372 7373 // TODO: This should simplify VSELECT with constant condition using something 7374 // like this (but check boolean contents to be complete?): 7375 // if (ISD::isBuildVectorAllOnes(Cond.getNode())) 7376 // return T; 7377 // if (ISD::isBuildVectorAllZeros(Cond.getNode())) 7378 // return F; 7379 7380 // select ?, T, T --> T 7381 if (T == F) 7382 return T; 7383 7384 return SDValue(); 7385 } 7386 7387 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) { 7388 // shift undef, Y --> 0 (can always assume that the undef value is 0) 7389 if (X.isUndef()) 7390 return getConstant(0, SDLoc(X.getNode()), X.getValueType()); 7391 // shift X, undef --> undef (because it may shift by the bitwidth) 7392 if (Y.isUndef()) 7393 return getUNDEF(X.getValueType()); 7394 7395 // shift 0, Y --> 0 7396 // shift X, 0 --> X 7397 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y)) 7398 return X; 7399 7400 // shift X, C >= bitwidth(X) --> undef 7401 // All vector elements must be too big (or undef) to avoid partial undefs. 7402 auto isShiftTooBig = [X](ConstantSDNode *Val) { 7403 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits()); 7404 }; 7405 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true)) 7406 return getUNDEF(X.getValueType()); 7407 7408 return SDValue(); 7409 } 7410 7411 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, 7412 SDNodeFlags Flags) { 7413 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand 7414 // (an undef operand can be chosen to be Nan/Inf), then the result of this 7415 // operation is poison. That result can be relaxed to undef. 
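  // For example, with the 'nnan' flag set, (fadd X, undef) folds to undef
  // here: the undef operand may be chosen to be a NaN, which 'nnan' promises
  // cannot occur, so any value is a legal result.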
7416 ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true); 7417 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true); 7418 bool HasNan = (XC && XC->getValueAPF().isNaN()) || 7419 (YC && YC->getValueAPF().isNaN()); 7420 bool HasInf = (XC && XC->getValueAPF().isInfinity()) || 7421 (YC && YC->getValueAPF().isInfinity()); 7422 7423 if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef())) 7424 return getUNDEF(X.getValueType()); 7425 7426 if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef())) 7427 return getUNDEF(X.getValueType()); 7428 7429 if (!YC) 7430 return SDValue(); 7431 7432 // X + -0.0 --> X 7433 if (Opcode == ISD::FADD) 7434 if (YC->getValueAPF().isNegZero()) 7435 return X; 7436 7437 // X - +0.0 --> X 7438 if (Opcode == ISD::FSUB) 7439 if (YC->getValueAPF().isPosZero()) 7440 return X; 7441 7442 // X * 1.0 --> X 7443 // X / 1.0 --> X 7444 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV) 7445 if (YC->getValueAPF().isExactlyValue(1.0)) 7446 return X; 7447 7448 return SDValue(); 7449 } 7450 7451 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 7452 SDValue Ptr, SDValue SV, unsigned Align) { 7453 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 7454 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 7455 } 7456 7457 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7458 ArrayRef<SDUse> Ops) { 7459 switch (Ops.size()) { 7460 case 0: return getNode(Opcode, DL, VT); 7461 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 7462 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 7463 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 7464 default: break; 7465 } 7466 7467 // Copy from an SDUse array into an SDValue array for use with 7468 // the regular getNode logic. 7469 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 7470 return getNode(Opcode, DL, VT, NewOps); 7471 } 7472 7473 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7474 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7475 unsigned NumOps = Ops.size(); 7476 switch (NumOps) { 7477 case 0: return getNode(Opcode, DL, VT); 7478 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 7479 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 7480 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags); 7481 default: break; 7482 } 7483 7484 switch (Opcode) { 7485 default: break; 7486 case ISD::BUILD_VECTOR: 7487 // Attempt to simplify BUILD_VECTOR. 7488 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 7489 return V; 7490 break; 7491 case ISD::CONCAT_VECTORS: 7492 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 7493 return V; 7494 break; 7495 case ISD::SELECT_CC: 7496 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 7497 assert(Ops[0].getValueType() == Ops[1].getValueType() && 7498 "LHS and RHS of condition must have same type!"); 7499 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7500 "True and False arms of SelectCC must have same type!"); 7501 assert(Ops[2].getValueType() == VT && 7502 "select_cc node must be of same type as true and false value!"); 7503 break; 7504 case ISD::BR_CC: 7505 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 7506 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7507 "LHS/RHS of comparison should match types!"); 7508 break; 7509 } 7510 7511 // Memoize nodes. 
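  // Nodes whose result type is MVT::Glue are deliberately not CSE'd: a glue
  // result typically ties its producer to one particular consumer, so sharing
  // such a node would be incorrect. Everything else is looked up in (and, if
  // new, inserted into) the CSEMap below.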
7512 SDNode *N; 7513 SDVTList VTs = getVTList(VT); 7514 7515 if (VT != MVT::Glue) { 7516 FoldingSetNodeID ID; 7517 AddNodeIDNode(ID, Opcode, VTs, Ops); 7518 void *IP = nullptr; 7519 7520 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7521 return SDValue(E, 0); 7522 7523 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7524 createOperands(N, Ops); 7525 7526 CSEMap.InsertNode(N, IP); 7527 } else { 7528 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7529 createOperands(N, Ops); 7530 } 7531 7532 N->setFlags(Flags); 7533 InsertNode(N); 7534 SDValue V(N, 0); 7535 NewSDValueDbgMsg(V, "Creating new node: ", this); 7536 return V; 7537 } 7538 7539 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7540 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 7541 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 7542 } 7543 7544 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7545 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7546 if (VTList.NumVTs == 1) 7547 return getNode(Opcode, DL, VTList.VTs[0], Ops); 7548 7549 switch (Opcode) { 7550 case ISD::STRICT_FP_EXTEND: 7551 assert(VTList.NumVTs == 2 && Ops.size() == 2 && 7552 "Invalid STRICT_FP_EXTEND!"); 7553 assert(VTList.VTs[0].isFloatingPoint() && 7554 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!"); 7555 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7556 "STRICT_FP_EXTEND result type should be vector iff the operand " 7557 "type is vector!"); 7558 assert((!VTList.VTs[0].isVector() || 7559 VTList.VTs[0].getVectorNumElements() == 7560 Ops[1].getValueType().getVectorNumElements()) && 7561 "Vector element count mismatch!"); 7562 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) && 7563 "Invalid fpext node, dst <= src!"); 7564 break; 7565 case ISD::STRICT_FP_ROUND: 7566 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!"); 7567 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7568 "STRICT_FP_ROUND result type should be vector iff the operand " 7569 "type is vector!"); 7570 assert((!VTList.VTs[0].isVector() || 7571 VTList.VTs[0].getVectorNumElements() == 7572 Ops[1].getValueType().getVectorNumElements()) && 7573 "Vector element count mismatch!"); 7574 assert(VTList.VTs[0].isFloatingPoint() && 7575 Ops[1].getValueType().isFloatingPoint() && 7576 VTList.VTs[0].bitsLT(Ops[1].getValueType()) && 7577 isa<ConstantSDNode>(Ops[2]) && 7578 (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 || 7579 cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) && 7580 "Invalid STRICT_FP_ROUND!"); 7581 break; 7582 #if 0 7583 // FIXME: figure out how to safely handle things like 7584 // int foo(int x) { return 1 << (x & 255); } 7585 // int bar() { return foo(256); } 7586 case ISD::SRA_PARTS: 7587 case ISD::SRL_PARTS: 7588 case ISD::SHL_PARTS: 7589 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 7590 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 7591 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7592 else if (N3.getOpcode() == ISD::AND) 7593 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 7594 // If the and is only masking out bits that cannot effect the shift, 7595 // eliminate the and. 7596 unsigned NumBits = VT.getScalarSizeInBits()*2; 7597 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 7598 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7599 } 7600 break; 7601 #endif 7602 } 7603 7604 // Memoize the node unless it returns a flag. 
7605 SDNode *N; 7606 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 7607 FoldingSetNodeID ID; 7608 AddNodeIDNode(ID, Opcode, VTList, Ops); 7609 void *IP = nullptr; 7610 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7611 return SDValue(E, 0); 7612 7613 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7614 createOperands(N, Ops); 7615 CSEMap.InsertNode(N, IP); 7616 } else { 7617 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7618 createOperands(N, Ops); 7619 } 7620 7621 N->setFlags(Flags); 7622 InsertNode(N); 7623 SDValue V(N, 0); 7624 NewSDValueDbgMsg(V, "Creating new node: ", this); 7625 return V; 7626 } 7627 7628 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7629 SDVTList VTList) { 7630 return getNode(Opcode, DL, VTList, None); 7631 } 7632 7633 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7634 SDValue N1) { 7635 SDValue Ops[] = { N1 }; 7636 return getNode(Opcode, DL, VTList, Ops); 7637 } 7638 7639 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7640 SDValue N1, SDValue N2) { 7641 SDValue Ops[] = { N1, N2 }; 7642 return getNode(Opcode, DL, VTList, Ops); 7643 } 7644 7645 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7646 SDValue N1, SDValue N2, SDValue N3) { 7647 SDValue Ops[] = { N1, N2, N3 }; 7648 return getNode(Opcode, DL, VTList, Ops); 7649 } 7650 7651 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7652 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 7653 SDValue Ops[] = { N1, N2, N3, N4 }; 7654 return getNode(Opcode, DL, VTList, Ops); 7655 } 7656 7657 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7658 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 7659 SDValue N5) { 7660 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 7661 return getNode(Opcode, DL, VTList, Ops); 7662 } 7663 7664 SDVTList SelectionDAG::getVTList(EVT VT) { 7665 return makeVTList(SDNode::getValueTypeList(VT), 1); 7666 } 7667 7668 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 7669 FoldingSetNodeID ID; 7670 ID.AddInteger(2U); 7671 ID.AddInteger(VT1.getRawBits()); 7672 ID.AddInteger(VT2.getRawBits()); 7673 7674 void *IP = nullptr; 7675 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7676 if (!Result) { 7677 EVT *Array = Allocator.Allocate<EVT>(2); 7678 Array[0] = VT1; 7679 Array[1] = VT2; 7680 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 7681 VTListMap.InsertNode(Result, IP); 7682 } 7683 return Result->getSDVTList(); 7684 } 7685 7686 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 7687 FoldingSetNodeID ID; 7688 ID.AddInteger(3U); 7689 ID.AddInteger(VT1.getRawBits()); 7690 ID.AddInteger(VT2.getRawBits()); 7691 ID.AddInteger(VT3.getRawBits()); 7692 7693 void *IP = nullptr; 7694 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7695 if (!Result) { 7696 EVT *Array = Allocator.Allocate<EVT>(3); 7697 Array[0] = VT1; 7698 Array[1] = VT2; 7699 Array[2] = VT3; 7700 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 7701 VTListMap.InsertNode(Result, IP); 7702 } 7703 return Result->getSDVTList(); 7704 } 7705 7706 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 7707 FoldingSetNodeID ID; 7708 ID.AddInteger(4U); 7709 ID.AddInteger(VT1.getRawBits()); 7710 ID.AddInteger(VT2.getRawBits()); 7711 ID.AddInteger(VT3.getRawBits()); 7712 ID.AddInteger(VT4.getRawBits()); 7713 7714 void *IP = 
nullptr; 7715 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7716 if (!Result) { 7717 EVT *Array = Allocator.Allocate<EVT>(4); 7718 Array[0] = VT1; 7719 Array[1] = VT2; 7720 Array[2] = VT3; 7721 Array[3] = VT4; 7722 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 7723 VTListMap.InsertNode(Result, IP); 7724 } 7725 return Result->getSDVTList(); 7726 } 7727 7728 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 7729 unsigned NumVTs = VTs.size(); 7730 FoldingSetNodeID ID; 7731 ID.AddInteger(NumVTs); 7732 for (unsigned index = 0; index < NumVTs; index++) { 7733 ID.AddInteger(VTs[index].getRawBits()); 7734 } 7735 7736 void *IP = nullptr; 7737 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7738 if (!Result) { 7739 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 7740 llvm::copy(VTs, Array); 7741 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 7742 VTListMap.InsertNode(Result, IP); 7743 } 7744 return Result->getSDVTList(); 7745 } 7746 7747 7748 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 7749 /// specified operands. If the resultant node already exists in the DAG, 7750 /// this does not modify the specified node, instead it returns the node that 7751 /// already exists. If the resultant node does not exist in the DAG, the 7752 /// input node is returned. As a degenerate case, if you specify the same 7753 /// input operands as the node already has, the input node is returned. 7754 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 7755 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 7756 7757 // Check to see if there is no change. 7758 if (Op == N->getOperand(0)) return N; 7759 7760 // See if the modified node already exists. 7761 void *InsertPos = nullptr; 7762 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 7763 return Existing; 7764 7765 // Nope it doesn't. Remove the node from its current place in the maps. 7766 if (InsertPos) 7767 if (!RemoveNodeFromCSEMaps(N)) 7768 InsertPos = nullptr; 7769 7770 // Now we update the operands. 7771 N->OperandList[0].set(Op); 7772 7773 updateDivergence(N); 7774 // If this gets put into a CSE map, add it. 7775 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7776 return N; 7777 } 7778 7779 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 7780 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 7781 7782 // Check to see if there is no change. 7783 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 7784 return N; // No operands changed, just return the input node. 7785 7786 // See if the modified node already exists. 7787 void *InsertPos = nullptr; 7788 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 7789 return Existing; 7790 7791 // Nope it doesn't. Remove the node from its current place in the maps. 7792 if (InsertPos) 7793 if (!RemoveNodeFromCSEMaps(N)) 7794 InsertPos = nullptr; 7795 7796 // Now we update the operands. 7797 if (N->OperandList[0] != Op1) 7798 N->OperandList[0].set(Op1); 7799 if (N->OperandList[1] != Op2) 7800 N->OperandList[1].set(Op2); 7801 7802 updateDivergence(N); 7803 // If this gets put into a CSE map, add it. 
7804 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7805 return N; 7806 } 7807 7808 SDNode *SelectionDAG:: 7809 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 7810 SDValue Ops[] = { Op1, Op2, Op3 }; 7811 return UpdateNodeOperands(N, Ops); 7812 } 7813 7814 SDNode *SelectionDAG:: 7815 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7816 SDValue Op3, SDValue Op4) { 7817 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 7818 return UpdateNodeOperands(N, Ops); 7819 } 7820 7821 SDNode *SelectionDAG:: 7822 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7823 SDValue Op3, SDValue Op4, SDValue Op5) { 7824 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 7825 return UpdateNodeOperands(N, Ops); 7826 } 7827 7828 SDNode *SelectionDAG:: 7829 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 7830 unsigned NumOps = Ops.size(); 7831 assert(N->getNumOperands() == NumOps && 7832 "Update with wrong number of operands"); 7833 7834 // If no operands changed just return the input node. 7835 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 7836 return N; 7837 7838 // See if the modified node already exists. 7839 void *InsertPos = nullptr; 7840 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 7841 return Existing; 7842 7843 // Nope it doesn't. Remove the node from its current place in the maps. 7844 if (InsertPos) 7845 if (!RemoveNodeFromCSEMaps(N)) 7846 InsertPos = nullptr; 7847 7848 // Now we update the operands. 7849 for (unsigned i = 0; i != NumOps; ++i) 7850 if (N->OperandList[i] != Ops[i]) 7851 N->OperandList[i].set(Ops[i]); 7852 7853 updateDivergence(N); 7854 // If this gets put into a CSE map, add it. 7855 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7856 return N; 7857 } 7858 7859 /// DropOperands - Release the operands and set this node to have 7860 /// zero operands. 7861 void SDNode::DropOperands() { 7862 // Unlike the code in MorphNodeTo that does this, we don't need to 7863 // watch for dead nodes here. 7864 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 7865 SDUse &Use = *I++; 7866 Use.set(SDValue()); 7867 } 7868 } 7869 7870 void SelectionDAG::setNodeMemRefs(MachineSDNode *N, 7871 ArrayRef<MachineMemOperand *> NewMemRefs) { 7872 if (NewMemRefs.empty()) { 7873 N->clearMemRefs(); 7874 return; 7875 } 7876 7877 // Check if we can avoid allocating by storing a single reference directly. 7878 if (NewMemRefs.size() == 1) { 7879 N->MemRefs = NewMemRefs[0]; 7880 N->NumMemRefs = 1; 7881 return; 7882 } 7883 7884 MachineMemOperand **MemRefsBuffer = 7885 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size()); 7886 llvm::copy(NewMemRefs, MemRefsBuffer); 7887 N->MemRefs = MemRefsBuffer; 7888 N->NumMemRefs = static_cast<int>(NewMemRefs.size()); 7889 } 7890 7891 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 7892 /// machine opcode. 
7893 /// 7894 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7895 EVT VT) { 7896 SDVTList VTs = getVTList(VT); 7897 return SelectNodeTo(N, MachineOpc, VTs, None); 7898 } 7899 7900 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7901 EVT VT, SDValue Op1) { 7902 SDVTList VTs = getVTList(VT); 7903 SDValue Ops[] = { Op1 }; 7904 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7905 } 7906 7907 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7908 EVT VT, SDValue Op1, 7909 SDValue Op2) { 7910 SDVTList VTs = getVTList(VT); 7911 SDValue Ops[] = { Op1, Op2 }; 7912 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7913 } 7914 7915 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7916 EVT VT, SDValue Op1, 7917 SDValue Op2, SDValue Op3) { 7918 SDVTList VTs = getVTList(VT); 7919 SDValue Ops[] = { Op1, Op2, Op3 }; 7920 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7921 } 7922 7923 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7924 EVT VT, ArrayRef<SDValue> Ops) { 7925 SDVTList VTs = getVTList(VT); 7926 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7927 } 7928 7929 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7930 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 7931 SDVTList VTs = getVTList(VT1, VT2); 7932 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7933 } 7934 7935 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7936 EVT VT1, EVT VT2) { 7937 SDVTList VTs = getVTList(VT1, VT2); 7938 return SelectNodeTo(N, MachineOpc, VTs, None); 7939 } 7940 7941 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7942 EVT VT1, EVT VT2, EVT VT3, 7943 ArrayRef<SDValue> Ops) { 7944 SDVTList VTs = getVTList(VT1, VT2, VT3); 7945 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7946 } 7947 7948 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7949 EVT VT1, EVT VT2, 7950 SDValue Op1, SDValue Op2) { 7951 SDVTList VTs = getVTList(VT1, VT2); 7952 SDValue Ops[] = { Op1, Op2 }; 7953 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7954 } 7955 7956 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7957 SDVTList VTs, ArrayRef<SDValue> Ops) { 7958 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 7959 // Reset the NodeID to -1. 7960 New->setNodeId(-1); 7961 if (New != N) { 7962 ReplaceAllUsesWith(N, New); 7963 RemoveDeadNode(N); 7964 } 7965 return New; 7966 } 7967 7968 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away 7969 /// the line number information on the merged node since it is not possible to 7970 /// preserve the information that the operation is associated with multiple lines. 7971 /// This will make the debugger work better at -O0, where there is a higher 7972 /// probability of having other instructions associated with that line. 7973 /// 7974 /// For IROrder, we keep the smaller of the two. 7975 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { 7976 DebugLoc NLoc = N->getDebugLoc(); 7977 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) { 7978 N->setDebugLoc(DebugLoc()); 7979 } 7980 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); 7981 N->setIROrder(Order); 7982 return N; 7983 } 7984 7985 /// MorphNodeTo - This *mutates* the specified node to have the specified 7986 /// return type, opcode, and operands. 7987 /// 7988 /// Note that MorphNodeTo returns the resultant node.
If there is already a 7989 /// node of the specified opcode and operands, it returns that node instead of 7990 /// the current one. Note that the SDLoc need not be the same. 7991 /// 7992 /// Using MorphNodeTo is faster than creating a new node and swapping it in 7993 /// with ReplaceAllUsesWith both because it often avoids allocating a new 7994 /// node, and because it doesn't require CSE recalculation for any of 7995 /// the node's users. 7996 /// 7997 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG. 7998 /// As a consequence it isn't appropriate to use from within the DAG combiner or 7999 /// the legalizer which maintain worklists that would need to be updated when 8000 /// deleting things. 8001 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, 8002 SDVTList VTs, ArrayRef<SDValue> Ops) { 8003 // If an identical node already exists, use it. 8004 void *IP = nullptr; 8005 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { 8006 FoldingSetNodeID ID; 8007 AddNodeIDNode(ID, Opc, VTs, Ops); 8008 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) 8009 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); 8010 } 8011 8012 if (!RemoveNodeFromCSEMaps(N)) 8013 IP = nullptr; 8014 8015 // Start the morphing. 8016 N->NodeType = Opc; 8017 N->ValueList = VTs.VTs; 8018 N->NumValues = VTs.NumVTs; 8019 8020 // Clear the operands list, updating used nodes to remove this from their 8021 // use list. Keep track of any operands that become dead as a result. 8022 SmallPtrSet<SDNode*, 16> DeadNodeSet; 8023 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 8024 SDUse &Use = *I++; 8025 SDNode *Used = Use.getNode(); 8026 Use.set(SDValue()); 8027 if (Used->use_empty()) 8028 DeadNodeSet.insert(Used); 8029 } 8030 8031 // For MachineNode, initialize the memory references information. 8032 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) 8033 MN->clearMemRefs(); 8034 8035 // Swap for an appropriately sized array from the recycler. 8036 removeOperands(N); 8037 createOperands(N, Ops); 8038 8039 // Delete any nodes that are still dead after adding the uses for the 8040 // new operands. 8041 if (!DeadNodeSet.empty()) { 8042 SmallVector<SDNode *, 16> DeadNodes; 8043 for (SDNode *N : DeadNodeSet) 8044 if (N->use_empty()) 8045 DeadNodes.push_back(N); 8046 RemoveDeadNodes(DeadNodes); 8047 } 8048 8049 if (IP) 8050 CSEMap.InsertNode(N, IP); // Memoize the new node. 8051 return N; 8052 } 8053 8054 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) { 8055 unsigned OrigOpc = Node->getOpcode(); 8056 unsigned NewOpc; 8057 switch (OrigOpc) { 8058 default: 8059 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!"); 8060 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 8061 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break; 8062 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 8063 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break; 8064 #include "llvm/IR/ConstrainedOps.def" 8065 } 8066 8067 assert(Node->getNumValues() == 2 && "Unexpected number of results!"); 8068 8069 // We're taking this node out of the chain, so we need to re-link things. 
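  // For example, given t2: f64,ch = strict_fadd t0, a, b, the chain result
  // t2:1 is replaced by the incoming chain t0 below, and the node is then
  // morphed into the chainless form t2: f64 = fadd a, b.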
8070 SDValue InputChain = Node->getOperand(0); 8071 SDValue OutputChain = SDValue(Node, 1); 8072 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 8073 8074 SmallVector<SDValue, 3> Ops; 8075 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) 8076 Ops.push_back(Node->getOperand(i)); 8077 8078 SDVTList VTs = getVTList(Node->getValueType(0)); 8079 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops); 8080 8081 // MorphNodeTo can operate in two ways: if an existing node with the 8082 // specified operands exists, it can just return it. Otherwise, it 8083 // updates the node in place to have the requested operands. 8084 if (Res == Node) { 8085 // If we updated the node in place, reset the node ID. To the isel, 8086 // this should be just like a newly allocated machine node. 8087 Res->setNodeId(-1); 8088 } else { 8089 ReplaceAllUsesWith(Node, Res); 8090 RemoveDeadNode(Node); 8091 } 8092 8093 return Res; 8094 } 8095 8096 /// getMachineNode - These are used for target selectors to create a new node 8097 /// with specified return type(s), MachineInstr opcode, and operands. 8098 /// 8099 /// Note that getMachineNode returns the resultant node. If there is already a 8100 /// node of the specified opcode and operands, it returns that node instead of 8101 /// the current one. 8102 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8103 EVT VT) { 8104 SDVTList VTs = getVTList(VT); 8105 return getMachineNode(Opcode, dl, VTs, None); 8106 } 8107 8108 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8109 EVT VT, SDValue Op1) { 8110 SDVTList VTs = getVTList(VT); 8111 SDValue Ops[] = { Op1 }; 8112 return getMachineNode(Opcode, dl, VTs, Ops); 8113 } 8114 8115 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8116 EVT VT, SDValue Op1, SDValue Op2) { 8117 SDVTList VTs = getVTList(VT); 8118 SDValue Ops[] = { Op1, Op2 }; 8119 return getMachineNode(Opcode, dl, VTs, Ops); 8120 } 8121 8122 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8123 EVT VT, SDValue Op1, SDValue Op2, 8124 SDValue Op3) { 8125 SDVTList VTs = getVTList(VT); 8126 SDValue Ops[] = { Op1, Op2, Op3 }; 8127 return getMachineNode(Opcode, dl, VTs, Ops); 8128 } 8129 8130 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8131 EVT VT, ArrayRef<SDValue> Ops) { 8132 SDVTList VTs = getVTList(VT); 8133 return getMachineNode(Opcode, dl, VTs, Ops); 8134 } 8135 8136 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8137 EVT VT1, EVT VT2, SDValue Op1, 8138 SDValue Op2) { 8139 SDVTList VTs = getVTList(VT1, VT2); 8140 SDValue Ops[] = { Op1, Op2 }; 8141 return getMachineNode(Opcode, dl, VTs, Ops); 8142 } 8143 8144 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8145 EVT VT1, EVT VT2, SDValue Op1, 8146 SDValue Op2, SDValue Op3) { 8147 SDVTList VTs = getVTList(VT1, VT2); 8148 SDValue Ops[] = { Op1, Op2, Op3 }; 8149 return getMachineNode(Opcode, dl, VTs, Ops); 8150 } 8151 8152 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8153 EVT VT1, EVT VT2, 8154 ArrayRef<SDValue> Ops) { 8155 SDVTList VTs = getVTList(VT1, VT2); 8156 return getMachineNode(Opcode, dl, VTs, Ops); 8157 } 8158 8159 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8160 EVT VT1, EVT VT2, EVT VT3, 8161 SDValue Op1, SDValue Op2) { 8162 SDVTList VTs = getVTList(VT1, VT2, VT3); 8163 SDValue Ops[] = { Op1, Op2 }; 8164 return 
getMachineNode(Opcode, dl, VTs, Ops); 8165 } 8166 8167 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8168 EVT VT1, EVT VT2, EVT VT3, 8169 SDValue Op1, SDValue Op2, 8170 SDValue Op3) { 8171 SDVTList VTs = getVTList(VT1, VT2, VT3); 8172 SDValue Ops[] = { Op1, Op2, Op3 }; 8173 return getMachineNode(Opcode, dl, VTs, Ops); 8174 } 8175 8176 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8177 EVT VT1, EVT VT2, EVT VT3, 8178 ArrayRef<SDValue> Ops) { 8179 SDVTList VTs = getVTList(VT1, VT2, VT3); 8180 return getMachineNode(Opcode, dl, VTs, Ops); 8181 } 8182 8183 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8184 ArrayRef<EVT> ResultTys, 8185 ArrayRef<SDValue> Ops) { 8186 SDVTList VTs = getVTList(ResultTys); 8187 return getMachineNode(Opcode, dl, VTs, Ops); 8188 } 8189 8190 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 8191 SDVTList VTs, 8192 ArrayRef<SDValue> Ops) { 8193 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 8194 MachineSDNode *N; 8195 void *IP = nullptr; 8196 8197 if (DoCSE) { 8198 FoldingSetNodeID ID; 8199 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 8200 IP = nullptr; 8201 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 8202 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 8203 } 8204 } 8205 8206 // Allocate a new MachineSDNode. 8207 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 8208 createOperands(N, Ops); 8209 8210 if (DoCSE) 8211 CSEMap.InsertNode(N, IP); 8212 8213 InsertNode(N); 8214 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this); 8215 return N; 8216 } 8217 8218 /// getTargetExtractSubreg - A convenience function for creating 8219 /// TargetOpcode::EXTRACT_SUBREG nodes. 8220 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8221 SDValue Operand) { 8222 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8223 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 8224 VT, Operand, SRIdxVal); 8225 return SDValue(Subreg, 0); 8226 } 8227 8228 /// getTargetInsertSubreg - A convenience function for creating 8229 /// TargetOpcode::INSERT_SUBREG nodes. 8230 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8231 SDValue Operand, SDValue Subreg) { 8232 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8233 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 8234 VT, Operand, Subreg, SRIdxVal); 8235 return SDValue(Result, 0); 8236 } 8237 8238 /// getNodeIfExists - Get the specified node if it's already available, or 8239 /// else return NULL. 8240 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 8241 ArrayRef<SDValue> Ops, 8242 const SDNodeFlags Flags) { 8243 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 8244 FoldingSetNodeID ID; 8245 AddNodeIDNode(ID, Opcode, VTList, Ops); 8246 void *IP = nullptr; 8247 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 8248 E->intersectFlagsWith(Flags); 8249 return E; 8250 } 8251 } 8252 return nullptr; 8253 } 8254 8255 /// getDbgValue - Creates a SDDbgValue node. 
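/// A debug value can describe a variable in terms of an SDNode result, a
/// constant, a frame index or a virtual register; getDbgValue and the helpers
/// below construct one variant each. For example, attaching variable Var to
/// result 0 of node N might look like:
///   SDDbgValue *DV = DAG.getDbgValue(Var, Expr, N, /*R=*/0,
///                                    /*IsIndirect=*/false, DL, Order);
///   DAG.AddDbgValue(DV, N, /*isParameter=*/false);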
8256 /// 8257 /// SDNode 8258 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 8259 SDNode *N, unsigned R, bool IsIndirect, 8260 const DebugLoc &DL, unsigned O) { 8261 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8262 "Expected inlined-at fields to agree"); 8263 return new (DbgInfo->getAlloc()) 8264 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 8265 } 8266 8267 /// Constant 8268 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 8269 DIExpression *Expr, 8270 const Value *C, 8271 const DebugLoc &DL, unsigned O) { 8272 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8273 "Expected inlined-at fields to agree"); 8274 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 8275 } 8276 8277 /// FrameIndex 8278 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 8279 DIExpression *Expr, unsigned FI, 8280 bool IsIndirect, 8281 const DebugLoc &DL, 8282 unsigned O) { 8283 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8284 "Expected inlined-at fields to agree"); 8285 return new (DbgInfo->getAlloc()) 8286 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX); 8287 } 8288 8289 /// VReg 8290 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, 8291 DIExpression *Expr, 8292 unsigned VReg, bool IsIndirect, 8293 const DebugLoc &DL, unsigned O) { 8294 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8295 "Expected inlined-at fields to agree"); 8296 return new (DbgInfo->getAlloc()) 8297 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG); 8298 } 8299 8300 void SelectionDAG::transferDbgValues(SDValue From, SDValue To, 8301 unsigned OffsetInBits, unsigned SizeInBits, 8302 bool InvalidateDbg) { 8303 SDNode *FromNode = From.getNode(); 8304 SDNode *ToNode = To.getNode(); 8305 assert(FromNode && ToNode && "Can't modify dbg values"); 8306 8307 // PR35338 8308 // TODO: assert(From != To && "Redundant dbg value transfer"); 8309 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); 8310 if (From == To || FromNode == ToNode) 8311 return; 8312 8313 if (!FromNode->getHasDebugValue()) 8314 return; 8315 8316 SmallVector<SDDbgValue *, 2> ClonedDVs; 8317 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { 8318 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated()) 8319 continue; 8320 8321 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); 8322 8323 // Just transfer the dbg value attached to From. 8324 if (Dbg->getResNo() != From.getResNo()) 8325 continue; 8326 8327 DIVariable *Var = Dbg->getVariable(); 8328 auto *Expr = Dbg->getExpression(); 8329 // If a fragment is requested, update the expression. 8330 if (SizeInBits) { 8331 // When splitting a larger (e.g., sign-extended) value whose 8332 // lower bits are described with an SDDbgValue, do not attempt 8333 // to transfer the SDDbgValue to the upper bits. 8334 if (auto FI = Expr->getFragmentInfo()) 8335 if (OffsetInBits + SizeInBits > FI->SizeInBits) 8336 continue; 8337 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, 8338 SizeInBits); 8339 if (!Fragment) 8340 continue; 8341 Expr = *Fragment; 8342 } 8343 // Clone the SDDbgValue and move it to To. 
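    // For example, when a 64-bit value is split and To only covers bits
    // [32, 64) of the variable, the clone carries a DW_OP_LLVM_fragment 32 32
    // expression so that only that part of the variable is re-described.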
8344 SDDbgValue *Clone = getDbgValue( 8345 Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(), 8346 std::max(ToNode->getIROrder(), Dbg->getOrder())); 8347 ClonedDVs.push_back(Clone); 8348 8349 if (InvalidateDbg) { 8350 // Invalidate value and indicate the SDDbgValue should not be emitted. 8351 Dbg->setIsInvalidated(); 8352 Dbg->setIsEmitted(); 8353 } 8354 } 8355 8356 for (SDDbgValue *Dbg : ClonedDVs) 8357 AddDbgValue(Dbg, ToNode, false); 8358 } 8359 8360 void SelectionDAG::salvageDebugInfo(SDNode &N) { 8361 if (!N.getHasDebugValue()) 8362 return; 8363 8364 SmallVector<SDDbgValue *, 2> ClonedDVs; 8365 for (auto DV : GetDbgValues(&N)) { 8366 if (DV->isInvalidated()) 8367 continue; 8368 switch (N.getOpcode()) { 8369 default: 8370 break; 8371 case ISD::ADD: 8372 SDValue N0 = N.getOperand(0); 8373 SDValue N1 = N.getOperand(1); 8374 if (!isConstantIntBuildVectorOrConstantInt(N0) && 8375 isConstantIntBuildVectorOrConstantInt(N1)) { 8376 uint64_t Offset = N.getConstantOperandVal(1); 8377 // Rewrite an ADD constant node into a DIExpression. Since we are 8378 // performing arithmetic to compute the variable's *value* in the 8379 // DIExpression, we need to mark the expression with a 8380 // DW_OP_stack_value. 8381 auto *DIExpr = DV->getExpression(); 8382 DIExpr = 8383 DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset); 8384 SDDbgValue *Clone = 8385 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(), 8386 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder()); 8387 ClonedDVs.push_back(Clone); 8388 DV->setIsInvalidated(); 8389 DV->setIsEmitted(); 8390 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; 8391 N0.getNode()->dumprFull(this); 8392 dbgs() << " into " << *DIExpr << '\n'); 8393 } 8394 } 8395 } 8396 8397 for (SDDbgValue *Dbg : ClonedDVs) 8398 AddDbgValue(Dbg, Dbg->getSDNode(), false); 8399 } 8400 8401 /// Creates a SDDbgLabel node. 8402 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label, 8403 const DebugLoc &DL, unsigned O) { 8404 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && 8405 "Expected inlined-at fields to agree"); 8406 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); 8407 } 8408 8409 namespace { 8410 8411 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 8412 /// pointed to by a use iterator is deleted, increment the use iterator 8413 /// so that it doesn't dangle. 8414 /// 8415 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 8416 SDNode::use_iterator &UI; 8417 SDNode::use_iterator &UE; 8418 8419 void NodeDeleted(SDNode *N, SDNode *E) override { 8420 // Increment the iterator as needed. 8421 while (UI != UE && N == *UI) 8422 ++UI; 8423 } 8424 8425 public: 8426 RAUWUpdateListener(SelectionDAG &d, 8427 SDNode::use_iterator &ui, 8428 SDNode::use_iterator &ue) 8429 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 8430 }; 8431 8432 } // end anonymous namespace 8433 8434 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8435 /// This can cause recursive merging of nodes in the DAG. 8436 /// 8437 /// This version assumes From has a single result value. 
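/// For example, after folding (add X, 0) to X a transform might call
///   DAG.ReplaceAllUsesWith(SDValue(N, 0), N->getOperand(0));
/// leaving N dead so that it can subsequently be removed with RemoveDeadNode.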
8438 /// 8439 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { 8440 SDNode *From = FromN.getNode(); 8441 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && 8442 "Cannot replace with this method!"); 8443 assert(From != To.getNode() && "Cannot replace uses of with self"); 8444 8445 // Preserve Debug Values 8446 transferDbgValues(FromN, To); 8447 8448 // Iterate over all the existing uses of From. New uses will be added 8449 // to the beginning of the use list, which we avoid visiting. 8450 // This specifically avoids visiting uses of From that arise while the 8451 // replacement is happening, because any such uses would be the result 8452 // of CSE: If an existing node looks like From after one of its operands 8453 // is replaced by To, we don't want to replace of all its users with To 8454 // too. See PR3018 for more info. 8455 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8456 RAUWUpdateListener Listener(*this, UI, UE); 8457 while (UI != UE) { 8458 SDNode *User = *UI; 8459 8460 // This node is about to morph, remove its old self from the CSE maps. 8461 RemoveNodeFromCSEMaps(User); 8462 8463 // A user can appear in a use list multiple times, and when this 8464 // happens the uses are usually next to each other in the list. 8465 // To help reduce the number of CSE recomputations, process all 8466 // the uses of this user that we can find this way. 8467 do { 8468 SDUse &Use = UI.getUse(); 8469 ++UI; 8470 Use.set(To); 8471 if (To->isDivergent() != From->isDivergent()) 8472 updateDivergence(User); 8473 } while (UI != UE && *UI == User); 8474 // Now that we have modified User, add it back to the CSE maps. If it 8475 // already exists there, recursively merge the results together. 8476 AddModifiedNodeToCSEMaps(User); 8477 } 8478 8479 // If we just RAUW'd the root, take note. 8480 if (FromN == getRoot()) 8481 setRoot(To); 8482 } 8483 8484 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8485 /// This can cause recursive merging of nodes in the DAG. 8486 /// 8487 /// This version assumes that for each value of From, there is a 8488 /// corresponding value in To in the same position with the same type. 8489 /// 8490 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) { 8491 #ifndef NDEBUG 8492 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8493 assert((!From->hasAnyUseOfValue(i) || 8494 From->getValueType(i) == To->getValueType(i)) && 8495 "Cannot use this version of ReplaceAllUsesWith!"); 8496 #endif 8497 8498 // Handle the trivial case. 8499 if (From == To) 8500 return; 8501 8502 // Preserve Debug Info. Only do this if there's a use. 8503 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8504 if (From->hasAnyUseOfValue(i)) { 8505 assert((i < To->getNumValues()) && "Invalid To location"); 8506 transferDbgValues(SDValue(From, i), SDValue(To, i)); 8507 } 8508 8509 // Iterate over just the existing users of From. See the comments in 8510 // the ReplaceAllUsesWith above. 8511 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8512 RAUWUpdateListener Listener(*this, UI, UE); 8513 while (UI != UE) { 8514 SDNode *User = *UI; 8515 8516 // This node is about to morph, remove its old self from the CSE maps. 8517 RemoveNodeFromCSEMaps(User); 8518 8519 // A user can appear in a use list multiple times, and when this 8520 // happens the uses are usually next to each other in the list. 
8521 // To help reduce the number of CSE recomputations, process all 8522 // the uses of this user that we can find this way. 8523 do { 8524 SDUse &Use = UI.getUse(); 8525 ++UI; 8526 Use.setNode(To); 8527 if (To->isDivergent() != From->isDivergent()) 8528 updateDivergence(User); 8529 } while (UI != UE && *UI == User); 8530 8531 // Now that we have modified User, add it back to the CSE maps. If it 8532 // already exists there, recursively merge the results together. 8533 AddModifiedNodeToCSEMaps(User); 8534 } 8535 8536 // If we just RAUW'd the root, take note. 8537 if (From == getRoot().getNode()) 8538 setRoot(SDValue(To, getRoot().getResNo())); 8539 } 8540 8541 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8542 /// This can cause recursive merging of nodes in the DAG. 8543 /// 8544 /// This version can replace From with any result values. To must match the 8545 /// number and types of values returned by From. 8546 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 8547 if (From->getNumValues() == 1) // Handle the simple case efficiently. 8548 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 8549 8550 // Preserve Debug Info. 8551 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8552 transferDbgValues(SDValue(From, i), To[i]); 8553 8554 // Iterate over just the existing users of From. See the comments in 8555 // the ReplaceAllUsesWith above. 8556 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8557 RAUWUpdateListener Listener(*this, UI, UE); 8558 while (UI != UE) { 8559 SDNode *User = *UI; 8560 8561 // This node is about to morph, remove its old self from the CSE maps. 8562 RemoveNodeFromCSEMaps(User); 8563 8564 // A user can appear in a use list multiple times, and when this happens the 8565 // uses are usually next to each other in the list. To help reduce the 8566 // number of CSE and divergence recomputations, process all the uses of this 8567 // user that we can find this way. 8568 bool To_IsDivergent = false; 8569 do { 8570 SDUse &Use = UI.getUse(); 8571 const SDValue &ToOp = To[Use.getResNo()]; 8572 ++UI; 8573 Use.set(ToOp); 8574 To_IsDivergent |= ToOp->isDivergent(); 8575 } while (UI != UE && *UI == User); 8576 8577 if (To_IsDivergent != From->isDivergent()) 8578 updateDivergence(User); 8579 8580 // Now that we have modified User, add it back to the CSE maps. If it 8581 // already exists there, recursively merge the results together. 8582 AddModifiedNodeToCSEMaps(User); 8583 } 8584 8585 // If we just RAUW'd the root, take note. 8586 if (From == getRoot().getNode()) 8587 setRoot(SDValue(To[getRoot().getResNo()])); 8588 } 8589 8590 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 8591 /// uses of other values produced by From.getNode() alone. The Deleted 8592 /// vector is handled the same way as for ReplaceAllUsesWith. 8593 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 8594 // Handle the really simple, really trivial case efficiently. 8595 if (From == To) return; 8596 8597 // Handle the simple, trivial, case efficiently. 8598 if (From.getNode()->getNumValues() == 1) { 8599 ReplaceAllUsesWith(From, To); 8600 return; 8601 } 8602 8603 // Preserve Debug Info. 8604 transferDbgValues(From, To); 8605 8606 // Iterate over just the existing users of From. See the comments in 8607 // the ReplaceAllUsesWith above. 
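  // The RAUWUpdateListener constructed below keeps UI/UE valid: if CSE
  // merging deletes the user the iterator currently points at, NodeDeleted
  // advances the iterator past the deleted node instead of letting it dangle.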
8608 SDNode::use_iterator UI = From.getNode()->use_begin(), 8609 UE = From.getNode()->use_end(); 8610 RAUWUpdateListener Listener(*this, UI, UE); 8611 while (UI != UE) { 8612 SDNode *User = *UI; 8613 bool UserRemovedFromCSEMaps = false; 8614 8615 // A user can appear in a use list multiple times, and when this 8616 // happens the uses are usually next to each other in the list. 8617 // To help reduce the number of CSE recomputations, process all 8618 // the uses of this user that we can find this way. 8619 do { 8620 SDUse &Use = UI.getUse(); 8621 8622 // Skip uses of different values from the same node. 8623 if (Use.getResNo() != From.getResNo()) { 8624 ++UI; 8625 continue; 8626 } 8627 8628 // If this node hasn't been modified yet, it's still in the CSE maps, 8629 // so remove its old self from the CSE maps. 8630 if (!UserRemovedFromCSEMaps) { 8631 RemoveNodeFromCSEMaps(User); 8632 UserRemovedFromCSEMaps = true; 8633 } 8634 8635 ++UI; 8636 Use.set(To); 8637 if (To->isDivergent() != From->isDivergent()) 8638 updateDivergence(User); 8639 } while (UI != UE && *UI == User); 8640 // We are iterating over all uses of the From node, so if a use 8641 // doesn't use the specific value, no changes are made. 8642 if (!UserRemovedFromCSEMaps) 8643 continue; 8644 8645 // Now that we have modified User, add it back to the CSE maps. If it 8646 // already exists there, recursively merge the results together. 8647 AddModifiedNodeToCSEMaps(User); 8648 } 8649 8650 // If we just RAUW'd the root, take note. 8651 if (From == getRoot()) 8652 setRoot(To); 8653 } 8654 8655 namespace { 8656 8657 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 8658 /// to record information about a use. 8659 struct UseMemo { 8660 SDNode *User; 8661 unsigned Index; 8662 SDUse *Use; 8663 }; 8664 8665 /// operator< - Sort Memos by User. 
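/// Sorting by the User pointer groups all of one user's uses together, so the
/// replacement loop below can take each user out of the CSE maps once,
/// rewrite every relevant operand, and re-insert it once.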
8666 bool operator<(const UseMemo &L, const UseMemo &R) { 8667 return (intptr_t)L.User < (intptr_t)R.User; 8668 } 8669 8670 } // end anonymous namespace 8671 8672 void SelectionDAG::updateDivergence(SDNode * N) 8673 { 8674 if (TLI->isSDNodeAlwaysUniform(N)) 8675 return; 8676 bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 8677 for (auto &Op : N->ops()) { 8678 if (Op.Val.getValueType() != MVT::Other) 8679 IsDivergent |= Op.getNode()->isDivergent(); 8680 } 8681 if (N->SDNodeBits.IsDivergent != IsDivergent) { 8682 N->SDNodeBits.IsDivergent = IsDivergent; 8683 for (auto U : N->uses()) { 8684 updateDivergence(U); 8685 } 8686 } 8687 } 8688 8689 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) { 8690 DenseMap<SDNode *, unsigned> Degree; 8691 Order.reserve(AllNodes.size()); 8692 for (auto &N : allnodes()) { 8693 unsigned NOps = N.getNumOperands(); 8694 Degree[&N] = NOps; 8695 if (0 == NOps) 8696 Order.push_back(&N); 8697 } 8698 for (size_t I = 0; I != Order.size(); ++I) { 8699 SDNode *N = Order[I]; 8700 for (auto U : N->uses()) { 8701 unsigned &UnsortedOps = Degree[U]; 8702 if (0 == --UnsortedOps) 8703 Order.push_back(U); 8704 } 8705 } 8706 } 8707 8708 #ifndef NDEBUG 8709 void SelectionDAG::VerifyDAGDiverence() { 8710 std::vector<SDNode *> TopoOrder; 8711 CreateTopologicalOrder(TopoOrder); 8712 const TargetLowering &TLI = getTargetLoweringInfo(); 8713 DenseMap<const SDNode *, bool> DivergenceMap; 8714 for (auto &N : allnodes()) { 8715 DivergenceMap[&N] = false; 8716 } 8717 for (auto N : TopoOrder) { 8718 bool IsDivergent = DivergenceMap[N]; 8719 bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA); 8720 for (auto &Op : N->ops()) { 8721 if (Op.Val.getValueType() != MVT::Other) 8722 IsSDNodeDivergent |= DivergenceMap[Op.getNode()]; 8723 } 8724 if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) { 8725 DivergenceMap[N] = true; 8726 } 8727 } 8728 for (auto &N : allnodes()) { 8729 (void)N; 8730 assert(DivergenceMap[&N] == N.isDivergent() && 8731 "Divergence bit inconsistency detected\n"); 8732 } 8733 } 8734 #endif 8735 8736 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving 8737 /// uses of other values produced by From.getNode() alone. The same value 8738 /// may appear in both the From and To list. The Deleted vector is 8739 /// handled the same way as for ReplaceAllUsesWith. 8740 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From, 8741 const SDValue *To, 8742 unsigned Num){ 8743 // Handle the simple, trivial case efficiently. 8744 if (Num == 1) 8745 return ReplaceAllUsesOfValueWith(*From, *To); 8746 8747 transferDbgValues(*From, *To); 8748 8749 // Read up all the uses and make records of them. This helps 8750 // processing new uses that are introduced during the 8751 // replacement process. 8752 SmallVector<UseMemo, 4> Uses; 8753 for (unsigned i = 0; i != Num; ++i) { 8754 unsigned FromResNo = From[i].getResNo(); 8755 SDNode *FromNode = From[i].getNode(); 8756 for (SDNode::use_iterator UI = FromNode->use_begin(), 8757 E = FromNode->use_end(); UI != E; ++UI) { 8758 SDUse &Use = UI.getUse(); 8759 if (Use.getResNo() == FromResNo) { 8760 UseMemo Memo = { *UI, i, &Use }; 8761 Uses.push_back(Memo); 8762 } 8763 } 8764 } 8765 8766 // Sort the uses, so that all the uses from a given User are together. 8767 llvm::sort(Uses); 8768 8769 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size(); 8770 UseIndex != UseIndexEnd; ) { 8771 // We know that this user uses some value of From. 
If it is the right 8772 // value, update it. 8773 SDNode *User = Uses[UseIndex].User; 8774 8775 // This node is about to morph, remove its old self from the CSE maps. 8776 RemoveNodeFromCSEMaps(User); 8777 8778 // The Uses array is sorted, so all the uses for a given User 8779 // are next to each other in the list. 8780 // To help reduce the number of CSE recomputations, process all 8781 // the uses of this user that we can find this way. 8782 do { 8783 unsigned i = Uses[UseIndex].Index; 8784 SDUse &Use = *Uses[UseIndex].Use; 8785 ++UseIndex; 8786 8787 Use.set(To[i]); 8788 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User); 8789 8790 // Now that we have modified User, add it back to the CSE maps. If it 8791 // already exists there, recursively merge the results together. 8792 AddModifiedNodeToCSEMaps(User); 8793 } 8794 } 8795 8796 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG 8797 /// based on their topological order. It returns the maximum id and a vector 8798 /// of the SDNodes* in assigned order by reference. 8799 unsigned SelectionDAG::AssignTopologicalOrder() { 8800 unsigned DAGSize = 0; 8801 8802 // SortedPos tracks the progress of the algorithm. Nodes before it are 8803 // sorted, nodes after it are unsorted. When the algorithm completes 8804 // it is at the end of the list. 8805 allnodes_iterator SortedPos = allnodes_begin(); 8806 8807 // Visit all the nodes. Move nodes with no operands to the front of 8808 // the list immediately. Annotate nodes that do have operands with their 8809 // operand count. Before we do this, the Node Id fields of the nodes 8810 // may contain arbitrary values. After, the Node Id fields for nodes 8811 // before SortedPos will contain the topological sort index, and the 8812 // Node Id fields for nodes at SortedPos and after will contain the 8813 // count of outstanding operands. 8814 for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) { 8815 SDNode *N = &*I++; 8816 checkForCycles(N, this); 8817 unsigned Degree = N->getNumOperands(); 8818 if (Degree == 0) { 8819 // A node with no operands: add it to the result array immediately. 8820 N->setNodeId(DAGSize++); 8821 allnodes_iterator Q(N); 8822 if (Q != SortedPos) 8823 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q)); 8824 assert(SortedPos != AllNodes.end() && "Overran node list"); 8825 ++SortedPos; 8826 } else { 8827 // Temporarily use the Node Id as scratch space for the degree count. 8828 N->setNodeId(Degree); 8829 } 8830 } 8831 8832 // Visit all the nodes. As we iterate, move nodes into sorted order, 8833 // such that by the time the end is reached all nodes will be sorted. 8834 for (SDNode &Node : allnodes()) { 8835 SDNode *N = &Node; 8836 checkForCycles(N, this); 8837 // N is in sorted position, so all its uses have one less operand 8838 // that needs to be sorted. 8839 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 8840 UI != UE; ++UI) { 8841 SDNode *P = *UI; 8842 unsigned Degree = P->getNodeId(); 8843 assert(Degree != 0 && "Invalid node degree"); 8844 --Degree; 8845 if (Degree == 0) { 8846 // All of P's operands are sorted, so P may be sorted now. 8847 P->setNodeId(DAGSize++); 8848 if (P->getIterator() != SortedPos) 8849 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P)); 8850 assert(SortedPos != AllNodes.end() && "Overran node list"); 8851 ++SortedPos; 8852 } else { 8853 // Update P's outstanding operand count.
8854 P->setNodeId(Degree); 8855 } 8856 } 8857 if (Node.getIterator() == SortedPos) { 8858 #ifndef NDEBUG 8859 allnodes_iterator I(N); 8860 SDNode *S = &*++I; 8861 dbgs() << "Overran sorted position:\n"; 8862 S->dumprFull(this); dbgs() << "\n"; 8863 dbgs() << "Checking if this is due to cycles\n"; 8864 checkForCycles(this, true); 8865 #endif 8866 llvm_unreachable(nullptr); 8867 } 8868 } 8869 8870 assert(SortedPos == AllNodes.end() && 8871 "Topological sort incomplete!"); 8872 assert(AllNodes.front().getOpcode() == ISD::EntryToken && 8873 "First node in topological sort is not the entry token!"); 8874 assert(AllNodes.front().getNodeId() == 0 && 8875 "First node in topological sort has non-zero id!"); 8876 assert(AllNodes.front().getNumOperands() == 0 && 8877 "First node in topological sort has operands!"); 8878 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 && 8879 "Last node in topological sort has unexpected id!"); 8880 assert(AllNodes.back().use_empty() && 8881 "Last node in topological sort has users!"); 8882 assert(DAGSize == allnodes_size() && "Node count mismatch!"); 8883 return DAGSize; 8884 } 8885 8886 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the 8887 /// value is produced by SD. 8888 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) { 8889 if (SD) { 8890 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue()); 8891 SD->setHasDebugValue(true); 8892 } 8893 DbgInfo->add(DB, SD, isParameter); 8894 } 8895 8896 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { 8897 DbgInfo->add(DB); 8898 } 8899 8900 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, 8901 SDValue NewMemOp) { 8902 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node"); 8903 // The new memory operation must have the same position as the old load in 8904 // terms of memory dependency. Create a TokenFactor for the old load and new 8905 // memory operation and update uses of the old load's output chain to use that 8906 // TokenFactor.
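  // For example, if the old load is t1 (chain result t1:1) and the new memory
  // operation is t2, everything that was chained on t1:1 ends up chained on
  //   tf: ch = TokenFactor t1:1, t2:1
  // and is therefore ordered after both operations.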
8907 SDValue OldChain = SDValue(OldLoad, 1); 8908 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 8909 if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1)) 8910 return NewChain; 8911 8912 SDValue TokenFactor = 8913 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 8914 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 8915 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 8916 return TokenFactor; 8917 } 8918 8919 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op, 8920 Function **OutFunction) { 8921 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol"); 8922 8923 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 8924 auto *Module = MF->getFunction().getParent(); 8925 auto *Function = Module->getFunction(Symbol); 8926 8927 if (OutFunction != nullptr) 8928 *OutFunction = Function; 8929 8930 if (Function != nullptr) { 8931 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace()); 8932 return getGlobalAddress(Function, SDLoc(Op), PtrTy); 8933 } 8934 8935 std::string ErrorStr; 8936 raw_string_ostream ErrorFormatter(ErrorStr); 8937 8938 ErrorFormatter << "Undefined external symbol "; 8939 ErrorFormatter << '"' << Symbol << '"'; 8940 ErrorFormatter.flush(); 8941 8942 report_fatal_error(ErrorStr); 8943 } 8944 8945 //===----------------------------------------------------------------------===// 8946 // SDNode Class 8947 //===----------------------------------------------------------------------===// 8948 8949 bool llvm::isNullConstant(SDValue V) { 8950 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8951 return Const != nullptr && Const->isNullValue(); 8952 } 8953 8954 bool llvm::isNullFPConstant(SDValue V) { 8955 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 8956 return Const != nullptr && Const->isZero() && !Const->isNegative(); 8957 } 8958 8959 bool llvm::isAllOnesConstant(SDValue V) { 8960 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8961 return Const != nullptr && Const->isAllOnesValue(); 8962 } 8963 8964 bool llvm::isOneConstant(SDValue V) { 8965 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8966 return Const != nullptr && Const->isOne(); 8967 } 8968 8969 SDValue llvm::peekThroughBitcasts(SDValue V) { 8970 while (V.getOpcode() == ISD::BITCAST) 8971 V = V.getOperand(0); 8972 return V; 8973 } 8974 8975 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { 8976 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) 8977 V = V.getOperand(0); 8978 return V; 8979 } 8980 8981 SDValue llvm::peekThroughExtractSubvectors(SDValue V) { 8982 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) 8983 V = V.getOperand(0); 8984 return V; 8985 } 8986 8987 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) { 8988 if (V.getOpcode() != ISD::XOR) 8989 return false; 8990 V = peekThroughBitcasts(V.getOperand(1)); 8991 unsigned NumBits = V.getScalarValueSizeInBits(); 8992 ConstantSDNode *C = 8993 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true); 8994 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits); 8995 } 8996 8997 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs, 8998 bool AllowTruncation) { 8999 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 9000 return CN; 9001 9002 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9003 BitVector UndefElements; 9004 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 9005 9006 // BuildVectors can truncate their operands. 
Ignore that case here unless 9007 // AllowTruncation is set. 9008 if (CN && (UndefElements.none() || AllowUndefs)) { 9009 EVT CVT = CN->getValueType(0); 9010 EVT NSVT = N.getValueType().getScalarType(); 9011 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 9012 if (AllowTruncation || (CVT == NSVT)) 9013 return CN; 9014 } 9015 } 9016 9017 return nullptr; 9018 } 9019 9020 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts, 9021 bool AllowUndefs, 9022 bool AllowTruncation) { 9023 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 9024 return CN; 9025 9026 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9027 BitVector UndefElements; 9028 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements); 9029 9030 // BuildVectors can truncate their operands. Ignore that case here unless 9031 // AllowTruncation is set. 9032 if (CN && (UndefElements.none() || AllowUndefs)) { 9033 EVT CVT = CN->getValueType(0); 9034 EVT NSVT = N.getValueType().getScalarType(); 9035 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 9036 if (AllowTruncation || (CVT == NSVT)) 9037 return CN; 9038 } 9039 } 9040 9041 return nullptr; 9042 } 9043 9044 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) { 9045 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 9046 return CN; 9047 9048 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9049 BitVector UndefElements; 9050 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 9051 if (CN && (UndefElements.none() || AllowUndefs)) 9052 return CN; 9053 } 9054 9055 return nullptr; 9056 } 9057 9058 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, 9059 const APInt &DemandedElts, 9060 bool AllowUndefs) { 9061 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 9062 return CN; 9063 9064 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9065 BitVector UndefElements; 9066 ConstantFPSDNode *CN = 9067 BV->getConstantFPSplatNode(DemandedElts, &UndefElements); 9068 if (CN && (UndefElements.none() || AllowUndefs)) 9069 return CN; 9070 } 9071 9072 return nullptr; 9073 } 9074 9075 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) { 9076 // TODO: may want to use peekThroughBitcast() here. 9077 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs); 9078 return C && C->isNullValue(); 9079 } 9080 9081 bool llvm::isOneOrOneSplat(SDValue N) { 9082 // TODO: may want to use peekThroughBitcast() here. 
9083 unsigned BitWidth = N.getScalarValueSizeInBits(); 9084 ConstantSDNode *C = isConstOrConstSplat(N); 9085 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth; 9086 } 9087 9088 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) { 9089 N = peekThroughBitcasts(N); 9090 unsigned BitWidth = N.getScalarValueSizeInBits(); 9091 ConstantSDNode *C = isConstOrConstSplat(N); 9092 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth; 9093 } 9094 9095 HandleSDNode::~HandleSDNode() { 9096 DropOperands(); 9097 } 9098 9099 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 9100 const DebugLoc &DL, 9101 const GlobalValue *GA, EVT VT, 9102 int64_t o, unsigned TF) 9103 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 9104 TheGlobal = GA; 9105 } 9106 9107 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 9108 EVT VT, unsigned SrcAS, 9109 unsigned DestAS) 9110 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 9111 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 9112 9113 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 9114 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 9115 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 9116 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 9117 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 9118 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 9119 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 9120 9121 // We check here that the size of the memory operand fits within the size of 9122 // the MMO. This is because the MMO might indicate only a possible address 9123 // range instead of specifying the affected memory addresses precisely. 9124 // TODO: Make MachineMemOperands aware of scalable vectors. 9125 assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() && 9126 "Size mismatch!"); 9127 } 9128 9129 /// Profile - Gather unique data for the node. 9130 /// 9131 void SDNode::Profile(FoldingSetNodeID &ID) const { 9132 AddNodeIDNode(ID, this); 9133 } 9134 9135 namespace { 9136 9137 struct EVTArray { 9138 std::vector<EVT> VTs; 9139 9140 EVTArray() { 9141 VTs.reserve(MVT::LAST_VALUETYPE); 9142 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 9143 VTs.push_back(MVT((MVT::SimpleValueType)i)); 9144 } 9145 }; 9146 9147 } // end anonymous namespace 9148 9149 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 9150 static ManagedStatic<EVTArray> SimpleVTArray; 9151 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 9152 9153 /// getValueTypeList - Return a pointer to the specified value type. 9154 /// 9155 const EVT *SDNode::getValueTypeList(EVT VT) { 9156 if (VT.isExtended()) { 9157 sys::SmartScopedLock<true> Lock(*VTMutex); 9158 return &(*EVTs->insert(VT).first); 9159 } else { 9160 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 9161 "Value type out of range!"); 9162 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 9163 } 9164 } 9165 9166 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 9167 /// indicated value. This method ignores uses of other values defined by this 9168 /// operation. 
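/// For example, Ld->hasNUsesOfValue(1, 0) on a load asks whether the loaded
/// value has exactly one user, independent of how many users the chain result
/// (value number 1) has.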
9169 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { 9170 assert(Value < getNumValues() && "Bad value!"); 9171 9172 // TODO: Only iterate over uses of a given value of the node 9173 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { 9174 if (UI.getUse().getResNo() == Value) { 9175 if (NUses == 0) 9176 return false; 9177 --NUses; 9178 } 9179 } 9180 9181 // Found exactly the right number of uses? 9182 return NUses == 0; 9183 } 9184 9185 /// hasAnyUseOfValue - Return true if there are any use of the indicated 9186 /// value. This method ignores uses of other values defined by this operation. 9187 bool SDNode::hasAnyUseOfValue(unsigned Value) const { 9188 assert(Value < getNumValues() && "Bad value!"); 9189 9190 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) 9191 if (UI.getUse().getResNo() == Value) 9192 return true; 9193 9194 return false; 9195 } 9196 9197 /// isOnlyUserOf - Return true if this node is the only use of N. 9198 bool SDNode::isOnlyUserOf(const SDNode *N) const { 9199 bool Seen = false; 9200 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 9201 SDNode *User = *I; 9202 if (User == this) 9203 Seen = true; 9204 else 9205 return false; 9206 } 9207 9208 return Seen; 9209 } 9210 9211 /// Return true if the only users of N are contained in Nodes. 9212 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) { 9213 bool Seen = false; 9214 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 9215 SDNode *User = *I; 9216 if (llvm::any_of(Nodes, 9217 [&User](const SDNode *Node) { return User == Node; })) 9218 Seen = true; 9219 else 9220 return false; 9221 } 9222 9223 return Seen; 9224 } 9225 9226 /// isOperand - Return true if this node is an operand of N. 9227 bool SDValue::isOperandOf(const SDNode *N) const { 9228 return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; }); 9229 } 9230 9231 bool SDNode::isOperandOf(const SDNode *N) const { 9232 return any_of(N->op_values(), 9233 [this](SDValue Op) { return this == Op.getNode(); }); 9234 } 9235 9236 /// reachesChainWithoutSideEffects - Return true if this operand (which must 9237 /// be a chain) reaches the specified operand without crossing any 9238 /// side-effecting instructions on any chain path. In practice, this looks 9239 /// through token factors and non-volatile loads. In order to remain efficient, 9240 /// this only looks a couple of nodes in, it does not do an exhaustive search. 9241 /// 9242 /// Note that we only need to examine chains when we're searching for 9243 /// side-effects; SelectionDAG requires that all side-effects are represented 9244 /// by chains, even if another operand would force a specific ordering. This 9245 /// constraint is necessary to allow transformations like splitting loads. 9246 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, 9247 unsigned Depth) const { 9248 if (*this == Dest) return true; 9249 9250 // Don't search too deeply, we just want to be able to see through 9251 // TokenFactor's etc. 9252 if (Depth == 0) return false; 9253 9254 // If this is a token factor, all inputs to the TF happen in parallel. 9255 if (getOpcode() == ISD::TokenFactor) { 9256 // First, try a shallow search. 9257 if (is_contained((*this)->ops(), Dest)) { 9258 // We found the chain we want as an operand of this TokenFactor. 
9259 // Essentially, we reach the chain without side-effects if we could 9260 // serialize the TokenFactor into a simple chain of operations with 9261 // Dest as the last operation. This is automatically true if the 9262 // chain has one use: there are no other ordering constraints. 9263 // If the chain has more than one use, we give up: some other 9264 // use of Dest might force a side-effect between Dest and the current 9265 // node. 9266 if (Dest.hasOneUse()) 9267 return true; 9268 } 9269 // Next, try a deep search: check whether every operand of the TokenFactor 9270 // reaches Dest. 9271 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 9272 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 9273 }); 9274 } 9275 9276 // Loads don't have side effects, look through them. 9277 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 9278 if (Ld->isUnordered()) 9279 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 9280 } 9281 return false; 9282 } 9283 9284 bool SDNode::hasPredecessor(const SDNode *N) const { 9285 SmallPtrSet<const SDNode *, 32> Visited; 9286 SmallVector<const SDNode *, 16> Worklist; 9287 Worklist.push_back(this); 9288 return hasPredecessorHelper(N, Visited, Worklist); 9289 } 9290 9291 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 9292 this->Flags.intersectWith(Flags); 9293 } 9294 9295 SDValue 9296 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, 9297 ArrayRef<ISD::NodeType> CandidateBinOps, 9298 bool AllowPartials) { 9299 // The pattern must end in an extract from index 0. 9300 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || 9301 !isNullConstant(Extract->getOperand(1))) 9302 return SDValue(); 9303 9304 // Match against one of the candidate binary ops. 9305 SDValue Op = Extract->getOperand(0); 9306 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) { 9307 return Op.getOpcode() == unsigned(BinOp); 9308 })) 9309 return SDValue(); 9310 9311 // Floating-point reductions may require relaxed constraints on the final step 9312 // of the reduction because they may reorder intermediate operations. 9313 unsigned CandidateBinOp = Op.getOpcode(); 9314 if (Op.getValueType().isFloatingPoint()) { 9315 SDNodeFlags Flags = Op->getFlags(); 9316 switch (CandidateBinOp) { 9317 case ISD::FADD: 9318 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation()) 9319 return SDValue(); 9320 break; 9321 default: 9322 llvm_unreachable("Unhandled FP opcode for binop reduction"); 9323 } 9324 } 9325 9326 // Matching failed - attempt to see if we did enough stages that a partial 9327 // reduction from a subvector is possible. 9328 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) { 9329 if (!AllowPartials || !Op) 9330 return SDValue(); 9331 EVT OpVT = Op.getValueType(); 9332 EVT OpSVT = OpVT.getScalarType(); 9333 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts); 9334 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0)) 9335 return SDValue(); 9336 BinOp = (ISD::NodeType)CandidateBinOp; 9337 return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op, 9338 getVectorIdxConstant(0, SDLoc(Op))); 9339 }; 9340 9341 // At each stage, we're looking for something that looks like: 9342 // %s = shufflevector <8 x i32> %op, <8 x i32> undef, 9343 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, 9344 // i32 undef, i32 undef, i32 undef, i32 undef> 9345 // %a = binop <8 x i32> %op, %s 9346 // Where the mask changes according to the stage. E.g. 
for a 3-stage pyramid, 9347 // we expect something like: 9348 // <4,5,6,7,u,u,u,u> 9349 // <2,3,u,u,u,u,u,u> 9350 // <1,u,u,u,u,u,u,u> 9351 // While a partial reduction match would be: 9352 // <2,3,u,u,u,u,u,u> 9353 // <1,u,u,u,u,u,u,u> 9354 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements()); 9355 SDValue PrevOp; 9356 for (unsigned i = 0; i < Stages; ++i) { 9357 unsigned MaskEnd = (1 << i); 9358 9359 if (Op.getOpcode() != CandidateBinOp) 9360 return PartialReduction(PrevOp, MaskEnd); 9361 9362 SDValue Op0 = Op.getOperand(0); 9363 SDValue Op1 = Op.getOperand(1); 9364 9365 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0); 9366 if (Shuffle) { 9367 Op = Op1; 9368 } else { 9369 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1); 9370 Op = Op0; 9371 } 9372 9373 // The first operand of the shuffle should be the same as the other operand 9374 // of the binop. 9375 if (!Shuffle || Shuffle->getOperand(0) != Op) 9376 return PartialReduction(PrevOp, MaskEnd); 9377 9378 // Verify the shuffle has the expected (at this stage of the pyramid) mask. 9379 for (int Index = 0; Index < (int)MaskEnd; ++Index) 9380 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index)) 9381 return PartialReduction(PrevOp, MaskEnd); 9382 9383 PrevOp = Op; 9384 } 9385 9386 // Handle subvector reductions, which tend to appear after the shuffle 9387 // reduction stages. 9388 while (Op.getOpcode() == CandidateBinOp) { 9389 unsigned NumElts = Op.getValueType().getVectorNumElements(); 9390 SDValue Op0 = Op.getOperand(0); 9391 SDValue Op1 = Op.getOperand(1); 9392 if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR || 9393 Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR || 9394 Op0.getOperand(0) != Op1.getOperand(0)) 9395 break; 9396 SDValue Src = Op0.getOperand(0); 9397 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 9398 if (NumSrcElts != (2 * NumElts)) 9399 break; 9400 if (!(Op0.getConstantOperandAPInt(1) == 0 && 9401 Op1.getConstantOperandAPInt(1) == NumElts) && 9402 !(Op1.getConstantOperandAPInt(1) == 0 && 9403 Op0.getConstantOperandAPInt(1) == NumElts)) 9404 break; 9405 Op = Src; 9406 } 9407 9408 BinOp = (ISD::NodeType)CandidateBinOp; 9409 return Op; 9410 } 9411 9412 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 9413 assert(N->getNumValues() == 1 && 9414 "Can't unroll a vector with multiple results!"); 9415 9416 EVT VT = N->getValueType(0); 9417 unsigned NE = VT.getVectorNumElements(); 9418 EVT EltVT = VT.getVectorElementType(); 9419 SDLoc dl(N); 9420 9421 SmallVector<SDValue, 8> Scalars; 9422 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 9423 9424 // If ResNE is 0, fully unroll the vector op. 9425 if (ResNE == 0) 9426 ResNE = NE; 9427 else if (NE > ResNE) 9428 NE = ResNE; 9429 9430 unsigned i; 9431 for (i= 0; i != NE; ++i) { 9432 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 9433 SDValue Operand = N->getOperand(j); 9434 EVT OperandVT = Operand.getValueType(); 9435 if (OperandVT.isVector()) { 9436 // A vector operand; extract a single element. 9437 EVT OperandEltVT = OperandVT.getVectorElementType(); 9438 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, 9439 Operand, getVectorIdxConstant(i, dl)); 9440 } else { 9441 // A scalar operand; just use it as is. 
9442 Operands[j] = Operand; 9443 } 9444 } 9445 9446 switch (N->getOpcode()) { 9447 default: { 9448 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 9449 N->getFlags())); 9450 break; 9451 } 9452 case ISD::VSELECT: 9453 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 9454 break; 9455 case ISD::SHL: 9456 case ISD::SRA: 9457 case ISD::SRL: 9458 case ISD::ROTL: 9459 case ISD::ROTR: 9460 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 9461 getShiftAmountOperand(Operands[0].getValueType(), 9462 Operands[1]))); 9463 break; 9464 case ISD::SIGN_EXTEND_INREG: { 9465 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 9466 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 9467 Operands[0], 9468 getValueType(ExtVT))); 9469 } 9470 } 9471 } 9472 9473 for (; i < ResNE; ++i) 9474 Scalars.push_back(getUNDEF(EltVT)); 9475 9476 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 9477 return getBuildVector(VecVT, dl, Scalars); 9478 } 9479 9480 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( 9481 SDNode *N, unsigned ResNE) { 9482 unsigned Opcode = N->getOpcode(); 9483 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO || 9484 Opcode == ISD::USUBO || Opcode == ISD::SSUBO || 9485 Opcode == ISD::UMULO || Opcode == ISD::SMULO) && 9486 "Expected an overflow opcode"); 9487 9488 EVT ResVT = N->getValueType(0); 9489 EVT OvVT = N->getValueType(1); 9490 EVT ResEltVT = ResVT.getVectorElementType(); 9491 EVT OvEltVT = OvVT.getVectorElementType(); 9492 SDLoc dl(N); 9493 9494 // If ResNE is 0, fully unroll the vector op. 9495 unsigned NE = ResVT.getVectorNumElements(); 9496 if (ResNE == 0) 9497 ResNE = NE; 9498 else if (NE > ResNE) 9499 NE = ResNE; 9500 9501 SmallVector<SDValue, 8> LHSScalars; 9502 SmallVector<SDValue, 8> RHSScalars; 9503 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); 9504 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); 9505 9506 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); 9507 SDVTList VTs = getVTList(ResEltVT, SVT); 9508 SmallVector<SDValue, 8> ResScalars; 9509 SmallVector<SDValue, 8> OvScalars; 9510 for (unsigned i = 0; i < NE; ++i) { 9511 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); 9512 SDValue Ov = 9513 getSelect(dl, OvEltVT, Res.getValue(1), 9514 getBoolConstant(true, dl, OvEltVT, ResVT), 9515 getConstant(0, dl, OvEltVT)); 9516 9517 ResScalars.push_back(Res); 9518 OvScalars.push_back(Ov); 9519 } 9520 9521 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); 9522 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); 9523 9524 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE); 9525 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE); 9526 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars), 9527 getBuildVector(NewOvVT, dl, OvScalars)); 9528 } 9529 9530 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 9531 LoadSDNode *Base, 9532 unsigned Bytes, 9533 int Dist) const { 9534 if (LD->isVolatile() || Base->isVolatile()) 9535 return false; 9536 // TODO: probably too restrictive for atomics, revisit 9537 if (!LD->isSimple()) 9538 return false; 9539 if (LD->isIndexed() || Base->isIndexed()) 9540 return false; 9541 if (LD->getChain() != Base->getChain()) 9542 return false; 9543 EVT VT = LD->getValueType(0); 9544 if (VT.getSizeInBits() / 8 != Bytes) 9545 return false; 9546 9547 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); 9548 auto LocDecomp = BaseIndexOffset::match(LD, *this); 
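  // Offset below is the byte distance between the two decomposed addresses;
  // e.g. two i32 loads from FI#1+0 (Base) and FI#1+4 (LD) match with
  // Bytes == 4 and Dist == 1, since Offset == 4 == Dist * Bytes.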

  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlign - Infer alignment of a load / store address. Return None
/// if it cannot be inferred.
MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV = nullptr;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    KnownBits Known(PtrWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    if (AlignBits)
      return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = INT_MIN;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != INT_MIN) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
  }

  return None;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
/// type, dependent on an enveloping VT that has been split into two identical
/// pieces. Sets the HiIsEmpty flag when hi type has zero storage size.
std::pair<EVT, EVT>
SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
                                       bool *HiIsEmpty) const {
  EVT EltTp = VT.getVectorElementType();
  bool IsScalable = VT.isScalableVector();
  // Examples:
  // custom VL=8 with enveloping VL=8/8 yields 8/0 (hi empty)
  // custom VL=9 with enveloping VL=8/8 yields 8/1
  // custom VL=10 with enveloping VL=8/8 yields 8/2
  // etc.
  unsigned VTNumElts = VT.getVectorNumElements();
  unsigned EnvNumElts = EnvVT.getVectorNumElements();
  EVT LoVT, HiVT;
  if (VTNumElts > EnvNumElts) {
    LoVT = EnvVT;
    HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts,
                            IsScalable);
    *HiIsEmpty = false;
  } else {
    // Flag that the hi type has zero storage size, but return the split
    // envelope type (this would be easier if vector types with zero elements
    // were allowed).
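    // For example, a custom VL=3 with an enveloping VL=4/4 yields the pair
    // 3/4 here, with *HiIsEmpty set so callers know the hi half carries no
    // real elements.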
    LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts, IsScalable);
    HiVT = EnvVT;
    *HiIsEmpty = true;
  }
  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.isScalableVector() == HiVT.isScalableVector() &&
         LoVT.isScalableVector() == N.getValueType().isScalableVector() &&
         "Splitting vector with an invalid mixture of fixed and scalable "
         "vector types");
  assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <=
             N.getValueType().getVectorMinNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo =
      getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL));
  // For scalable vectors it is safe to use LoVT.getVectorMinNumElements()
  // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales
  // IDX with the runtime scaling factor of the result vector type. For
  // fixed-width result vectors, that runtime scaling factor is 1.
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
  return std::make_pair(Lo, Hi);
}

/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
  EVT VT = N.getValueType();
  EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                NextPowerOf2(VT.getVectorNumElements()));
  return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
                 getVectorIdxConstant(0, DL));
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count,
                                         EVT EltVT) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();
  if (EltVT == EVT())
    EltVT = VT.getVectorElementType();
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op,
                           getVectorIdxConstant(i, SL)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned int NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  assert(getNumOperands() == DemandedElts.getBitWidth() &&
         "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
      isa<ConstantSDNode>(N.getOperand(0)))
    return N.getNode();
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}

SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}