1 //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This implements the SelectionDAG class. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/CodeGen/SelectionDAG.h" 14 #include "SDNodeDbgValue.h" 15 #include "llvm/ADT/APFloat.h" 16 #include "llvm/ADT/APInt.h" 17 #include "llvm/ADT/APSInt.h" 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/BitVector.h" 20 #include "llvm/ADT/FoldingSet.h" 21 #include "llvm/ADT/None.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SmallPtrSet.h" 24 #include "llvm/ADT/SmallVector.h" 25 #include "llvm/ADT/Triple.h" 26 #include "llvm/ADT/Twine.h" 27 #include "llvm/Analysis/BlockFrequencyInfo.h" 28 #include "llvm/Analysis/MemoryLocation.h" 29 #include "llvm/Analysis/ProfileSummaryInfo.h" 30 #include "llvm/Analysis/ValueTracking.h" 31 #include "llvm/CodeGen/ISDOpcodes.h" 32 #include "llvm/CodeGen/MachineBasicBlock.h" 33 #include "llvm/CodeGen/MachineConstantPool.h" 34 #include "llvm/CodeGen/MachineFrameInfo.h" 35 #include "llvm/CodeGen/MachineFunction.h" 36 #include "llvm/CodeGen/MachineMemOperand.h" 37 #include "llvm/CodeGen/RuntimeLibcalls.h" 38 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h" 39 #include "llvm/CodeGen/SelectionDAGNodes.h" 40 #include "llvm/CodeGen/SelectionDAGTargetInfo.h" 41 #include "llvm/CodeGen/TargetFrameLowering.h" 42 #include "llvm/CodeGen/TargetLowering.h" 43 #include "llvm/CodeGen/TargetRegisterInfo.h" 44 #include "llvm/CodeGen/TargetSubtargetInfo.h" 45 #include "llvm/CodeGen/ValueTypes.h" 46 #include "llvm/IR/Constant.h" 47 #include "llvm/IR/Constants.h" 48 #include "llvm/IR/DataLayout.h" 49 #include "llvm/IR/DebugInfoMetadata.h" 50 #include "llvm/IR/DebugLoc.h" 51 #include "llvm/IR/DerivedTypes.h" 52 #include "llvm/IR/Function.h" 53 #include "llvm/IR/GlobalValue.h" 54 #include "llvm/IR/Metadata.h" 55 #include "llvm/IR/Type.h" 56 #include "llvm/IR/Value.h" 57 #include "llvm/Support/Casting.h" 58 #include "llvm/Support/CodeGen.h" 59 #include "llvm/Support/Compiler.h" 60 #include "llvm/Support/Debug.h" 61 #include "llvm/Support/ErrorHandling.h" 62 #include "llvm/Support/KnownBits.h" 63 #include "llvm/Support/MachineValueType.h" 64 #include "llvm/Support/ManagedStatic.h" 65 #include "llvm/Support/MathExtras.h" 66 #include "llvm/Support/Mutex.h" 67 #include "llvm/Support/raw_ostream.h" 68 #include "llvm/Target/TargetMachine.h" 69 #include "llvm/Target/TargetOptions.h" 70 #include "llvm/Transforms/Utils/SizeOpts.h" 71 #include <algorithm> 72 #include <cassert> 73 #include <cstdint> 74 #include <cstdlib> 75 #include <limits> 76 #include <set> 77 #include <string> 78 #include <utility> 79 #include <vector> 80 81 using namespace llvm; 82 83 /// makeVTList - Return an instance of the SDVTList struct initialized with the 84 /// specified members. 85 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) { 86 SDVTList Res = {VTs, NumVTs}; 87 return Res; 88 } 89 90 // Default null implementations of the callbacks. 
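//
// Illustrative sketch (not part of the original file): a client that wants to
// observe DAG mutations can subclass DAGUpdateListener; the hypothetical
// listener below overrides only the callback it cares about. Registration
// happens in the base-class constructor and is undone in the destructor, so
// the callbacks fire only while the listener object is alive.
//
//   struct MyListener : SelectionDAG::DAGUpdateListener {
//     explicit MyListener(SelectionDAG &DAG) : DAGUpdateListener(DAG) {}
//     void NodeDeleted(SDNode *N, SDNode *Replacement) override {
//       // React to N being removed (Replacement may be null).
//     }
//   };
//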
91 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {} 92 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {} 93 void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {} 94 95 void SelectionDAG::DAGNodeDeletedListener::anchor() {} 96 97 #define DEBUG_TYPE "selectiondag" 98 99 static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt", 100 cl::Hidden, cl::init(true), 101 cl::desc("Gang up loads and stores generated by inlining of memcpy")); 102 103 static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max", 104 cl::desc("Number limit for gluing ld/st of memcpy."), 105 cl::Hidden, cl::init(0)); 106 107 static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) { 108 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G);); 109 } 110 111 //===----------------------------------------------------------------------===// 112 // ConstantFPSDNode Class 113 //===----------------------------------------------------------------------===// 114 115 /// isExactlyValue - We don't rely on operator== working on double values, as 116 /// it returns true for things that are clearly not equal, like -0.0 and 0.0. 117 /// As such, this method can be used to do an exact bit-for-bit comparison of 118 /// two floating point values. 119 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const { 120 return getValueAPF().bitwiseIsEqual(V); 121 } 122 123 bool ConstantFPSDNode::isValueValidForType(EVT VT, 124 const APFloat& Val) { 125 assert(VT.isFloatingPoint() && "Can only convert between FP types"); 126 127 // convert modifies in place, so make a copy. 128 APFloat Val2 = APFloat(Val); 129 bool losesInfo; 130 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT), 131 APFloat::rmNearestTiesToEven, 132 &losesInfo); 133 return !losesInfo; 134 } 135 136 //===----------------------------------------------------------------------===// 137 // ISD Namespace 138 //===----------------------------------------------------------------------===// 139 140 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) { 141 auto *BV = dyn_cast<BuildVectorSDNode>(N); 142 if (!BV) 143 return false; 144 145 APInt SplatUndef; 146 unsigned SplatBitSize; 147 bool HasUndefs; 148 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits(); 149 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs, 150 EltSize) && 151 EltSize == SplatBitSize; 152 } 153 154 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be 155 // specializations of the more general isConstantSplatVector()? 156 157 bool ISD::isBuildVectorAllOnes(const SDNode *N) { 158 // Look through a bit convert. 159 while (N->getOpcode() == ISD::BITCAST) 160 N = N->getOperand(0).getNode(); 161 162 if (N->getOpcode() != ISD::BUILD_VECTOR) return false; 163 164 unsigned i = 0, e = N->getNumOperands(); 165 166 // Skip over all of the undef values. 167 while (i != e && N->getOperand(i).isUndef()) 168 ++i; 169 170 // Do not accept an all-undef vector. 171 if (i == e) return false; 172 173 // Do not accept build_vectors that aren't all constants or which have non-~0 174 // elements. We have to be a bit careful here, as the type of the constant 175 // may not be the same as the type of the vector elements due to type 176 // legalization (the elements are promoted to a legal type for the target and 177 // a vector of a type may be legal when the base element type is not). 
178 // We only want to check enough bits to cover the vector elements, because 179 // we care if the resultant vector is all ones, not whether the individual 180 // constants are. 181 SDValue NotZero = N->getOperand(i); 182 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); 183 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) { 184 if (CN->getAPIntValue().countTrailingOnes() < EltSize) 185 return false; 186 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) { 187 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize) 188 return false; 189 } else 190 return false; 191 192 // Okay, we have at least one ~0 value, check to see if the rest match or are 193 // undefs. Even with the above element type twiddling, this should be OK, as 194 // the same type legalization should have applied to all the elements. 195 for (++i; i != e; ++i) 196 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef()) 197 return false; 198 return true; 199 } 200 201 bool ISD::isBuildVectorAllZeros(const SDNode *N) { 202 // Look through a bit convert. 203 while (N->getOpcode() == ISD::BITCAST) 204 N = N->getOperand(0).getNode(); 205 206 if (N->getOpcode() != ISD::BUILD_VECTOR) return false; 207 208 bool IsAllUndef = true; 209 for (const SDValue &Op : N->op_values()) { 210 if (Op.isUndef()) 211 continue; 212 IsAllUndef = false; 213 // Do not accept build_vectors that aren't all constants or which have non-0 214 // elements. We have to be a bit careful here, as the type of the constant 215 // may not be the same as the type of the vector elements due to type 216 // legalization (the elements are promoted to a legal type for the target 217 // and a vector of a type may be legal when the base element type is not). 218 // We only want to check enough bits to cover the vector elements, because 219 // we care if the resultant vector is all zeros, not whether the individual 220 // constants are. 221 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); 222 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) { 223 if (CN->getAPIntValue().countTrailingZeros() < EltSize) 224 return false; 225 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) { 226 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize) 227 return false; 228 } else 229 return false; 230 } 231 232 // Do not accept an all-undef vector. 233 if (IsAllUndef) 234 return false; 235 return true; 236 } 237 238 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) { 239 if (N->getOpcode() != ISD::BUILD_VECTOR) 240 return false; 241 242 for (const SDValue &Op : N->op_values()) { 243 if (Op.isUndef()) 244 continue; 245 if (!isa<ConstantSDNode>(Op)) 246 return false; 247 } 248 return true; 249 } 250 251 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) { 252 if (N->getOpcode() != ISD::BUILD_VECTOR) 253 return false; 254 255 for (const SDValue &Op : N->op_values()) { 256 if (Op.isUndef()) 257 continue; 258 if (!isa<ConstantFPSDNode>(Op)) 259 return false; 260 } 261 return true; 262 } 263 264 bool ISD::allOperandsUndef(const SDNode *N) { 265 // Return false if the node has no operands. 266 // This is "logically inconsistent" with the definition of "all" but 267 // is probably the desired behavior. 
268 if (N->getNumOperands() == 0) 269 return false; 270 return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); }); 271 } 272 273 bool ISD::matchUnaryPredicate(SDValue Op, 274 std::function<bool(ConstantSDNode *)> Match, 275 bool AllowUndefs) { 276 // FIXME: Add support for scalar UNDEF cases? 277 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) 278 return Match(Cst); 279 280 // FIXME: Add support for vector UNDEF cases? 281 if (ISD::BUILD_VECTOR != Op.getOpcode()) 282 return false; 283 284 EVT SVT = Op.getValueType().getScalarType(); 285 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 286 if (AllowUndefs && Op.getOperand(i).isUndef()) { 287 if (!Match(nullptr)) 288 return false; 289 continue; 290 } 291 292 auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i)); 293 if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst)) 294 return false; 295 } 296 return true; 297 } 298 299 bool ISD::matchBinaryPredicate( 300 SDValue LHS, SDValue RHS, 301 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match, 302 bool AllowUndefs, bool AllowTypeMismatch) { 303 if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType()) 304 return false; 305 306 // TODO: Add support for scalar UNDEF cases? 307 if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS)) 308 if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS)) 309 return Match(LHSCst, RHSCst); 310 311 // TODO: Add support for vector UNDEF cases? 312 if (ISD::BUILD_VECTOR != LHS.getOpcode() || 313 ISD::BUILD_VECTOR != RHS.getOpcode()) 314 return false; 315 316 EVT SVT = LHS.getValueType().getScalarType(); 317 for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) { 318 SDValue LHSOp = LHS.getOperand(i); 319 SDValue RHSOp = RHS.getOperand(i); 320 bool LHSUndef = AllowUndefs && LHSOp.isUndef(); 321 bool RHSUndef = AllowUndefs && RHSOp.isUndef(); 322 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp); 323 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp); 324 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef)) 325 return false; 326 if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT || 327 LHSOp.getValueType() != RHSOp.getValueType())) 328 return false; 329 if (!Match(LHSCst, RHSCst)) 330 return false; 331 } 332 return true; 333 } 334 335 ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) { 336 switch (ExtType) { 337 case ISD::EXTLOAD: 338 return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND; 339 case ISD::SEXTLOAD: 340 return ISD::SIGN_EXTEND; 341 case ISD::ZEXTLOAD: 342 return ISD::ZERO_EXTEND; 343 default: 344 break; 345 } 346 347 llvm_unreachable("Invalid LoadExtType"); 348 } 349 350 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) { 351 // To perform this operation, we just need to swap the L and G bits of the 352 // operation. 353 unsigned OldL = (Operation >> 2) & 1; 354 unsigned OldG = (Operation >> 1) & 1; 355 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits 356 (OldL << 1) | // New G bit 357 (OldG << 2)); // New L bit. 358 } 359 360 static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) { 361 unsigned Operation = Op; 362 if (isIntegerLike) 363 Operation ^= 7; // Flip L, G, E bits, but not U. 364 else 365 Operation ^= 15; // Flip all of the condition bits. 366 367 if (Operation > ISD::SETTRUE2) 368 Operation &= ~8; // Don't let N and U bits get set. 
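  // Illustrative note (not in the original source): semantically, inverting an
  // integer compare flips only the L/G/E predicate bits, so SETLT becomes
  // SETGE and SETEQ becomes SETNE, while inverting a floating-point compare
  // also flips the unordered bit, so SETOLT becomes SETUGE and SETOEQ becomes
  // SETUNE.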
369 370 return ISD::CondCode(Operation); 371 } 372 373 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) { 374 return getSetCCInverseImpl(Op, Type.isInteger()); 375 } 376 377 ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op, 378 bool isIntegerLike) { 379 return getSetCCInverseImpl(Op, isIntegerLike); 380 } 381 382 /// For an integer comparison, return 1 if the comparison is a signed operation 383 /// and 2 if the result is an unsigned comparison. Return zero if the operation 384 /// does not depend on the sign of the input (setne and seteq). 385 static int isSignedOp(ISD::CondCode Opcode) { 386 switch (Opcode) { 387 default: llvm_unreachable("Illegal integer setcc operation!"); 388 case ISD::SETEQ: 389 case ISD::SETNE: return 0; 390 case ISD::SETLT: 391 case ISD::SETLE: 392 case ISD::SETGT: 393 case ISD::SETGE: return 1; 394 case ISD::SETULT: 395 case ISD::SETULE: 396 case ISD::SETUGT: 397 case ISD::SETUGE: return 2; 398 } 399 } 400 401 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2, 402 EVT Type) { 403 bool IsInteger = Type.isInteger(); 404 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3) 405 // Cannot fold a signed integer setcc with an unsigned integer setcc. 406 return ISD::SETCC_INVALID; 407 408 unsigned Op = Op1 | Op2; // Combine all of the condition bits. 409 410 // If the N and U bits get set, then the resultant comparison DOES suddenly 411 // care about orderedness, and it is true when ordered. 412 if (Op > ISD::SETTRUE2) 413 Op &= ~16; // Clear the U bit if the N bit is set. 414 415 // Canonicalize illegal integer setcc's. 416 if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT 417 Op = ISD::SETNE; 418 419 return ISD::CondCode(Op); 420 } 421 422 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2, 423 EVT Type) { 424 bool IsInteger = Type.isInteger(); 425 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3) 426 // Cannot fold a signed setcc with an unsigned setcc. 427 return ISD::SETCC_INVALID; 428 429 // Combine all of the condition bits. 430 ISD::CondCode Result = ISD::CondCode(Op1 & Op2); 431 432 // Canonicalize illegal integer setcc's. 433 if (IsInteger) { 434 switch (Result) { 435 default: break; 436 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT 437 case ISD::SETOEQ: // SETEQ & SETU[LG]E 438 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE 439 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE 440 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE 441 } 442 } 443 444 return Result; 445 } 446 447 //===----------------------------------------------------------------------===// 448 // SDNode Profile Support 449 //===----------------------------------------------------------------------===// 450 451 /// AddNodeIDOpcode - Add the node opcode to the NodeID data. 452 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) { 453 ID.AddInteger(OpC); 454 } 455 456 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them 457 /// solely with their pointer. 458 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) { 459 ID.AddPointer(VTList.VTs); 460 } 461 462 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 
463 static void AddNodeIDOperands(FoldingSetNodeID &ID, 464 ArrayRef<SDValue> Ops) { 465 for (auto& Op : Ops) { 466 ID.AddPointer(Op.getNode()); 467 ID.AddInteger(Op.getResNo()); 468 } 469 } 470 471 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 472 static void AddNodeIDOperands(FoldingSetNodeID &ID, 473 ArrayRef<SDUse> Ops) { 474 for (auto& Op : Ops) { 475 ID.AddPointer(Op.getNode()); 476 ID.AddInteger(Op.getResNo()); 477 } 478 } 479 480 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC, 481 SDVTList VTList, ArrayRef<SDValue> OpList) { 482 AddNodeIDOpcode(ID, OpC); 483 AddNodeIDValueTypes(ID, VTList); 484 AddNodeIDOperands(ID, OpList); 485 } 486 487 /// If this is an SDNode with special info, add this info to the NodeID data. 488 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) { 489 switch (N->getOpcode()) { 490 case ISD::TargetExternalSymbol: 491 case ISD::ExternalSymbol: 492 case ISD::MCSymbol: 493 llvm_unreachable("Should only be used on nodes with operands"); 494 default: break; // Normal nodes don't need extra info. 495 case ISD::TargetConstant: 496 case ISD::Constant: { 497 const ConstantSDNode *C = cast<ConstantSDNode>(N); 498 ID.AddPointer(C->getConstantIntValue()); 499 ID.AddBoolean(C->isOpaque()); 500 break; 501 } 502 case ISD::TargetConstantFP: 503 case ISD::ConstantFP: 504 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue()); 505 break; 506 case ISD::TargetGlobalAddress: 507 case ISD::GlobalAddress: 508 case ISD::TargetGlobalTLSAddress: 509 case ISD::GlobalTLSAddress: { 510 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N); 511 ID.AddPointer(GA->getGlobal()); 512 ID.AddInteger(GA->getOffset()); 513 ID.AddInteger(GA->getTargetFlags()); 514 break; 515 } 516 case ISD::BasicBlock: 517 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock()); 518 break; 519 case ISD::Register: 520 ID.AddInteger(cast<RegisterSDNode>(N)->getReg()); 521 break; 522 case ISD::RegisterMask: 523 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask()); 524 break; 525 case ISD::SRCVALUE: 526 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue()); 527 break; 528 case ISD::FrameIndex: 529 case ISD::TargetFrameIndex: 530 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex()); 531 break; 532 case ISD::LIFETIME_START: 533 case ISD::LIFETIME_END: 534 if (cast<LifetimeSDNode>(N)->hasOffset()) { 535 ID.AddInteger(cast<LifetimeSDNode>(N)->getSize()); 536 ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset()); 537 } 538 break; 539 case ISD::JumpTable: 540 case ISD::TargetJumpTable: 541 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex()); 542 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags()); 543 break; 544 case ISD::ConstantPool: 545 case ISD::TargetConstantPool: { 546 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N); 547 ID.AddInteger(CP->getAlign().value()); 548 ID.AddInteger(CP->getOffset()); 549 if (CP->isMachineConstantPoolEntry()) 550 CP->getMachineCPVal()->addSelectionDAGCSEId(ID); 551 else 552 ID.AddPointer(CP->getConstVal()); 553 ID.AddInteger(CP->getTargetFlags()); 554 break; 555 } 556 case ISD::TargetIndex: { 557 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N); 558 ID.AddInteger(TI->getIndex()); 559 ID.AddInteger(TI->getOffset()); 560 ID.AddInteger(TI->getTargetFlags()); 561 break; 562 } 563 case ISD::LOAD: { 564 const LoadSDNode *LD = cast<LoadSDNode>(N); 565 ID.AddInteger(LD->getMemoryVT().getRawBits()); 566 ID.AddInteger(LD->getRawSubclassData()); 567 
ID.AddInteger(LD->getPointerInfo().getAddrSpace()); 568 break; 569 } 570 case ISD::STORE: { 571 const StoreSDNode *ST = cast<StoreSDNode>(N); 572 ID.AddInteger(ST->getMemoryVT().getRawBits()); 573 ID.AddInteger(ST->getRawSubclassData()); 574 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 575 break; 576 } 577 case ISD::MLOAD: { 578 const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N); 579 ID.AddInteger(MLD->getMemoryVT().getRawBits()); 580 ID.AddInteger(MLD->getRawSubclassData()); 581 ID.AddInteger(MLD->getPointerInfo().getAddrSpace()); 582 break; 583 } 584 case ISD::MSTORE: { 585 const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N); 586 ID.AddInteger(MST->getMemoryVT().getRawBits()); 587 ID.AddInteger(MST->getRawSubclassData()); 588 ID.AddInteger(MST->getPointerInfo().getAddrSpace()); 589 break; 590 } 591 case ISD::MGATHER: { 592 const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N); 593 ID.AddInteger(MG->getMemoryVT().getRawBits()); 594 ID.AddInteger(MG->getRawSubclassData()); 595 ID.AddInteger(MG->getPointerInfo().getAddrSpace()); 596 break; 597 } 598 case ISD::MSCATTER: { 599 const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N); 600 ID.AddInteger(MS->getMemoryVT().getRawBits()); 601 ID.AddInteger(MS->getRawSubclassData()); 602 ID.AddInteger(MS->getPointerInfo().getAddrSpace()); 603 break; 604 } 605 case ISD::ATOMIC_CMP_SWAP: 606 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 607 case ISD::ATOMIC_SWAP: 608 case ISD::ATOMIC_LOAD_ADD: 609 case ISD::ATOMIC_LOAD_SUB: 610 case ISD::ATOMIC_LOAD_AND: 611 case ISD::ATOMIC_LOAD_CLR: 612 case ISD::ATOMIC_LOAD_OR: 613 case ISD::ATOMIC_LOAD_XOR: 614 case ISD::ATOMIC_LOAD_NAND: 615 case ISD::ATOMIC_LOAD_MIN: 616 case ISD::ATOMIC_LOAD_MAX: 617 case ISD::ATOMIC_LOAD_UMIN: 618 case ISD::ATOMIC_LOAD_UMAX: 619 case ISD::ATOMIC_LOAD: 620 case ISD::ATOMIC_STORE: { 621 const AtomicSDNode *AT = cast<AtomicSDNode>(N); 622 ID.AddInteger(AT->getMemoryVT().getRawBits()); 623 ID.AddInteger(AT->getRawSubclassData()); 624 ID.AddInteger(AT->getPointerInfo().getAddrSpace()); 625 break; 626 } 627 case ISD::PREFETCH: { 628 const MemSDNode *PF = cast<MemSDNode>(N); 629 ID.AddInteger(PF->getPointerInfo().getAddrSpace()); 630 break; 631 } 632 case ISD::VECTOR_SHUFFLE: { 633 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 634 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements(); 635 i != e; ++i) 636 ID.AddInteger(SVN->getMaskElt(i)); 637 break; 638 } 639 case ISD::TargetBlockAddress: 640 case ISD::BlockAddress: { 641 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N); 642 ID.AddPointer(BA->getBlockAddress()); 643 ID.AddInteger(BA->getOffset()); 644 ID.AddInteger(BA->getTargetFlags()); 645 break; 646 } 647 } // end switch (N->getOpcode()) 648 649 // Target specific memory nodes could also have address spaces to check. 650 if (N->isTargetMemoryOpcode()) 651 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace()); 652 } 653 654 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID 655 /// data. 656 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) { 657 AddNodeIDOpcode(ID, N->getOpcode()); 658 // Add the return value info. 659 AddNodeIDValueTypes(ID, N->getVTList()); 660 // Add the operand info. 661 AddNodeIDOperands(ID, N->ops()); 662 663 // Handle SDNode leafs with special info. 
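  // Illustrative note (not in the original source): for a LOAD node, for
  // example, the final ID is roughly
  //   { ISD::LOAD, <interned VT list pointer>,
  //     (Chain node, res#), (BasePtr node, res#), (Offset node, res#),
  //     memory VT raw bits, raw subclass data, address space }
  // i.e. the generic opcode/value-type/operand data added above plus the
  // custom per-opcode fields added below, which is what lets CSE distinguish
  // loads that differ only in their memory semantics.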
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to the next node if we've already managed to delete this node. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N) {
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
764 DeleteNodeNotInCSEMaps(N); 765 } 766 767 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) { 768 assert(N->getIterator() != AllNodes.begin() && 769 "Cannot delete the entry node!"); 770 assert(N->use_empty() && "Cannot delete a node that is not dead!"); 771 772 // Drop all of the operands and decrement used node's use counts. 773 N->DropOperands(); 774 775 DeallocateNode(N); 776 } 777 778 void SDDbgInfo::erase(const SDNode *Node) { 779 DbgValMapType::iterator I = DbgValMap.find(Node); 780 if (I == DbgValMap.end()) 781 return; 782 for (auto &Val: I->second) 783 Val->setIsInvalidated(); 784 DbgValMap.erase(I); 785 } 786 787 void SelectionDAG::DeallocateNode(SDNode *N) { 788 // If we have operands, deallocate them. 789 removeOperands(N); 790 791 NodeAllocator.Deallocate(AllNodes.remove(N)); 792 793 // Set the opcode to DELETED_NODE to help catch bugs when node 794 // memory is reallocated. 795 // FIXME: There are places in SDag that have grown a dependency on the opcode 796 // value in the released node. 797 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType)); 798 N->NodeType = ISD::DELETED_NODE; 799 800 // If any of the SDDbgValue nodes refer to this SDNode, invalidate 801 // them and forget about that node. 802 DbgInfo->erase(N); 803 } 804 805 #ifndef NDEBUG 806 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid. 807 static void VerifySDNode(SDNode *N) { 808 switch (N->getOpcode()) { 809 default: 810 break; 811 case ISD::BUILD_PAIR: { 812 EVT VT = N->getValueType(0); 813 assert(N->getNumValues() == 1 && "Too many results!"); 814 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) && 815 "Wrong return type!"); 816 assert(N->getNumOperands() == 2 && "Wrong number of operands!"); 817 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() && 818 "Mismatched operand types!"); 819 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() && 820 "Wrong operand type!"); 821 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() && 822 "Wrong return type size"); 823 break; 824 } 825 case ISD::BUILD_VECTOR: { 826 assert(N->getNumValues() == 1 && "Too many results!"); 827 assert(N->getValueType(0).isVector() && "Wrong return type!"); 828 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() && 829 "Wrong number of operands!"); 830 EVT EltVT = N->getValueType(0).getVectorElementType(); 831 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) { 832 assert((I->getValueType() == EltVT || 833 (EltVT.isInteger() && I->getValueType().isInteger() && 834 EltVT.bitsLE(I->getValueType()))) && 835 "Wrong operand type!"); 836 assert(I->getValueType() == N->getOperand(0).getValueType() && 837 "Operands must all have the same type"); 838 } 839 break; 840 } 841 } 842 } 843 #endif // NDEBUG 844 845 /// Insert a newly allocated node into the DAG. 846 /// 847 /// Handles insertion into the all nodes list and CSE map, as well as 848 /// verification and other common operations when a new node is allocated. 849 void SelectionDAG::InsertNode(SDNode *N) { 850 AllNodes.push_back(N); 851 #ifndef NDEBUG 852 N->PersistentId = NextPersistentId++; 853 VerifySDNode(N); 854 #endif 855 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) 856 DUL->NodeInserted(N); 857 } 858 859 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that 860 /// correspond to it. This is useful when we're about to delete or repurpose 861 /// the node. 
/// We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that
  // are not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the
/// CSE maps and modified in place. Add it back to the CSE maps, unless an
/// identical node already exists, in which case transfer all its users to the
/// existing node. This transfer can potentially trigger recursive merging.
void SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null; otherwise, return a pointer to the slot it would take.
If a 950 /// node already exists with these operands, the slot will be non-null. 951 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op, 952 void *&InsertPos) { 953 if (doNotCSE(N)) 954 return nullptr; 955 956 SDValue Ops[] = { Op }; 957 FoldingSetNodeID ID; 958 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 959 AddNodeIDCustom(ID, N); 960 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 961 if (Node) 962 Node->intersectFlagsWith(N->getFlags()); 963 return Node; 964 } 965 966 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 967 /// were replaced with those specified. If this node is never memoized, 968 /// return null, otherwise return a pointer to the slot it would take. If a 969 /// node already exists with these operands, the slot will be non-null. 970 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, 971 SDValue Op1, SDValue Op2, 972 void *&InsertPos) { 973 if (doNotCSE(N)) 974 return nullptr; 975 976 SDValue Ops[] = { Op1, Op2 }; 977 FoldingSetNodeID ID; 978 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 979 AddNodeIDCustom(ID, N); 980 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 981 if (Node) 982 Node->intersectFlagsWith(N->getFlags()); 983 return Node; 984 } 985 986 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands 987 /// were replaced with those specified. If this node is never memoized, 988 /// return null, otherwise return a pointer to the slot it would take. If a 989 /// node already exists with these operands, the slot will be non-null. 990 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops, 991 void *&InsertPos) { 992 if (doNotCSE(N)) 993 return nullptr; 994 995 FoldingSetNodeID ID; 996 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); 997 AddNodeIDCustom(ID, N); 998 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos); 999 if (Node) 1000 Node->intersectFlagsWith(N->getFlags()); 1001 return Node; 1002 } 1003 1004 Align SelectionDAG::getEVTAlign(EVT VT) const { 1005 Type *Ty = VT == MVT::iPTR ? 1006 PointerType::get(Type::getInt8Ty(*getContext()), 0) : 1007 VT.getTypeForEVT(*getContext()); 1008 1009 return getDataLayout().getABITypeAlign(Ty); 1010 } 1011 1012 // EntryNode could meaningfully have debug info if we can find it... 
1013 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL) 1014 : TM(tm), OptLevel(OL), 1015 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)), 1016 Root(getEntryNode()) { 1017 InsertNode(&EntryNode); 1018 DbgInfo = new SDDbgInfo(); 1019 } 1020 1021 void SelectionDAG::init(MachineFunction &NewMF, 1022 OptimizationRemarkEmitter &NewORE, 1023 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, 1024 LegacyDivergenceAnalysis * Divergence, 1025 ProfileSummaryInfo *PSIin, 1026 BlockFrequencyInfo *BFIin) { 1027 MF = &NewMF; 1028 SDAGISelPass = PassPtr; 1029 ORE = &NewORE; 1030 TLI = getSubtarget().getTargetLowering(); 1031 TSI = getSubtarget().getSelectionDAGInfo(); 1032 LibInfo = LibraryInfo; 1033 Context = &MF->getFunction().getContext(); 1034 DA = Divergence; 1035 PSI = PSIin; 1036 BFI = BFIin; 1037 } 1038 1039 SelectionDAG::~SelectionDAG() { 1040 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners"); 1041 allnodes_clear(); 1042 OperandRecycler.clear(OperandAllocator); 1043 delete DbgInfo; 1044 } 1045 1046 bool SelectionDAG::shouldOptForSize() const { 1047 return MF->getFunction().hasOptSize() || 1048 llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI); 1049 } 1050 1051 void SelectionDAG::allnodes_clear() { 1052 assert(&*AllNodes.begin() == &EntryNode); 1053 AllNodes.remove(AllNodes.begin()); 1054 while (!AllNodes.empty()) 1055 DeallocateNode(&AllNodes.front()); 1056 #ifndef NDEBUG 1057 NextPersistentId = 0; 1058 #endif 1059 } 1060 1061 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID, 1062 void *&InsertPos) { 1063 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos); 1064 if (N) { 1065 switch (N->getOpcode()) { 1066 default: break; 1067 case ISD::Constant: 1068 case ISD::ConstantFP: 1069 llvm_unreachable("Querying for Constant and ConstantFP nodes requires " 1070 "debug location. Use another overload."); 1071 } 1072 } 1073 return N; 1074 } 1075 1076 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID, 1077 const SDLoc &DL, void *&InsertPos) { 1078 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos); 1079 if (N) { 1080 switch (N->getOpcode()) { 1081 case ISD::Constant: 1082 case ISD::ConstantFP: 1083 // Erase debug location from the node if the node is used at several 1084 // different places. Do not propagate one location to all uses as it 1085 // will cause a worse single stepping debugging experience. 1086 if (N->getDebugLoc() != DL.getDebugLoc()) 1087 N->setDebugLoc(DebugLoc()); 1088 break; 1089 default: 1090 // When the node's point of use is located earlier in the instruction 1091 // sequence than its prior point of use, update its debug info to the 1092 // earlier location. 
1093 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder()) 1094 N->setDebugLoc(DL.getDebugLoc()); 1095 break; 1096 } 1097 } 1098 return N; 1099 } 1100 1101 void SelectionDAG::clear() { 1102 allnodes_clear(); 1103 OperandRecycler.clear(OperandAllocator); 1104 OperandAllocator.Reset(); 1105 CSEMap.clear(); 1106 1107 ExtendedValueTypeNodes.clear(); 1108 ExternalSymbols.clear(); 1109 TargetExternalSymbols.clear(); 1110 MCSymbols.clear(); 1111 SDCallSiteDbgInfo.clear(); 1112 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(), 1113 static_cast<CondCodeSDNode*>(nullptr)); 1114 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(), 1115 static_cast<SDNode*>(nullptr)); 1116 1117 EntryNode.UseList = nullptr; 1118 InsertNode(&EntryNode); 1119 Root = getEntryNode(); 1120 DbgInfo->clear(); 1121 } 1122 1123 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) { 1124 return VT.bitsGT(Op.getValueType()) 1125 ? getNode(ISD::FP_EXTEND, DL, VT, Op) 1126 : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL)); 1127 } 1128 1129 std::pair<SDValue, SDValue> 1130 SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain, 1131 const SDLoc &DL, EVT VT) { 1132 assert(!VT.bitsEq(Op.getValueType()) && 1133 "Strict no-op FP extend/round not allowed."); 1134 SDValue Res = 1135 VT.bitsGT(Op.getValueType()) 1136 ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op}) 1137 : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other}, 1138 {Chain, Op, getIntPtrConstant(0, DL)}); 1139 1140 return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1)); 1141 } 1142 1143 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1144 return VT.bitsGT(Op.getValueType()) ? 1145 getNode(ISD::ANY_EXTEND, DL, VT, Op) : 1146 getNode(ISD::TRUNCATE, DL, VT, Op); 1147 } 1148 1149 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1150 return VT.bitsGT(Op.getValueType()) ? 1151 getNode(ISD::SIGN_EXTEND, DL, VT, Op) : 1152 getNode(ISD::TRUNCATE, DL, VT, Op); 1153 } 1154 1155 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1156 return VT.bitsGT(Op.getValueType()) ? 1157 getNode(ISD::ZERO_EXTEND, DL, VT, Op) : 1158 getNode(ISD::TRUNCATE, DL, VT, Op); 1159 } 1160 1161 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, 1162 EVT OpVT) { 1163 if (VT.bitsLE(Op.getValueType())) 1164 return getNode(ISD::TRUNCATE, SL, VT, Op); 1165 1166 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT); 1167 return getNode(TLI->getExtendForContent(BType), SL, VT, Op); 1168 } 1169 1170 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) { 1171 EVT OpVT = Op.getValueType(); 1172 assert(VT.isInteger() && OpVT.isInteger() && 1173 "Cannot getZeroExtendInReg FP types"); 1174 assert(VT.isVector() == OpVT.isVector() && 1175 "getZeroExtendInReg type should be vector iff the operand " 1176 "type is vector!"); 1177 assert((!VT.isVector() || 1178 VT.getVectorElementCount() == OpVT.getVectorElementCount()) && 1179 "Vector element counts must match in getZeroExtendInReg"); 1180 assert(VT.bitsLE(OpVT) && "Not extending!"); 1181 if (OpVT == VT) 1182 return Op; 1183 APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(), 1184 VT.getScalarSizeInBits()); 1185 return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT)); 1186 } 1187 1188 SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { 1189 // Only unsigned pointer semantics are supported right now. 
In the future this 1190 // might delegate to TLI to check pointer signedness. 1191 return getZExtOrTrunc(Op, DL, VT); 1192 } 1193 1194 SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) { 1195 // Only unsigned pointer semantics are supported right now. In the future this 1196 // might delegate to TLI to check pointer signedness. 1197 return getZeroExtendInReg(Op, DL, VT); 1198 } 1199 1200 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1). 1201 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) { 1202 EVT EltVT = VT.getScalarType(); 1203 SDValue NegOne = 1204 getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT); 1205 return getNode(ISD::XOR, DL, VT, Val, NegOne); 1206 } 1207 1208 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) { 1209 SDValue TrueValue = getBoolConstant(true, DL, VT, VT); 1210 return getNode(ISD::XOR, DL, VT, Val, TrueValue); 1211 } 1212 1213 SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT, 1214 EVT OpVT) { 1215 if (!V) 1216 return getConstant(0, DL, VT); 1217 1218 switch (TLI->getBooleanContents(OpVT)) { 1219 case TargetLowering::ZeroOrOneBooleanContent: 1220 case TargetLowering::UndefinedBooleanContent: 1221 return getConstant(1, DL, VT); 1222 case TargetLowering::ZeroOrNegativeOneBooleanContent: 1223 return getAllOnesConstant(DL, VT); 1224 } 1225 llvm_unreachable("Unexpected boolean content enum!"); 1226 } 1227 1228 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT, 1229 bool isT, bool isO) { 1230 EVT EltVT = VT.getScalarType(); 1231 assert((EltVT.getSizeInBits() >= 64 || 1232 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) && 1233 "getConstant with a uint64_t value that doesn't fit in the type!"); 1234 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO); 1235 } 1236 1237 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT, 1238 bool isT, bool isO) { 1239 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO); 1240 } 1241 1242 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL, 1243 EVT VT, bool isT, bool isO) { 1244 assert(VT.isInteger() && "Cannot create FP integer constant!"); 1245 1246 EVT EltVT = VT.getScalarType(); 1247 const ConstantInt *Elt = &Val; 1248 1249 // In some cases the vector type is legal but the element type is illegal and 1250 // needs to be promoted, for example v8i8 on ARM. In this case, promote the 1251 // inserted value (the type does not need to match the vector element type). 1252 // Any extra bits introduced will be truncated away. 1253 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) == 1254 TargetLowering::TypePromoteInteger) { 1255 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); 1256 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits()); 1257 Elt = ConstantInt::get(*getContext(), NewVal); 1258 } 1259 // In other cases the element type is illegal and needs to be expanded, for 1260 // example v2i64 on MIPS32. In this case, find the nearest legal type, split 1261 // the value into n parts and use a vector type with n-times the elements. 1262 // Then bitcast to the type requested. 1263 // Legalizing constants too early makes the DAGCombiner's job harder so we 1264 // only legalize if the DAG tells us we must produce legal types. 
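  // Illustrative example (not in the original source): under these rules a
  // v2i64 splat of 0x0000000100000002 on a 32-bit target that must expand i64
  // would be emitted by the branch below as a v4i32 BUILD_VECTOR of the two
  // 32-bit halves repeated per element (<2, 1, 2, 1> in little-endian part
  // order, reversed for big-endian) and then bitcast back to v2i64.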
1265 else if (NewNodesMustHaveLegalTypes && VT.isVector() && 1266 TLI->getTypeAction(*getContext(), EltVT) == 1267 TargetLowering::TypeExpandInteger) { 1268 const APInt &NewVal = Elt->getValue(); 1269 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); 1270 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits(); 1271 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits; 1272 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts); 1273 1274 // Check the temporary vector is the correct size. If this fails then 1275 // getTypeToTransformTo() probably returned a type whose size (in bits) 1276 // isn't a power-of-2 factor of the requested type size. 1277 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits()); 1278 1279 SmallVector<SDValue, 2> EltParts; 1280 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) { 1281 EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits) 1282 .zextOrTrunc(ViaEltSizeInBits), DL, 1283 ViaEltVT, isT, isO)); 1284 } 1285 1286 // EltParts is currently in little endian order. If we actually want 1287 // big-endian order then reverse it now. 1288 if (getDataLayout().isBigEndian()) 1289 std::reverse(EltParts.begin(), EltParts.end()); 1290 1291 // The elements must be reversed when the element order is different 1292 // to the endianness of the elements (because the BITCAST is itself a 1293 // vector shuffle in this situation). However, we do not need any code to 1294 // perform this reversal because getConstant() is producing a vector 1295 // splat. 1296 // This situation occurs in MIPS MSA. 1297 1298 SmallVector<SDValue, 8> Ops; 1299 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 1300 Ops.insert(Ops.end(), EltParts.begin(), EltParts.end()); 1301 1302 SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops)); 1303 return V; 1304 } 1305 1306 assert(Elt->getBitWidth() == EltVT.getSizeInBits() && 1307 "APInt size does not match type size!"); 1308 unsigned Opc = isT ? 
ISD::TargetConstant : ISD::Constant; 1309 FoldingSetNodeID ID; 1310 AddNodeIDNode(ID, Opc, getVTList(EltVT), None); 1311 ID.AddPointer(Elt); 1312 ID.AddBoolean(isO); 1313 void *IP = nullptr; 1314 SDNode *N = nullptr; 1315 if ((N = FindNodeOrInsertPos(ID, DL, IP))) 1316 if (!VT.isVector()) 1317 return SDValue(N, 0); 1318 1319 if (!N) { 1320 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT); 1321 CSEMap.InsertNode(N, IP); 1322 InsertNode(N); 1323 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this); 1324 } 1325 1326 SDValue Result(N, 0); 1327 if (VT.isScalableVector()) 1328 Result = getSplatVector(VT, DL, Result); 1329 else if (VT.isVector()) 1330 Result = getSplatBuildVector(VT, DL, Result); 1331 1332 return Result; 1333 } 1334 1335 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL, 1336 bool isTarget) { 1337 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget); 1338 } 1339 1340 SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT, 1341 const SDLoc &DL, bool LegalTypes) { 1342 assert(VT.isInteger() && "Shift amount is not an integer type!"); 1343 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes); 1344 return getConstant(Val, DL, ShiftVT); 1345 } 1346 1347 SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL, 1348 bool isTarget) { 1349 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget); 1350 } 1351 1352 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT, 1353 bool isTarget) { 1354 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget); 1355 } 1356 1357 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL, 1358 EVT VT, bool isTarget) { 1359 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!"); 1360 1361 EVT EltVT = VT.getScalarType(); 1362 1363 // Do the map lookup using the actual bit pattern for the floating point 1364 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and 1365 // we don't have issues with SNANs. 1366 unsigned Opc = isTarget ? 
ISD::TargetConstantFP : ISD::ConstantFP; 1367 FoldingSetNodeID ID; 1368 AddNodeIDNode(ID, Opc, getVTList(EltVT), None); 1369 ID.AddPointer(&V); 1370 void *IP = nullptr; 1371 SDNode *N = nullptr; 1372 if ((N = FindNodeOrInsertPos(ID, DL, IP))) 1373 if (!VT.isVector()) 1374 return SDValue(N, 0); 1375 1376 if (!N) { 1377 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT); 1378 CSEMap.InsertNode(N, IP); 1379 InsertNode(N); 1380 } 1381 1382 SDValue Result(N, 0); 1383 if (VT.isVector()) 1384 Result = getSplatBuildVector(VT, DL, Result); 1385 NewSDValueDbgMsg(Result, "Creating fp constant: ", this); 1386 return Result; 1387 } 1388 1389 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT, 1390 bool isTarget) { 1391 EVT EltVT = VT.getScalarType(); 1392 if (EltVT == MVT::f32) 1393 return getConstantFP(APFloat((float)Val), DL, VT, isTarget); 1394 else if (EltVT == MVT::f64) 1395 return getConstantFP(APFloat(Val), DL, VT, isTarget); 1396 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 || 1397 EltVT == MVT::f16) { 1398 bool Ignored; 1399 APFloat APF = APFloat(Val); 1400 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven, 1401 &Ignored); 1402 return getConstantFP(APF, DL, VT, isTarget); 1403 } else 1404 llvm_unreachable("Unsupported type in getConstantFP"); 1405 } 1406 1407 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, 1408 EVT VT, int64_t Offset, bool isTargetGA, 1409 unsigned TargetFlags) { 1410 assert((TargetFlags == 0 || isTargetGA) && 1411 "Cannot set target flags on target-independent globals"); 1412 1413 // Truncate (with sign-extension) the offset value to the pointer size. 1414 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); 1415 if (BitWidth < 64) 1416 Offset = SignExtend64(Offset, BitWidth); 1417 1418 unsigned Opc; 1419 if (GV->isThreadLocal()) 1420 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress; 1421 else 1422 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress; 1423 1424 FoldingSetNodeID ID; 1425 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1426 ID.AddPointer(GV); 1427 ID.AddInteger(Offset); 1428 ID.AddInteger(TargetFlags); 1429 void *IP = nullptr; 1430 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 1431 return SDValue(E, 0); 1432 1433 auto *N = newSDNode<GlobalAddressSDNode>( 1434 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags); 1435 CSEMap.InsertNode(N, IP); 1436 InsertNode(N); 1437 return SDValue(N, 0); 1438 } 1439 1440 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) { 1441 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex; 1442 FoldingSetNodeID ID; 1443 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1444 ID.AddInteger(FI); 1445 void *IP = nullptr; 1446 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1447 return SDValue(E, 0); 1448 1449 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget); 1450 CSEMap.InsertNode(N, IP); 1451 InsertNode(N); 1452 return SDValue(N, 0); 1453 } 1454 1455 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget, 1456 unsigned TargetFlags) { 1457 assert((TargetFlags == 0 || isTarget) && 1458 "Cannot set target flags on target-independent jump tables"); 1459 unsigned Opc = isTarget ? 
ISD::TargetJumpTable : ISD::JumpTable; 1460 FoldingSetNodeID ID; 1461 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1462 ID.AddInteger(JTI); 1463 ID.AddInteger(TargetFlags); 1464 void *IP = nullptr; 1465 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1466 return SDValue(E, 0); 1467 1468 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags); 1469 CSEMap.InsertNode(N, IP); 1470 InsertNode(N); 1471 return SDValue(N, 0); 1472 } 1473 1474 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT, 1475 MaybeAlign Alignment, int Offset, 1476 bool isTarget, unsigned TargetFlags) { 1477 assert((TargetFlags == 0 || isTarget) && 1478 "Cannot set target flags on target-independent globals"); 1479 if (!Alignment) 1480 Alignment = shouldOptForSize() 1481 ? getDataLayout().getABITypeAlign(C->getType()) 1482 : getDataLayout().getPrefTypeAlign(C->getType()); 1483 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; 1484 FoldingSetNodeID ID; 1485 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1486 ID.AddInteger(Alignment->value()); 1487 ID.AddInteger(Offset); 1488 ID.AddPointer(C); 1489 ID.AddInteger(TargetFlags); 1490 void *IP = nullptr; 1491 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1492 return SDValue(E, 0); 1493 1494 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment, 1495 TargetFlags); 1496 CSEMap.InsertNode(N, IP); 1497 InsertNode(N); 1498 SDValue V = SDValue(N, 0); 1499 NewSDValueDbgMsg(V, "Creating new constant pool: ", this); 1500 return V; 1501 } 1502 1503 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT, 1504 MaybeAlign Alignment, int Offset, 1505 bool isTarget, unsigned TargetFlags) { 1506 assert((TargetFlags == 0 || isTarget) && 1507 "Cannot set target flags on target-independent globals"); 1508 if (!Alignment) 1509 Alignment = getDataLayout().getPrefTypeAlign(C->getType()); 1510 unsigned Opc = isTarget ? 
ISD::TargetConstantPool : ISD::ConstantPool; 1511 FoldingSetNodeID ID; 1512 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1513 ID.AddInteger(Alignment->value()); 1514 ID.AddInteger(Offset); 1515 C->addSelectionDAGCSEId(ID); 1516 ID.AddInteger(TargetFlags); 1517 void *IP = nullptr; 1518 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1519 return SDValue(E, 0); 1520 1521 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment, 1522 TargetFlags); 1523 CSEMap.InsertNode(N, IP); 1524 InsertNode(N); 1525 return SDValue(N, 0); 1526 } 1527 1528 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset, 1529 unsigned TargetFlags) { 1530 FoldingSetNodeID ID; 1531 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None); 1532 ID.AddInteger(Index); 1533 ID.AddInteger(Offset); 1534 ID.AddInteger(TargetFlags); 1535 void *IP = nullptr; 1536 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1537 return SDValue(E, 0); 1538 1539 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags); 1540 CSEMap.InsertNode(N, IP); 1541 InsertNode(N); 1542 return SDValue(N, 0); 1543 } 1544 1545 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { 1546 FoldingSetNodeID ID; 1547 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None); 1548 ID.AddPointer(MBB); 1549 void *IP = nullptr; 1550 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1551 return SDValue(E, 0); 1552 1553 auto *N = newSDNode<BasicBlockSDNode>(MBB); 1554 CSEMap.InsertNode(N, IP); 1555 InsertNode(N); 1556 return SDValue(N, 0); 1557 } 1558 1559 SDValue SelectionDAG::getValueType(EVT VT) { 1560 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >= 1561 ValueTypeNodes.size()) 1562 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1); 1563 1564 SDNode *&N = VT.isExtended() ? 1565 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy]; 1566 1567 if (N) return SDValue(N, 0); 1568 N = newSDNode<VTSDNode>(VT); 1569 InsertNode(N); 1570 return SDValue(N, 0); 1571 } 1572 1573 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) { 1574 SDNode *&N = ExternalSymbols[Sym]; 1575 if (N) return SDValue(N, 0); 1576 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT); 1577 InsertNode(N); 1578 return SDValue(N, 0); 1579 } 1580 1581 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) { 1582 SDNode *&N = MCSymbols[Sym]; 1583 if (N) 1584 return SDValue(N, 0); 1585 N = newSDNode<MCSymbolSDNode>(Sym, VT); 1586 InsertNode(N); 1587 return SDValue(N, 0); 1588 } 1589 1590 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT, 1591 unsigned TargetFlags) { 1592 SDNode *&N = 1593 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)]; 1594 if (N) return SDValue(N, 0); 1595 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT); 1596 InsertNode(N); 1597 return SDValue(N, 0); 1598 } 1599 1600 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) { 1601 if ((unsigned)Cond >= CondCodeNodes.size()) 1602 CondCodeNodes.resize(Cond+1); 1603 1604 if (!CondCodeNodes[Cond]) { 1605 auto *N = newSDNode<CondCodeSDNode>(Cond); 1606 CondCodeNodes[Cond] = N; 1607 InsertNode(N); 1608 } 1609 1610 return SDValue(CondCodeNodes[Cond], 0); 1611 } 1612 1613 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that 1614 /// point at N1 to point at N2 and indices that point at N2 to point at N1. 
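/// For example (illustrative, not in the original source): with 4-element
/// inputs, commuting a shuffle of (N1, N2) with mask <0, 5, 2, 7> yields a
/// shuffle of (N2, N1) with mask <4, 1, 6, 3>; undef (-1) entries are left
/// unchanged.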
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  if (TLI->hasVectorBlend()) {
    // If shuffling a splat, try to blend the splat instead. We do this here so
    // that even when this arises during lowering we don't have to re-handle it.
    auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      if (!Splat)
        return;

      for (int i = 0; i < NElts; ++i) {
        if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
          continue;

        // If this input comes from undef, mark it as such.
        if (UndefElements[MaskVec[i] - Offset]) {
          MaskVec[i] = -1;
          continue;
        }

        // If we can blend a non-undef lane, use that instead.
        if (!UndefElements[i])
          MaskVec[i] = i + Offset;
      }
    };
    if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
      BlendSplat(N1BV, 0);
    if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
      BlendSplat(N2BV, NElts);
  }

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
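  // (Illustrative example, not from the original source: a shuffle of
  // <C, C, C, C> by any in-range mask still yields <C, C, C, C>, so the
  // splat operand can be returned directly below.)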
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts. We check that these don't change the number
    // (and size) of elements and just change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
          V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements match or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  llvm::copy(MaskVec, MaskAlloc);

  auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
                                           dl.getDebugLoc(), MaskAlloc);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
  EVT VT = SV.getValueType(0);
  SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
  ShuffleVectorSDNode::commuteMask(MaskVec);

  SDValue Op0 = SV.getOperand(0);
  SDValue Op1 = SV.getOperand(1);
  return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
  ID.AddInteger(RegNo);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
  N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
  ID.AddPointer(RegMask);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
                                 MCSymbol *Label) {
  return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
}

SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
                                   SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
  ID.AddPointer(Label);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N =
      newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset, bool isTarget,
                                      unsigned TargetFlags) {
  unsigned Opc = isTarget ?
      ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
  ID.AddPointer(V);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<SrcValueSDNode>(V);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
  ID.AddPointer(MD);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<MDNodeSDNode>(MD);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
  if (VT == V.getValueType())
    return V;

  return getNode(ISD::BITCAST, SDLoc(V), VT, V);
}

SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VT, SrcAS, DestAS);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFreeze(SDValue V) {
  return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
}

/// getShiftAmountOperand - Return the specified value cast to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
}

SDValue SelectionDAG::expandVAArg(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  const MaybeAlign MA(Node->getConstantOperandVal(3));

  SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
                               Tmp2, MachinePointerInfo(V));
  SDValue VAList = VAListLoad;

  if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
    VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
                     getConstant(MA->value() - 1, dl, VAList.getValueType()));

    VAList =
        getNode(ISD::AND, dl, VAList.getValueType(), VAList,
                getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
  }

  // Increment the pointer, VAList, to the next vaarg
  Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
                 getConstant(getDataLayout().getTypeAllocSize(
                                 VT.getTypeForEVT(*getContext())),
                             dl, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer
  Tmp1 =
      getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
  // Load the actual argument out of the pointer VAList
  return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
}

SDValue SelectionDAG::expandVACopy(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
  // This defaults to loading a pointer from the input and storing it to the
  // output, returning the chain.
  const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
  const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
  SDValue Tmp1 =
      getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
              Node->getOperand(2), MachinePointerInfo(VS));
  return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
                  MachinePointerInfo(VD));
}

Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
  const DataLayout &DL = getDataLayout();
  Type *Ty = VT.getTypeForEVT(*getContext());
  Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);

  if (TLI->isTypeLegal(VT) || !VT.isVector())
    return RedAlign;

  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  const Align StackAlign = TFI->getStackAlign();

  // See if we can choose a smaller ABI alignment in cases where it's an
  // illegal vector type that will get broken down.
  if (RedAlign > StackAlign) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
                                NumIntermediates, RegisterVT);
    Ty = IntermediateVT.getTypeForEVT(*getContext());
    Align RedAlign2 = UseABI ?
        DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
    if (RedAlign2 < RedAlign)
      RedAlign = RedAlign2;
  }

  return RedAlign;
}

SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
  MachineFrameInfo &MFI = MF->getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(Bytes, Alignment, false);
  return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  Type *Ty = VT.getTypeForEVT(*getContext());
  Align StackAlign =
      std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
  return CreateStackTemporary(VT.getStoreSize(), StackAlign);
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  TypeSize Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const DataLayout &DL = getDataLayout();
  Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
  return CreateStackTemporary(Bytes, Align);
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
                                ISD::CondCode Cond, const SDLoc &dl) {
  EVT OpVT = N1.getValueType();

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return getBoolConstant(true, dl, VT, OpVT);

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!OpVT.isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (OpVT.isInteger()) {
    // For EQ and NE, we can always pick a value for the undef to make the
    // predicate pass or fail, so we can return undef.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    // icmp eq/ne X, undef -> undef.
    if ((N1.isUndef() || N2.isUndef()) &&
        (Cond == ISD::SETEQ || Cond == ISD::SETNE))
      return getUNDEF(VT);

    // If both operands are undef, we can return undef for int comparison.
    // icmp undef, undef -> undef.
    if (N1.isUndef() && N2.isUndef())
      return getUNDEF(VT);

    // icmp X, X -> true/false
    // icmp X, undef -> true/false because undef could be X.
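    // (Illustrative: setcc X, X, setule folds to true and setcc X, X, setugt
    // folds to false, per isTrueWhenEqual below.)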
    if (N1 == N2)
      return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getBoolConstant(C1 == C2, dl, VT, OpVT);
      case ISD::SETNE:  return getBoolConstant(C1 != C2, dl, VT, OpVT);
      case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
      case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
      case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
      case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
      case ISD::SETLT:  return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
      case ISD::SETGT:  return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
      case ISD::SETLE:  return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
      case ISD::SETGE:  return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
      }
    }
  }

  auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);

  if (N1CFP && N2CFP) {
    APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
    switch (Cond) {
    default: break;
    case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
                                             VT, OpVT);
    case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpEqual, dl, VT, OpVT);
    case ISD::SETO:   return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETUO:  return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
                                             R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
                                             VT, OpVT);
    case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    }
  } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
    // Ensure that the constant occurs on the RHS.
    ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
    if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
      return SDValue();
    return getSetCC(dl, VT, N2, N1, SwappedCond);
  } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
             (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
    // If an operand is known to be a nan (or undef that could be a nan), we
    // can fold it.
    // Choosing NaN for the undef will always make unordered comparison succeed
    // and ordered comparison fail.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    switch (ISD::getUnorderedFlavor(Cond)) {
    default:
      llvm_unreachable("Unknown flavor!");
    case 0: // Known false.
      return getBoolConstant(false, dl, VT, OpVT);
    case 1: // Known true.
      return getBoolConstant(true, dl, VT, OpVT);
    case 2: // Undefined.
      return getUNDEF(VT);
    }
  }

  // Could not fold it.
  return SDValue();
}

/// See if the specified operand can be simplified with the knowledge that only
/// the bits specified by DemandedBits are used.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
  EVT VT = V.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return GetDemandedBits(V, DemandedBits, DemandedElts);
}

/// See if the specified operand can be simplified with the knowledge that only
/// the bits specified by DemandedBits are used in the elements specified by
/// DemandedElts.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
                                      const APInt &DemandedElts) {
  switch (V.getOpcode()) {
  default:
    return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
                                                *this, 0);
    break;
  case ISD::Constant: {
    const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
    APInt NewVal = CVal & DemandedBits;
    if (NewVal != CVal)
      return getConstant(NewVal, SDLoc(V), V.getValueType());
    break;
  }
  case ISD::SRL:
    // Only look at single-use SRLs.
    if (!V.getNode()->hasOneUse())
      break;
    if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
      // See if we can recursively simplify the LHS.
      unsigned Amt = RHSC->getZExtValue();

      // Watch out for shift count overflow though.
      if (Amt >= DemandedBits.getBitWidth())
        break;
      APInt SrcDemandedBits = DemandedBits << Amt;
      if (SDValue SimplifyLHS =
              GetDemandedBits(V.getOperand(0), SrcDemandedBits))
        return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
                       V.getOperand(1));
    }
    break;
  case ISD::AND: {
    // X & -1 -> X (ignoring bits which aren't demanded).
    // Also handle the case where masked out bits in X are known to be zero.
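    // (Illustrative: with DemandedBits = 0x0F, (and X, 0xFF) simplifies to X
    // because the mask covers every demanded bit.)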
    if (ConstantSDNode *RHSC = isConstOrConstSplat(V.getOperand(1))) {
      const APInt &AndVal = RHSC->getAPIntValue();
      if (DemandedBits.isSubsetOf(AndVal) ||
          DemandedBits.isSubsetOf(computeKnownBits(V.getOperand(0)).Zero |
                                  AndVal))
        return V.getOperand(0);
    }
    break;
  }
  }
  return SDValue();
}

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
                                     unsigned Depth) const {
  EVT VT = V.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return MaskedValueIsZero(V, Mask, DemandedElts, Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
/// DemandedElts. We use this predicate to simplify operations downstream.
/// Mask is known to be zero for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
                                     const APInt &DemandedElts,
                                     unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
}

/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
                                        unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
}

/// isSplatValue - Return true if the vector V has the same value
/// across all DemandedElts. For scalable vectors it does not make
/// sense to specify which elements are demanded or undefined, therefore
/// they are simply ignored.
bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
                                APInt &UndefElts) {
  EVT VT = V.getValueType();
  assert(VT.isVector() && "Vector type expected");

  if (!VT.isScalableVector() && !DemandedElts)
    return false; // No demanded elts, better to assume we don't know anything.

  // Deal with some common cases here that work for both fixed and scalable
  // vector types.
  switch (V.getOpcode()) {
  case ISD::SPLAT_VECTOR:
    return true;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND: {
    APInt UndefLHS, UndefRHS;
    SDValue LHS = V.getOperand(0);
    SDValue RHS = V.getOperand(1);
    if (isSplatValue(LHS, DemandedElts, UndefLHS) &&
        isSplatValue(RHS, DemandedElts, UndefRHS)) {
      UndefElts = UndefLHS | UndefRHS;
      return true;
    }
    break;
  }
  }

  // We don't support other cases than those above for scalable vectors at
  // the moment.
  if (VT.isScalableVector())
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
  UndefElts = APInt::getNullValue(NumElts);

  switch (V.getOpcode()) {
  case ISD::BUILD_VECTOR: {
    SDValue Scl;
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue Op = V.getOperand(i);
      if (Op.isUndef()) {
        UndefElts.setBit(i);
        continue;
      }
      if (!DemandedElts[i])
        continue;
      if (Scl && Scl != Op)
        return false;
      Scl = Op;
    }
    return true;
  }
  case ISD::VECTOR_SHUFFLE: {
    // Check if this is a shuffle node doing a splat.
    // TODO: Do we need to handle shuffle(splat, undef, mask)?
    int SplatIndex = -1;
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
    for (int i = 0; i != (int)NumElts; ++i) {
      int M = Mask[i];
      if (M < 0) {
        UndefElts.setBit(i);
        continue;
      }
      if (!DemandedElts[i])
        continue;
      if (0 <= SplatIndex && SplatIndex != M)
        return false;
      SplatIndex = M;
    }
    return true;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = V.getOperand(0);
    uint64_t Idx = V.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt UndefSrcElts;
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
    if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts)) {
      UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
      return true;
    }
    break;
  }
  }

  return false;
}

/// Helper wrapper to main isSplatValue function.
bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
  EVT VT = V.getValueType();
  assert(VT.isVector() && "Vector type expected");

  APInt UndefElts;
  APInt DemandedElts;

  // For now we don't support this with scalable vectors.
  if (!VT.isScalableVector())
    DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  return isSplatValue(V, DemandedElts, UndefElts) &&
         (AllowUndefs || !UndefElts);
}

SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
  V = peekThroughExtractSubvectors(V);

  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();
  switch (Opcode) {
  default: {
    APInt UndefElts;
    APInt DemandedElts;

    if (!VT.isScalableVector())
      DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());

    if (isSplatValue(V, DemandedElts, UndefElts)) {
      if (VT.isScalableVector()) {
        // DemandedElts and UndefElts are ignored for scalable vectors, since
        // the only supported cases are SPLAT_VECTOR nodes.
        SplatIdx = 0;
      } else {
        // Handle case where all demanded elements are UNDEF.
        if (DemandedElts.isSubsetOf(UndefElts)) {
          SplatIdx = 0;
          return getUNDEF(VT);
        }
        SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
      }
      return V;
    }
    break;
  }
  case ISD::SPLAT_VECTOR:
    SplatIdx = 0;
    return V;
  case ISD::VECTOR_SHUFFLE: {
    if (VT.isScalableVector())
      return SDValue();

    // Check if this is a shuffle node doing a splat.
    // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
    // getTargetVShiftNode currently struggles without the splat source.
    auto *SVN = cast<ShuffleVectorSDNode>(V);
    if (!SVN->isSplat())
      break;
    int Idx = SVN->getSplatIndex();
    int NumElts = V.getValueType().getVectorNumElements();
    SplatIdx = Idx % NumElts;
    return V.getOperand(Idx / NumElts);
  }
  }

  return SDValue();
}

SDValue SelectionDAG::getSplatValue(SDValue V) {
  int SplatIdx;
  if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx))
    return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V),
                   SrcVector.getValueType().getScalarType(), SrcVector,
                   getVectorIdxConstant(SplatIdx, SDLoc(V)));
  return SDValue();
}

const APInt *
SelectionDAG::getValidShiftAmountConstant(SDValue V,
                                          const APInt &DemandedElts) const {
  assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
          V.getOpcode() == ISD::SRA) &&
         "Unknown shift node");
  unsigned BitWidth = V.getScalarValueSizeInBits();
  if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
    // Shifting more than the bitwidth is not valid.
    const APInt &ShAmt = SA->getAPIntValue();
    if (ShAmt.ult(BitWidth))
      return &ShAmt;
  }
  return nullptr;
}

const APInt *SelectionDAG::getValidMinimumShiftAmountConstant(
    SDValue V, const APInt &DemandedElts) const {
  assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
          V.getOpcode() == ISD::SRA) &&
         "Unknown shift node");
  if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
    return ValidAmt;
  unsigned BitWidth = V.getScalarValueSizeInBits();
  auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
  if (!BV)
    return nullptr;
  const APInt *MinShAmt = nullptr;
  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!SA)
      return nullptr;
    // Shifting more than the bitwidth is not valid.
    const APInt &ShAmt = SA->getAPIntValue();
    if (ShAmt.uge(BitWidth))
      return nullptr;
    if (MinShAmt && MinShAmt->ule(ShAmt))
      continue;
    MinShAmt = &ShAmt;
  }
  return MinShAmt;
}

const APInt *SelectionDAG::getValidMaximumShiftAmountConstant(
    SDValue V, const APInt &DemandedElts) const {
  assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
          V.getOpcode() == ISD::SRA) &&
         "Unknown shift node");
  if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
    return ValidAmt;
  unsigned BitWidth = V.getScalarValueSizeInBits();
  auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
  if (!BV)
    return nullptr;
  const APInt *MaxShAmt = nullptr;
  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!SA)
      return nullptr;
    // Shifting more than the bitwidth is not valid.
    const APInt &ShAmt = SA->getAPIntValue();
    if (ShAmt.uge(BitWidth))
      return nullptr;
    if (MaxShAmt && MaxShAmt->uge(ShAmt))
      continue;
    MaxShAmt = &ShAmt;
  }
  return MaxShAmt;
}

/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. For vectors, the known bits are those that are shared by
/// every vector element.
KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return computeKnownBits(Op, DemandedElts, Depth);
}

/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. The DemandedElts argument allows us to only collect the
/// known bits that are shared by the requested vector elements.
KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
                                         unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();

  KnownBits Known(BitWidth);   // Don't know anything.

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
    // We know all of the bits for a constant!
    Known.One = C->getAPIntValue();
    Known.Zero = ~Known.One;
    return Known;
  }
  if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
    // We know all of the bits for a constant fp!
    Known.One = C->getValueAPF().bitcastToAPInt();
    Known.Zero = ~Known.One;
    return Known;
  }

  if (Depth >= MaxRecursionDepth)
    return Known;  // Limit search depth.

  KnownBits Known2;
  unsigned NumElts = DemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  if (!DemandedElts)
    return Known;  // No demanded elts, better to assume we don't know anything.

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded vector element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;

      SDValue SrcOp = Op.getOperand(i);
      Known2 = computeKnownBits(SrcOp, Depth + 1);

      // BUILD_VECTOR can implicitly truncate sources, we must handle this.
      if (SrcOp.getValueSizeInBits() != BitWidth) {
        assert(SrcOp.getValueSizeInBits() > BitWidth &&
               "Expected BUILD_VECTOR implicit truncation");
        Known2 = Known2.trunc(BitWidth);
      }

      // Known bits are the values that are shared by every demanded element.
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;

      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  case ISD::VECTOR_SHUFFLE: {
    // Collect the known bits that are shared by every vector element
    // referenced by the shuffle.
    APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
    Known.Zero.setAllBits(); Known.One.setAllBits();
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
    assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;

      int M = SVN->getMaskElt(i);
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        Known.resetAll();
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }

      if ((unsigned)M < NumElts)
        DemandedLHS.setBit((unsigned)M % NumElts);
      else
        DemandedRHS.setBit((unsigned)M % NumElts);
    }
    // Known bits are the values that are shared by every demanded element.
    if (!!DemandedLHS) {
      SDValue LHS = Op.getOperand(0);
      Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    if (!!DemandedRHS) {
      SDValue RHS = Op.getOperand(1);
      Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    // Split DemandedElts and test each of the demanded subvectors.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; i != NumSubVectors; ++i) {
      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
      if (!!DemandedSub) {
        SDValue Sub = Op.getOperand(i);
        Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    Known.One.setAllBits();
    Known.Zero.setAllBits();
    if (!!DemandedSubElts) {
      Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
      if (Known.isUnknown())
        break; // early-out.
    }
    if (!!DemandedSrcElts) {
      Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
    Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
    break;
  }
  case ISD::SCALAR_TO_VECTOR: {
    // We know about scalar_to_vector as much as we know about its source,
    // which becomes the first element of an otherwise unknown vector.
    if (DemandedElts != 1)
      break;

    SDValue N0 = Op.getOperand(0);
    Known = computeKnownBits(N0, Depth + 1);
    if (N0.getValueSizeInBits() != BitWidth)
      Known = Known.trunc(BitWidth);

    break;
  }
  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    EVT SubVT = N0.getValueType();
    unsigned SubBitWidth = SubVT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
      break;

    // Fast handling of 'identity' bitcasts.
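    // (Illustrative: a same-element-width bitcast such as v4f32 -> v4i32
    // keeps the per-element known bits unchanged.)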
    if (BitWidth == SubBitWidth) {
      Known = computeKnownBits(N0, DemandedElts, Depth + 1);
      break;
    }

    bool IsLE = getDataLayout().isLittleEndian();

    // Bitcast 'small element' vector to 'large element' scalar/vector.
    if ((BitWidth % SubBitWidth) == 0) {
      assert(N0.getValueType().isVector() && "Expected bitcast from vector");

      // Collect known bits for the (larger) output by collecting the known
      // bits from each set of sub elements and shift these into place.
      // We need to separately call computeKnownBits for each set of
      // sub elements as the knownbits for each is likely to be different.
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts(NumElts * SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);

      for (unsigned i = 0; i != SubScale; ++i) {
        Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
                                  Depth + 1);
        unsigned Shifts = IsLE ? i : SubScale - 1 - i;
        Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
        Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
      }
    }

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SubBitWidth % BitWidth) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");

      // Collect known bits for the (smaller) output by collecting the known
      // bits from the overlapping larger input elements and extracting the
      // sub sections we actually care about.
      unsigned SubScale = SubBitWidth / BitWidth;
      APInt SubDemandedElts(NumElts / SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i / SubScale);

      Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);

      Known.Zero.setAllBits(); Known.One.setAllBits();
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Shifts = IsLE ? i : NumElts - 1 - i;
          unsigned Offset = (Shifts % SubScale) * BitWidth;
          Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
          Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
          // If we don't know any bits, early out.
          if (Known.isUnknown())
            break;
        }
    }
    break;
  }
  case ISD::AND:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known &= Known2;
    break;
  case ISD::OR:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known |= Known2;
    break;
  case ISD::XOR:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known ^= Known2;
    break;
  case ISD::MUL: {
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
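    // (Illustrative: an operand with >= 2 known trailing zero bits times one
    // with >= 1 gives a product with >= 3 trailing zero bits, which is the
    // property alignment reasoning relies on.)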
    unsigned TrailZ = Known.countMinTrailingZeros() +
                      Known2.countMinTrailingZeros();
    unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                              Known2.countMinLeadingZeros(),
                              BitWidth) - BitWidth;

    Known.resetAll();
    Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
    Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
    break;
  }
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case ISD::SELECT:
  case ISD::VSELECT:
    Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SELECT_CC:
    Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.
    // If we know the result of a setcc has the top bits zero, use this info.
    // We know that we have an integer-based boolean since these operations
    // are only available for integer.
    if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  case ISD::SETCC:
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: {
    unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned Shift = ShAmt->getZExtValue();
      Known.Zero <<= Shift;
      Known.One <<= Shift;
      // Low bits are known zero.
      Known.Zero.setLowBits(Shift);
      break;
    }

    // No matter the shift amount, the trailing zeros will stay zero.
    Known.Zero = APInt::getLowBitsSet(BitWidth, Known.countMinTrailingZeros());
    Known.One.clearAllBits();

    // Minimum shift low bits are known zero.
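    // (Illustrative: even a non-constant shift amount known to be >= 3 in
    // every demanded lane guarantees the low 3 bits of the result are zero.)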
    if (const APInt *ShMinAmt =
            getValidMinimumShiftAmountConstant(Op, DemandedElts))
      Known.Zero.setLowBits(ShMinAmt->getZExtValue());
    break;
  case ISD::SRL:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned Shift = ShAmt->getZExtValue();
      Known.Zero.lshrInPlace(Shift);
      Known.One.lshrInPlace(Shift);
      // High bits are known zero.
      Known.Zero.setHighBits(Shift);
      break;
    }

    // No matter the shift amount, the leading zeros will stay zero.
    Known.Zero = APInt::getHighBitsSet(BitWidth, Known.countMinLeadingZeros());
    Known.One.clearAllBits();

    // Minimum shift high bits are known zero.
    if (const APInt *ShMinAmt =
            getValidMinimumShiftAmountConstant(Op, DemandedElts))
      Known.Zero.setHighBits(ShMinAmt->getZExtValue());
    break;
  case ISD::SRA:
    if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      unsigned Shift = ShAmt->getZExtValue();
      // Sign extend known zero/one bit (else is unknown).
      Known.Zero.ashrInPlace(Shift);
      Known.One.ashrInPlace(Shift);
    }
    break;
  case ISD::FSHL:
  case ISD::FSHR:
    if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
      unsigned Amt = C->getAPIntValue().urem(BitWidth);

      // For fshl, 0-shift returns the 1st arg.
      // For fshr, 0-shift returns the 2nd arg.
      if (Amt == 0) {
        Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
                                 DemandedElts, Depth + 1);
        break;
      }

      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
      if (Opcode == ISD::FSHL) {
        Known.One <<= Amt;
        Known.Zero <<= Amt;
        Known2.One.lshrInPlace(BitWidth - Amt);
        Known2.Zero.lshrInPlace(BitWidth - Amt);
      } else {
        Known.One <<= BitWidth - Amt;
        Known.Zero <<= BitWidth - Amt;
        Known2.One.lshrInPlace(Amt);
        Known2.Zero.lshrInPlace(Amt);
      }
      Known.One |= Known2.One;
      Known.Zero |= Known2.Zero;
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = EVT.getScalarSizeInBits();

    // Sign extension. Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignMask = APInt::getSignMask(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignMask = InSignMask.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignMask;

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known.One &= InputDemandedBits;
    Known.Zero &= InputDemandedBits;

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
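    // (Illustrative: sign_extend_inreg from i8 within an i32 where bit 7 is
    // known zero makes bits 8..31 of the result known zero as well.)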
    if (Known.Zero.intersects(InSignMask)) {        // Input sign bit known clear
      Known.Zero |= NewBits;
      Known.One  &= ~NewBits;
    } else if (Known.One.intersects(InSignMask)) {  // Input sign bit known set
      Known.One  |= NewBits;
      Known.Zero &= ~NewBits;
    } else {                                        // Input sign bit unknown
      Known.Zero &= ~NewBits;
      Known.One  &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleTZ = Known2.countMaxTrailingZeros();
    unsigned LowBits = Log2_32(PossibleTZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleLZ = Known2.countMaxLeadingZeros();
    unsigned LowBits = Log2_32(PossibleLZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTPOP: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we know some of the bits are zero, they can't be one.
    unsigned PossibleOnes = Known2.countMaxPopulation();
    Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
    if (ISD::isNON_EXTLoad(LD) && Cst) {
      // Determine any common known bits from the loaded constant pool value.
      Type *CstTy = Cst->getType();
      if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
        // If it's a vector splat, then we can (quickly) reuse the scalar path.
        // NOTE: We assume all elements match and none are UNDEF.
        if (CstTy->isVectorTy()) {
          if (const Constant *Splat = Cst->getSplatValue()) {
            Cst = Splat;
            CstTy = Cst->getType();
          }
        }
        // TODO - do we need to handle different bitwidths?
        if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
          // Iterate across all vector elements finding common known bits.
          Known.One.setAllBits();
          Known.Zero.setAllBits();
          for (unsigned i = 0; i != NumElts; ++i) {
            if (!DemandedElts[i])
              continue;
            if (Constant *Elt = Cst->getAggregateElement(i)) {
              if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                const APInt &Value = CInt->getValue();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
              if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                APInt Value = CFP->getValueAPF().bitcastToAPInt();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
            }
            Known.One.clearAllBits();
            Known.Zero.clearAllBits();
            break;
          }
        } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
          if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
            const APInt &Value = CInt->getValue();
            Known.One = Value;
            Known.Zero = ~Value;
          } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
            APInt Value = CFP->getValueAPF().bitcastToAPInt();
            Known.One = Value;
            Known.Zero = ~Value;
          }
        }
      }
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
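      // (Illustrative: a zextload of an i8 into an i32 leaves bits 8..31 of
      // the loaded value known zero.)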
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      if (LD->getExtensionType() == ISD::NON_EXTLOAD)
        computeKnownBitsFromRangeMetadata(*Ranges, Known);
    }
    break;
  }
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::ZERO_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    Known = Known.anyext(BitWidth);
    break;
  }
  case ISD::ANY_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.anyext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    Known = computeKnownBits(Op.getOperand(0), Depth+1);
    Known.Zero |= (~InMask);
    Known.One  &= (~Known.Zero);
    break;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    Known.Zero.setBitsFrom(1);
    break;
  case ISD::USUBO:
  case ISD::SSUBO:
    if (Op.getResNo() == 1) {
      // If we know the result of a setcc has the top bits zero, use this info.
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::SUB:
  case ISD::SUBC: {
    assert(Op.getResNo() == 0 &&
           "We only compute knownbits for the difference here.");

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
                                        Known, Known2);
    break;
  }
  case ISD::UADDO:
  case ISD::SADDO:
  case ISD::ADDCARRY:
    if (Op.getResNo() == 1) {
      // If we know the result of a setcc has the top bits zero, use this info.
3191 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3192 TargetLowering::ZeroOrOneBooleanContent && 3193 BitWidth > 1) 3194 Known.Zero.setBitsFrom(1); 3195 break; 3196 } 3197 LLVM_FALLTHROUGH; 3198 case ISD::ADD: 3199 case ISD::ADDC: 3200 case ISD::ADDE: { 3201 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here."); 3202 3203 // With ADDE and ADDCARRY, a carry bit may be added in. 3204 KnownBits Carry(1); 3205 if (Opcode == ISD::ADDE) 3206 // Can't track carry from glue, set carry to unknown. 3207 Carry.resetAll(); 3208 else if (Opcode == ISD::ADDCARRY) 3209 // TODO: Compute known bits for the carry operand. Not sure if it is worth 3210 // the trouble (how often will we find a known carry bit). And I haven't 3211 // tested this very much yet, but something like this might work: 3212 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1); 3213 // Carry = Carry.zextOrTrunc(1, false); 3214 Carry.resetAll(); 3215 else 3216 Carry.setAllZero(); 3217 3218 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3219 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3220 Known = KnownBits::computeForAddCarry(Known, Known2, Carry); 3221 break; 3222 } 3223 case ISD::SREM: 3224 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 3225 const APInt &RA = Rem->getAPIntValue().abs(); 3226 if (RA.isPowerOf2()) { 3227 APInt LowBits = RA - 1; 3228 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3229 3230 // The low bits of the first operand are unchanged by the srem. 3231 Known.Zero = Known2.Zero & LowBits; 3232 Known.One = Known2.One & LowBits; 3233 3234 // If the first operand is non-negative or has all low bits zero, then 3235 // the upper bits are all zero. 3236 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero)) 3237 Known.Zero |= ~LowBits; 3238 3239 // If the first operand is negative and not all low bits are zero, then 3240 // the upper bits are all one. 3241 if (Known2.isNegative() && LowBits.intersects(Known2.One)) 3242 Known.One |= ~LowBits; 3243 assert((Known.Zero & Known.One) == 0&&"Bits known to be one AND zero?"); 3244 } 3245 } 3246 break; 3247 case ISD::UREM: { 3248 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 3249 const APInt &RA = Rem->getAPIntValue(); 3250 if (RA.isPowerOf2()) { 3251 APInt LowBits = (RA - 1); 3252 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3253 3254 // The upper bits are all zero, the lower ones are unchanged. 3255 Known.Zero = Known2.Zero | ~LowBits; 3256 Known.One = Known2.One & LowBits; 3257 break; 3258 } 3259 } 3260 3261 // Since the result is less than or equal to either operand, any leading 3262 // zero bits in either operand must also exist in the result. 
3263 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3264 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3265 3266 uint32_t Leaders = 3267 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 3268 Known.resetAll(); 3269 Known.Zero.setHighBits(Leaders); 3270 break; 3271 } 3272 case ISD::EXTRACT_ELEMENT: { 3273 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3274 const unsigned Index = Op.getConstantOperandVal(1); 3275 const unsigned EltBitWidth = Op.getValueSizeInBits(); 3276 3277 // Remove low part of known bits mask 3278 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth); 3279 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth); 3280 3281 // Remove high part of known bit mask 3282 Known = Known.trunc(EltBitWidth); 3283 break; 3284 } 3285 case ISD::EXTRACT_VECTOR_ELT: { 3286 SDValue InVec = Op.getOperand(0); 3287 SDValue EltNo = Op.getOperand(1); 3288 EVT VecVT = InVec.getValueType(); 3289 const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); 3290 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3291 3292 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 3293 // anything about the extended bits. 3294 if (BitWidth > EltBitWidth) 3295 Known = Known.trunc(EltBitWidth); 3296 3297 // If we know the element index, just demand that vector element, else for 3298 // an unknown element index, ignore DemandedElts and demand them all. 3299 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 3300 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3301 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) 3302 DemandedSrcElts = 3303 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); 3304 3305 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1); 3306 if (BitWidth > EltBitWidth) 3307 Known = Known.anyext(BitWidth); 3308 break; 3309 } 3310 case ISD::INSERT_VECTOR_ELT: { 3311 // If we know the element index, split the demand between the 3312 // source vector and the inserted element, otherwise assume we need 3313 // the original demanded vector elements and the value. 
3314 SDValue InVec = Op.getOperand(0); 3315 SDValue InVal = Op.getOperand(1); 3316 SDValue EltNo = Op.getOperand(2); 3317 bool DemandedVal = true; 3318 APInt DemandedVecElts = DemandedElts; 3319 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3320 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3321 unsigned EltIdx = CEltNo->getZExtValue(); 3322 DemandedVal = !!DemandedElts[EltIdx]; 3323 DemandedVecElts.clearBit(EltIdx); 3324 } 3325 Known.One.setAllBits(); 3326 Known.Zero.setAllBits(); 3327 if (DemandedVal) { 3328 Known2 = computeKnownBits(InVal, Depth + 1); 3329 Known.One &= Known2.One.zextOrTrunc(BitWidth); 3330 Known.Zero &= Known2.Zero.zextOrTrunc(BitWidth); 3331 } 3332 if (!!DemandedVecElts) { 3333 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1); 3334 Known.One &= Known2.One; 3335 Known.Zero &= Known2.Zero; 3336 } 3337 break; 3338 } 3339 case ISD::BITREVERSE: { 3340 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3341 Known.Zero = Known2.Zero.reverseBits(); 3342 Known.One = Known2.One.reverseBits(); 3343 break; 3344 } 3345 case ISD::BSWAP: { 3346 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3347 Known.Zero = Known2.Zero.byteSwap(); 3348 Known.One = Known2.One.byteSwap(); 3349 break; 3350 } 3351 case ISD::ABS: { 3352 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3353 3354 // If the source's MSB is zero then we know the rest of the bits already. 3355 if (Known2.isNonNegative()) { 3356 Known.Zero = Known2.Zero; 3357 Known.One = Known2.One; 3358 break; 3359 } 3360 3361 // We only know that the absolute values's MSB will be zero iff there is 3362 // a set bit that isn't the sign bit (otherwise it could be INT_MIN). 3363 Known2.One.clearSignBit(); 3364 if (Known2.One.getBoolValue()) { 3365 Known.Zero = APInt::getSignMask(BitWidth); 3366 break; 3367 } 3368 break; 3369 } 3370 case ISD::UMIN: { 3371 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3372 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3373 3374 // UMIN - we know that the result will have the maximum of the 3375 // known zero leading bits of the inputs. 3376 unsigned LeadZero = Known.countMinLeadingZeros(); 3377 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros()); 3378 3379 Known.Zero &= Known2.Zero; 3380 Known.One &= Known2.One; 3381 Known.Zero.setHighBits(LeadZero); 3382 break; 3383 } 3384 case ISD::UMAX: { 3385 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3386 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3387 3388 // UMAX - we know that the result will have the maximum of the 3389 // known one leading bits of the inputs. 3390 unsigned LeadOne = Known.countMinLeadingOnes(); 3391 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes()); 3392 3393 Known.Zero &= Known2.Zero; 3394 Known.One &= Known2.One; 3395 Known.One.setHighBits(LeadOne); 3396 break; 3397 } 3398 case ISD::SMIN: 3399 case ISD::SMAX: { 3400 // If we have a clamp pattern, we know that the number of sign bits will be 3401 // the minimum of the clamp min/max range. 3402 bool IsMax = (Opcode == ISD::SMAX); 3403 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3404 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3405 if (Op.getOperand(0).getOpcode() == (IsMax ? 
ISD::SMIN : ISD::SMAX)) 3406 CstHigh = 3407 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3408 if (CstLow && CstHigh) { 3409 if (!IsMax) 3410 std::swap(CstLow, CstHigh); 3411 3412 const APInt &ValueLow = CstLow->getAPIntValue(); 3413 const APInt &ValueHigh = CstHigh->getAPIntValue(); 3414 if (ValueLow.sle(ValueHigh)) { 3415 unsigned LowSignBits = ValueLow.getNumSignBits(); 3416 unsigned HighSignBits = ValueHigh.getNumSignBits(); 3417 unsigned MinSignBits = std::min(LowSignBits, HighSignBits); 3418 if (ValueLow.isNegative() && ValueHigh.isNegative()) { 3419 Known.One.setHighBits(MinSignBits); 3420 break; 3421 } 3422 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) { 3423 Known.Zero.setHighBits(MinSignBits); 3424 break; 3425 } 3426 } 3427 } 3428 3429 // Fallback - just get the shared known bits of the operands. 3430 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3431 if (Known.isUnknown()) break; // Early-out 3432 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3433 Known.Zero &= Known2.Zero; 3434 Known.One &= Known2.One; 3435 break; 3436 } 3437 case ISD::FrameIndex: 3438 case ISD::TargetFrameIndex: 3439 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(), 3440 Known, getMachineFunction()); 3441 break; 3442 3443 default: 3444 if (Opcode < ISD::BUILTIN_OP_END) 3445 break; 3446 LLVM_FALLTHROUGH; 3447 case ISD::INTRINSIC_WO_CHAIN: 3448 case ISD::INTRINSIC_W_CHAIN: 3449 case ISD::INTRINSIC_VOID: 3450 // Allow the target to implement this method for its nodes. 3451 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); 3452 break; 3453 } 3454 3455 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 3456 return Known; 3457 } 3458 3459 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0, 3460 SDValue N1) const { 3461 // X + 0 never overflow 3462 if (isNullConstant(N1)) 3463 return OFK_Never; 3464 3465 KnownBits N1Known = computeKnownBits(N1); 3466 if (N1Known.Zero.getBoolValue()) { 3467 KnownBits N0Known = computeKnownBits(N0); 3468 3469 bool overflow; 3470 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow); 3471 if (!overflow) 3472 return OFK_Never; 3473 } 3474 3475 // mulhi + 1 never overflow 3476 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 && 3477 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue()) 3478 return OFK_Never; 3479 3480 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) { 3481 KnownBits N0Known = computeKnownBits(N0); 3482 3483 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue()) 3484 return OFK_Never; 3485 } 3486 3487 return OFK_Sometime; 3488 } 3489 3490 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const { 3491 EVT OpVT = Val.getValueType(); 3492 unsigned BitWidth = OpVT.getScalarSizeInBits(); 3493 3494 // Is the constant a known power of 2? 3495 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val)) 3496 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3497 3498 // A left-shift of a constant one will have exactly one bit set because 3499 // shifting the bit off the end is undefined. 3500 if (Val.getOpcode() == ISD::SHL) { 3501 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3502 if (C && C->getAPIntValue() == 1) 3503 return true; 3504 } 3505 3506 // Similarly, a logical right-shift of a constant sign-bit will have exactly 3507 // one bit set. 
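  // E.g. for i32, (srl 0x80000000, C) keeps exactly one bit set for any
  // in-range shift amount C.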
3508 if (Val.getOpcode() == ISD::SRL) { 3509 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3510 if (C && C->getAPIntValue().isSignMask()) 3511 return true; 3512 } 3513 3514 // Are all operands of a build vector constant powers of two? 3515 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3516 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3517 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3518 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3519 return false; 3520 })) 3521 return true; 3522 3523 // More could be done here, though the above checks are enough 3524 // to handle some common cases. 3525 3526 // Fall back to computeKnownBits to catch other known cases. 3527 KnownBits Known = computeKnownBits(Val); 3528 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3529 } 3530 3531 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3532 EVT VT = Op.getValueType(); 3533 APInt DemandedElts = VT.isVector() 3534 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 3535 : APInt(1, 1); 3536 return ComputeNumSignBits(Op, DemandedElts, Depth); 3537 } 3538 3539 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3540 unsigned Depth) const { 3541 EVT VT = Op.getValueType(); 3542 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3543 unsigned VTBits = VT.getScalarSizeInBits(); 3544 unsigned NumElts = DemandedElts.getBitWidth(); 3545 unsigned Tmp, Tmp2; 3546 unsigned FirstAnswer = 1; 3547 3548 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3549 const APInt &Val = C->getAPIntValue(); 3550 return Val.getNumSignBits(); 3551 } 3552 3553 if (Depth >= MaxRecursionDepth) 3554 return 1; // Limit search depth. 3555 3556 if (!DemandedElts) 3557 return 1; // No demanded elts, better to assume we don't know anything. 3558 3559 unsigned Opcode = Op.getOpcode(); 3560 switch (Opcode) { 3561 default: break; 3562 case ISD::AssertSext: 3563 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3564 return VTBits-Tmp+1; 3565 case ISD::AssertZext: 3566 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3567 return VTBits-Tmp; 3568 3569 case ISD::BUILD_VECTOR: 3570 Tmp = VTBits; 3571 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3572 if (!DemandedElts[i]) 3573 continue; 3574 3575 SDValue SrcOp = Op.getOperand(i); 3576 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1); 3577 3578 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3579 if (SrcOp.getValueSizeInBits() != VTBits) { 3580 assert(SrcOp.getValueSizeInBits() > VTBits && 3581 "Expected BUILD_VECTOR implicit truncation"); 3582 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3583 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3584 } 3585 Tmp = std::min(Tmp, Tmp2); 3586 } 3587 return Tmp; 3588 3589 case ISD::VECTOR_SHUFFLE: { 3590 // Collect the minimum number of sign bits that are shared by every vector 3591 // element referenced by the shuffle. 3592 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3593 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3594 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3595 for (unsigned i = 0; i != NumElts; ++i) { 3596 int M = SVN->getMaskElt(i); 3597 if (!DemandedElts[i]) 3598 continue; 3599 // For UNDEF elements, we don't know anything about the common state of 3600 // the shuffle result. 
3601 if (M < 0) 3602 return 1; 3603 if ((unsigned)M < NumElts) 3604 DemandedLHS.setBit((unsigned)M % NumElts); 3605 else 3606 DemandedRHS.setBit((unsigned)M % NumElts); 3607 } 3608 Tmp = std::numeric_limits<unsigned>::max(); 3609 if (!!DemandedLHS) 3610 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3611 if (!!DemandedRHS) { 3612 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3613 Tmp = std::min(Tmp, Tmp2); 3614 } 3615 // If we don't know anything, early out and try computeKnownBits fall-back. 3616 if (Tmp == 1) 3617 break; 3618 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3619 return Tmp; 3620 } 3621 3622 case ISD::BITCAST: { 3623 SDValue N0 = Op.getOperand(0); 3624 EVT SrcVT = N0.getValueType(); 3625 unsigned SrcBits = SrcVT.getScalarSizeInBits(); 3626 3627 // Ignore bitcasts from unsupported types.. 3628 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) 3629 break; 3630 3631 // Fast handling of 'identity' bitcasts. 3632 if (VTBits == SrcBits) 3633 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3634 3635 bool IsLE = getDataLayout().isLittleEndian(); 3636 3637 // Bitcast 'large element' scalar/vector to 'small element' vector. 3638 if ((SrcBits % VTBits) == 0) { 3639 assert(VT.isVector() && "Expected bitcast to vector"); 3640 3641 unsigned Scale = SrcBits / VTBits; 3642 APInt SrcDemandedElts(NumElts / Scale, 0); 3643 for (unsigned i = 0; i != NumElts; ++i) 3644 if (DemandedElts[i]) 3645 SrcDemandedElts.setBit(i / Scale); 3646 3647 // Fast case - sign splat can be simply split across the small elements. 3648 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1); 3649 if (Tmp == SrcBits) 3650 return VTBits; 3651 3652 // Slow case - determine how far the sign extends into each sub-element. 3653 Tmp2 = VTBits; 3654 for (unsigned i = 0; i != NumElts; ++i) 3655 if (DemandedElts[i]) { 3656 unsigned SubOffset = i % Scale; 3657 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); 3658 SubOffset = SubOffset * VTBits; 3659 if (Tmp <= SubOffset) 3660 return 1; 3661 Tmp2 = std::min(Tmp2, Tmp - SubOffset); 3662 } 3663 return Tmp2; 3664 } 3665 break; 3666 } 3667 3668 case ISD::SIGN_EXTEND: 3669 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3670 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3671 case ISD::SIGN_EXTEND_INREG: 3672 // Max of the input and what this extends. 3673 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3674 Tmp = VTBits-Tmp+1; 3675 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3676 return std::max(Tmp, Tmp2); 3677 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3678 SDValue Src = Op.getOperand(0); 3679 EVT SrcVT = Src.getValueType(); 3680 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements()); 3681 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3682 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3683 } 3684 case ISD::SRA: 3685 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3686 // SRA X, C -> adds C sign bits. 3687 if (const APInt *ShAmt = 3688 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 3689 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits); 3690 return Tmp; 3691 case ISD::SHL: 3692 if (const APInt *ShAmt = 3693 getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 3694 // shl destroys sign bits, ensure it doesn't shift out all sign bits. 
3695 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3696 if (ShAmt->ult(Tmp)) 3697 return Tmp - ShAmt->getZExtValue(); 3698 } 3699 break; 3700 case ISD::AND: 3701 case ISD::OR: 3702 case ISD::XOR: // NOT is handled here. 3703 // Logical binary ops preserve the number of sign bits at the worst. 3704 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3705 if (Tmp != 1) { 3706 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3707 FirstAnswer = std::min(Tmp, Tmp2); 3708 // We computed what we know about the sign bits as our first 3709 // answer. Now proceed to the generic code that uses 3710 // computeKnownBits, and pick whichever answer is better. 3711 } 3712 break; 3713 3714 case ISD::SELECT: 3715 case ISD::VSELECT: 3716 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3717 if (Tmp == 1) return 1; // Early out. 3718 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3719 return std::min(Tmp, Tmp2); 3720 case ISD::SELECT_CC: 3721 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3722 if (Tmp == 1) return 1; // Early out. 3723 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3724 return std::min(Tmp, Tmp2); 3725 3726 case ISD::SMIN: 3727 case ISD::SMAX: { 3728 // If we have a clamp pattern, we know that the number of sign bits will be 3729 // the minimum of the clamp min/max range. 3730 bool IsMax = (Opcode == ISD::SMAX); 3731 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3732 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3733 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3734 CstHigh = 3735 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3736 if (CstLow && CstHigh) { 3737 if (!IsMax) 3738 std::swap(CstLow, CstHigh); 3739 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { 3740 Tmp = CstLow->getAPIntValue().getNumSignBits(); 3741 Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); 3742 return std::min(Tmp, Tmp2); 3743 } 3744 } 3745 3746 // Fallback - just get the minimum number of sign bits of the operands. 3747 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3748 if (Tmp == 1) 3749 return 1; // Early out. 3750 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3751 return std::min(Tmp, Tmp2); 3752 } 3753 case ISD::UMIN: 3754 case ISD::UMAX: 3755 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3756 if (Tmp == 1) 3757 return 1; // Early out. 3758 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3759 return std::min(Tmp, Tmp2); 3760 case ISD::SADDO: 3761 case ISD::UADDO: 3762 case ISD::SSUBO: 3763 case ISD::USUBO: 3764 case ISD::SMULO: 3765 case ISD::UMULO: 3766 if (Op.getResNo() != 1) 3767 break; 3768 // The boolean result conforms to getBooleanContents. Fall through. 3769 // If setcc returns 0/-1, all bits are sign bits. 3770 // We know that we have an integer-based boolean since these operations 3771 // are only available for integer. 3772 if (TLI->getBooleanContents(VT.isVector(), false) == 3773 TargetLowering::ZeroOrNegativeOneBooleanContent) 3774 return VTBits; 3775 break; 3776 case ISD::SETCC: 3777 case ISD::STRICT_FSETCC: 3778 case ISD::STRICT_FSETCCS: { 3779 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 3780 // If setcc returns 0/-1, all bits are sign bits. 
3781 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 3782 TargetLowering::ZeroOrNegativeOneBooleanContent) 3783 return VTBits; 3784 break; 3785 } 3786 case ISD::ROTL: 3787 case ISD::ROTR: 3788 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3789 3790 // If we're rotating an 0/-1 value, then it stays an 0/-1 value. 3791 if (Tmp == VTBits) 3792 return VTBits; 3793 3794 if (ConstantSDNode *C = 3795 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) { 3796 unsigned RotAmt = C->getAPIntValue().urem(VTBits); 3797 3798 // Handle rotate right by N like a rotate left by 32-N. 3799 if (Opcode == ISD::ROTR) 3800 RotAmt = (VTBits - RotAmt) % VTBits; 3801 3802 // If we aren't rotating out all of the known-in sign bits, return the 3803 // number that are left. This handles rotl(sext(x), 1) for example. 3804 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt); 3805 } 3806 break; 3807 case ISD::ADD: 3808 case ISD::ADDC: 3809 // Add can have at most one carry bit. Thus we know that the output 3810 // is, at worst, one more bit than the inputs. 3811 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3812 if (Tmp == 1) return 1; // Early out. 3813 3814 // Special case decrementing a value (ADD X, -1): 3815 if (ConstantSDNode *CRHS = 3816 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) 3817 if (CRHS->isAllOnesValue()) { 3818 KnownBits Known = 3819 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3820 3821 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3822 // sign bits set. 3823 if ((Known.Zero | 1).isAllOnesValue()) 3824 return VTBits; 3825 3826 // If we are subtracting one from a positive number, there is no carry 3827 // out of the result. 3828 if (Known.isNonNegative()) 3829 return Tmp; 3830 } 3831 3832 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3833 if (Tmp2 == 1) return 1; // Early out. 3834 return std::min(Tmp, Tmp2) - 1; 3835 case ISD::SUB: 3836 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3837 if (Tmp2 == 1) return 1; // Early out. 3838 3839 // Handle NEG. 3840 if (ConstantSDNode *CLHS = 3841 isConstOrConstSplat(Op.getOperand(0), DemandedElts)) 3842 if (CLHS->isNullValue()) { 3843 KnownBits Known = 3844 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3845 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3846 // sign bits set. 3847 if ((Known.Zero | 1).isAllOnesValue()) 3848 return VTBits; 3849 3850 // If the input is known to be positive (the sign bit is known clear), 3851 // the output of the NEG has the same number of sign bits as the input. 3852 if (Known.isNonNegative()) 3853 return Tmp2; 3854 3855 // Otherwise, we treat this like a SUB. 3856 } 3857 3858 // Sub can have at most one carry bit. Thus we know that the output 3859 // is, at worst, one more bit than the inputs. 3860 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3861 if (Tmp == 1) return 1; // Early out. 3862 return std::min(Tmp, Tmp2) - 1; 3863 case ISD::MUL: { 3864 // The output of the Mul can be at most twice the valid bits in the inputs. 3865 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3866 if (SignBitsOp0 == 1) 3867 break; 3868 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3869 if (SignBitsOp1 == 1) 3870 break; 3871 unsigned OutValidBits = 3872 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1); 3873 return OutValidBits > VTBits ? 
1 : VTBits - OutValidBits + 1; 3874 } 3875 case ISD::TRUNCATE: { 3876 // Check if the sign bits of source go down as far as the truncated value. 3877 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); 3878 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3879 if (NumSrcSignBits > (NumSrcBits - VTBits)) 3880 return NumSrcSignBits - (NumSrcBits - VTBits); 3881 break; 3882 } 3883 case ISD::EXTRACT_ELEMENT: { 3884 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); 3885 const int BitWidth = Op.getValueSizeInBits(); 3886 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; 3887 3888 // Get reverse index (starting from 1), Op1 value indexes elements from 3889 // little end. Sign starts at big end. 3890 const int rIndex = Items - 1 - Op.getConstantOperandVal(1); 3891 3892 // If the sign portion ends in our element the subtraction gives correct 3893 // result. Otherwise it gives either negative or > bitwidth result 3894 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); 3895 } 3896 case ISD::INSERT_VECTOR_ELT: { 3897 // If we know the element index, split the demand between the 3898 // source vector and the inserted element, otherwise assume we need 3899 // the original demanded vector elements and the value. 3900 SDValue InVec = Op.getOperand(0); 3901 SDValue InVal = Op.getOperand(1); 3902 SDValue EltNo = Op.getOperand(2); 3903 bool DemandedVal = true; 3904 APInt DemandedVecElts = DemandedElts; 3905 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3906 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3907 unsigned EltIdx = CEltNo->getZExtValue(); 3908 DemandedVal = !!DemandedElts[EltIdx]; 3909 DemandedVecElts.clearBit(EltIdx); 3910 } 3911 Tmp = std::numeric_limits<unsigned>::max(); 3912 if (DemandedVal) { 3913 // TODO - handle implicit truncation of inserted elements. 3914 if (InVal.getScalarValueSizeInBits() != VTBits) 3915 break; 3916 Tmp2 = ComputeNumSignBits(InVal, Depth + 1); 3917 Tmp = std::min(Tmp, Tmp2); 3918 } 3919 if (!!DemandedVecElts) { 3920 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1); 3921 Tmp = std::min(Tmp, Tmp2); 3922 } 3923 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3924 return Tmp; 3925 } 3926 case ISD::EXTRACT_VECTOR_ELT: { 3927 SDValue InVec = Op.getOperand(0); 3928 SDValue EltNo = Op.getOperand(1); 3929 EVT VecVT = InVec.getValueType(); 3930 const unsigned BitWidth = Op.getValueSizeInBits(); 3931 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); 3932 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3933 3934 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know 3935 // anything about sign bits. But if the sizes match we can derive knowledge 3936 // about sign bits from the vector operand. 3937 if (BitWidth != EltBitWidth) 3938 break; 3939 3940 // If we know the element index, just demand that vector element, else for 3941 // an unknown element index, ignore DemandedElts and demand them all. 3942 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 3943 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3944 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) 3945 DemandedSrcElts = 3946 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); 3947 3948 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1); 3949 } 3950 case ISD::EXTRACT_SUBVECTOR: { 3951 // Offset the demanded elts by the subvector index. 
3952 SDValue Src = Op.getOperand(0); 3953 uint64_t Idx = Op.getConstantOperandVal(1); 3954 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3955 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 3956 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1); 3957 } 3958 case ISD::CONCAT_VECTORS: { 3959 // Determine the minimum number of sign bits across all demanded 3960 // elts of the input vectors. Early out if the result is already 1. 3961 Tmp = std::numeric_limits<unsigned>::max(); 3962 EVT SubVectorVT = Op.getOperand(0).getValueType(); 3963 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); 3964 unsigned NumSubVectors = Op.getNumOperands(); 3965 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) { 3966 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts); 3967 DemandedSub = DemandedSub.trunc(NumSubVectorElts); 3968 if (!DemandedSub) 3969 continue; 3970 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1); 3971 Tmp = std::min(Tmp, Tmp2); 3972 } 3973 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3974 return Tmp; 3975 } 3976 case ISD::INSERT_SUBVECTOR: { 3977 // Demand any elements from the subvector and the remainder from the src its 3978 // inserted into. 3979 SDValue Src = Op.getOperand(0); 3980 SDValue Sub = Op.getOperand(1); 3981 uint64_t Idx = Op.getConstantOperandVal(2); 3982 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 3983 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 3984 APInt DemandedSrcElts = DemandedElts; 3985 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx); 3986 3987 Tmp = std::numeric_limits<unsigned>::max(); 3988 if (!!DemandedSubElts) { 3989 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1); 3990 if (Tmp == 1) 3991 return 1; // early-out 3992 } 3993 if (!!DemandedSrcElts) { 3994 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1); 3995 Tmp = std::min(Tmp, Tmp2); 3996 } 3997 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3998 return Tmp; 3999 } 4000 } 4001 4002 // If we are looking at the loaded value of the SDNode. 4003 if (Op.getResNo() == 0) { 4004 // Handle LOADX separately here. EXTLOAD case will fallthrough. 4005 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 4006 unsigned ExtType = LD->getExtensionType(); 4007 switch (ExtType) { 4008 default: break; 4009 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known. 4010 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 4011 return VTBits - Tmp + 1; 4012 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known. 4013 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 4014 return VTBits - Tmp; 4015 case ISD::NON_EXTLOAD: 4016 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) { 4017 // We only need to handle vectors - computeKnownBits should handle 4018 // scalar cases. 4019 Type *CstTy = Cst->getType(); 4020 if (CstTy->isVectorTy() && 4021 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) { 4022 Tmp = VTBits; 4023 for (unsigned i = 0; i != NumElts; ++i) { 4024 if (!DemandedElts[i]) 4025 continue; 4026 if (Constant *Elt = Cst->getAggregateElement(i)) { 4027 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 4028 const APInt &Value = CInt->getValue(); 4029 Tmp = std::min(Tmp, Value.getNumSignBits()); 4030 continue; 4031 } 4032 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 4033 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 4034 Tmp = std::min(Tmp, Value.getNumSignBits()); 4035 continue; 4036 } 4037 } 4038 // Unknown type. 
Conservatively assume no bits match sign bit. 4039 return 1; 4040 } 4041 return Tmp; 4042 } 4043 } 4044 break; 4045 } 4046 } 4047 } 4048 4049 // Allow the target to implement this method for its nodes. 4050 if (Opcode >= ISD::BUILTIN_OP_END || 4051 Opcode == ISD::INTRINSIC_WO_CHAIN || 4052 Opcode == ISD::INTRINSIC_W_CHAIN || 4053 Opcode == ISD::INTRINSIC_VOID) { 4054 unsigned NumBits = 4055 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 4056 if (NumBits > 1) 4057 FirstAnswer = std::max(FirstAnswer, NumBits); 4058 } 4059 4060 // Finally, if we can prove that the top bits of the result are 0's or 1's, 4061 // use this information. 4062 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); 4063 4064 APInt Mask; 4065 if (Known.isNonNegative()) { // sign bit is 0 4066 Mask = Known.Zero; 4067 } else if (Known.isNegative()) { // sign bit is 1; 4068 Mask = Known.One; 4069 } else { 4070 // Nothing known. 4071 return FirstAnswer; 4072 } 4073 4074 // Okay, we know that the sign bit in Mask is set. Use CLO to determine 4075 // the number of identical bits in the top of the input value. 4076 Mask <<= Mask.getBitWidth()-VTBits; 4077 return std::max(FirstAnswer, Mask.countLeadingOnes()); 4078 } 4079 4080 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 4081 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 4082 !isa<ConstantSDNode>(Op.getOperand(1))) 4083 return false; 4084 4085 if (Op.getOpcode() == ISD::OR && 4086 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) 4087 return false; 4088 4089 return true; 4090 } 4091 4092 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { 4093 // If we're told that NaNs won't happen, assume they won't. 4094 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) 4095 return true; 4096 4097 if (Depth >= MaxRecursionDepth) 4098 return false; // Limit search depth. 4099 4100 // TODO: Handle vectors. 4101 // If the value is a constant, we can obviously see if it is a NaN or not. 
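  // When SNaN is set we only need to rule out signaling NaNs; a constant
  // quiet NaN is still acceptable.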
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
    return !C->getValueAPF().isNaN() ||
           (SNaN && !C->getValueAPF().isSignaling());
  }

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FSIN:
  case ISD::FCOS: {
    if (SNaN)
      return true;
    // TODO: Need isKnownNeverInfinity
    return false;
  }
  case ISD::FCANONICALIZE:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FTRUNC:
  case ISD::FFLOOR:
  case ISD::FCEIL:
  case ISD::FROUND:
  case ISD::FROUNDEVEN:
  case ISD::FRINT:
  case ISD::FNEARBYINT: {
    if (SNaN)
      return true;
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case ISD::FABS:
  case ISD::FNEG:
  case ISD::FCOPYSIGN: {
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case ISD::SELECT:
    return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
           isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
  case ISD::FP_EXTEND:
  case ISD::FP_ROUND: {
    if (SNaN)
      return true;
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return true;
  case ISD::FMA:
  case ISD::FMAD: {
    if (SNaN)
      return true;
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
           isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
  }
  case ISD::FSQRT: // Needs the operand to be known positive.
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FPOWI:
  case ISD::FPOW: {
    if (SNaN)
      return true;
    // TODO: Refine on operand
    return false;
  }
  case ISD::FMINNUM:
  case ISD::FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
           isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
  }
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
            isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
           (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
            isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
  }
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM: {
    // TODO: Does this quiet or return the original NaN as-is?
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  default:
    if (Opcode >= ISD::BUILTIN_OP_END ||
        Opcode == ISD::INTRINSIC_WO_CHAIN ||
        Opcode == ISD::INTRINSIC_W_CHAIN ||
        Opcode == ISD::INTRINSIC_VOID) {
      return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
    }

    return false;
  }
}

bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
  assert(Op.getValueType().isFloatingPoint() &&
         "Floating point type expected");

  // If the value is a constant, we can obviously see if it is a zero or not.
  // TODO: Add BuildVector support.
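  // Note that both +0.0 and -0.0 count as zero here, so a constant -0.0
  // still reports false.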
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->isZero();
  return false;
}

bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
  assert(!Op.getValueType().isFloatingPoint() &&
         "Floating point types unsupported - use isKnownNeverZeroFloat");

  // If the value is a constant, we can obviously see if it is a zero or not.
  if (ISD::matchUnaryPredicate(
          Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
    return true;

  // TODO: Recognize more cases here.
  switch (Op.getOpcode()) {
  default: break;
  case ISD::OR:
    if (isKnownNeverZero(Op.getOperand(1)) ||
        isKnownNeverZero(Op.getOperand(0)))
      return true;
    break;
  }

  return false;
}

bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
  // Check the obvious case.
  if (A == B) return true;

  // Check for negative and positive zero.
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}

// FIXME: unify with llvm::haveNoCommonBitsSet.
// FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
  assert(A.getValueType() == B.getValueType() &&
         "Values must have the same type");
  return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
}

static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
                                ArrayRef<SDValue> Ops,
                                SelectionDAG &DAG) {
  int NumOps = Ops.size();
  assert(NumOps != 0 && "Can't build an empty vector!");
  assert(!VT.isScalableVector() &&
         "BUILD_VECTOR cannot be used with scalable types");
  assert(VT.getVectorNumElements() == (unsigned)NumOps &&
         "Incorrect element count in BUILD_VECTOR!");

  // BUILD_VECTOR of UNDEFs is UNDEF.
  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
    return DAG.getUNDEF(VT);

  // A BUILD_VECTOR of sequential extracts from the same vector of the same
  // type is the identity.
  SDValue IdentitySrc;
  bool IsIdentity = true;
  for (int i = 0; i != NumOps; ++i) {
    if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Ops[i].getOperand(0).getValueType() != VT ||
        (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
        !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
        cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
      IsIdentity = false;
      break;
    }
    IdentitySrc = Ops[i].getOperand(0);
  }
  if (IsIdentity)
    return IdentitySrc;

  return SDValue();
}

/// Try to simplify vector concatenation to an input value, undef, or build
/// vector.
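/// For example, a concatenation of in-order EXTRACT_SUBVECTORs of a single
/// source folds back to that source, and a concatenation of BUILD_VECTOR or
/// UNDEF operands is rebuilt as one wide BUILD_VECTOR.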
4300 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, 4301 ArrayRef<SDValue> Ops, 4302 SelectionDAG &DAG) { 4303 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!"); 4304 assert(llvm::all_of(Ops, 4305 [Ops](SDValue Op) { 4306 return Ops[0].getValueType() == Op.getValueType(); 4307 }) && 4308 "Concatenation of vectors with inconsistent value types!"); 4309 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) == 4310 VT.getVectorNumElements() && 4311 "Incorrect element count in vector concatenation!"); 4312 4313 if (Ops.size() == 1) 4314 return Ops[0]; 4315 4316 // Concat of UNDEFs is UNDEF. 4317 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 4318 return DAG.getUNDEF(VT); 4319 4320 // Scan the operands and look for extract operations from a single source 4321 // that correspond to insertion at the same location via this concatenation: 4322 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ... 4323 SDValue IdentitySrc; 4324 bool IsIdentity = true; 4325 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 4326 SDValue Op = Ops[i]; 4327 unsigned IdentityIndex = i * Op.getValueType().getVectorNumElements(); 4328 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR || 4329 Op.getOperand(0).getValueType() != VT || 4330 (IdentitySrc && Op.getOperand(0) != IdentitySrc) || 4331 Op.getConstantOperandVal(1) != IdentityIndex) { 4332 IsIdentity = false; 4333 break; 4334 } 4335 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) && 4336 "Unexpected identity source vector for concat of extracts"); 4337 IdentitySrc = Op.getOperand(0); 4338 } 4339 if (IsIdentity) { 4340 assert(IdentitySrc && "Failed to set source vector of extracts"); 4341 return IdentitySrc; 4342 } 4343 4344 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be 4345 // simplified to one big BUILD_VECTOR. 4346 // FIXME: Add support for SCALAR_TO_VECTOR as well. 4347 EVT SVT = VT.getScalarType(); 4348 SmallVector<SDValue, 16> Elts; 4349 for (SDValue Op : Ops) { 4350 EVT OpVT = Op.getValueType(); 4351 if (Op.isUndef()) 4352 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); 4353 else if (Op.getOpcode() == ISD::BUILD_VECTOR) 4354 Elts.append(Op->op_begin(), Op->op_end()); 4355 else 4356 return SDValue(); 4357 } 4358 4359 // BUILD_VECTOR requires all inputs to be of the same type, find the 4360 // maximum type and extend them all. 4361 for (SDValue Op : Elts) 4362 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 4363 4364 if (SVT.bitsGT(VT.getScalarType())) 4365 for (SDValue &Op : Elts) 4366 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) 4367 ? DAG.getZExtOrTrunc(Op, DL, SVT) 4368 : DAG.getSExtOrTrunc(Op, DL, SVT); 4369 4370 SDValue V = DAG.getBuildVector(VT, DL, Elts); 4371 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG); 4372 return V; 4373 } 4374 4375 /// Gets or creates the specified node. 
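/// Nodes are uniqued through the CSE map, so requesting the same opcode and
/// value type again returns the existing node rather than creating a
/// duplicate.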
4376 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { 4377 FoldingSetNodeID ID; 4378 AddNodeIDNode(ID, Opcode, getVTList(VT), None); 4379 void *IP = nullptr; 4380 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4381 return SDValue(E, 0); 4382 4383 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4384 getVTList(VT)); 4385 CSEMap.InsertNode(N, IP); 4386 4387 InsertNode(N); 4388 SDValue V = SDValue(N, 0); 4389 NewSDValueDbgMsg(V, "Creating new node: ", this); 4390 return V; 4391 } 4392 4393 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4394 SDValue Operand, const SDNodeFlags Flags) { 4395 // Constant fold unary operations with an integer constant operand. Even 4396 // opaque constant will be folded, because the folding of unary operations 4397 // doesn't create new constants with different values. Nevertheless, the 4398 // opaque flag is preserved during folding to prevent future folding with 4399 // other constants. 4400 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 4401 const APInt &Val = C->getAPIntValue(); 4402 switch (Opcode) { 4403 default: break; 4404 case ISD::SIGN_EXTEND: 4405 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 4406 C->isTargetOpcode(), C->isOpaque()); 4407 case ISD::TRUNCATE: 4408 if (C->isOpaque()) 4409 break; 4410 LLVM_FALLTHROUGH; 4411 case ISD::ANY_EXTEND: 4412 case ISD::ZERO_EXTEND: 4413 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 4414 C->isTargetOpcode(), C->isOpaque()); 4415 case ISD::UINT_TO_FP: 4416 case ISD::SINT_TO_FP: { 4417 APFloat apf(EVTToAPFloatSemantics(VT), 4418 APInt::getNullValue(VT.getSizeInBits())); 4419 (void)apf.convertFromAPInt(Val, 4420 Opcode==ISD::SINT_TO_FP, 4421 APFloat::rmNearestTiesToEven); 4422 return getConstantFP(apf, DL, VT); 4423 } 4424 case ISD::BITCAST: 4425 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 4426 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 4427 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 4428 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 4429 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 4430 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 4431 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 4432 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 4433 break; 4434 case ISD::ABS: 4435 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 4436 C->isOpaque()); 4437 case ISD::BITREVERSE: 4438 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 4439 C->isOpaque()); 4440 case ISD::BSWAP: 4441 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 4442 C->isOpaque()); 4443 case ISD::CTPOP: 4444 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 4445 C->isOpaque()); 4446 case ISD::CTLZ: 4447 case ISD::CTLZ_ZERO_UNDEF: 4448 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 4449 C->isOpaque()); 4450 case ISD::CTTZ: 4451 case ISD::CTTZ_ZERO_UNDEF: 4452 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 4453 C->isOpaque()); 4454 case ISD::FP16_TO_FP: { 4455 bool Ignored; 4456 APFloat FPV(APFloat::IEEEhalf(), 4457 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 4458 4459 // This can return overflow, underflow, or inexact; we don't care. 4460 // FIXME need to be more flexible about rounding mode. 
4461 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4462 APFloat::rmNearestTiesToEven, &Ignored); 4463 return getConstantFP(FPV, DL, VT); 4464 } 4465 } 4466 } 4467 4468 // Constant fold unary operations with a floating point constant operand. 4469 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4470 APFloat V = C->getValueAPF(); // make copy 4471 switch (Opcode) { 4472 case ISD::FNEG: 4473 V.changeSign(); 4474 return getConstantFP(V, DL, VT); 4475 case ISD::FABS: 4476 V.clearSign(); 4477 return getConstantFP(V, DL, VT); 4478 case ISD::FCEIL: { 4479 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4480 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4481 return getConstantFP(V, DL, VT); 4482 break; 4483 } 4484 case ISD::FTRUNC: { 4485 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4486 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4487 return getConstantFP(V, DL, VT); 4488 break; 4489 } 4490 case ISD::FFLOOR: { 4491 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4492 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4493 return getConstantFP(V, DL, VT); 4494 break; 4495 } 4496 case ISD::FP_EXTEND: { 4497 bool ignored; 4498 // This can return overflow, underflow, or inexact; we don't care. 4499 // FIXME need to be more flexible about rounding mode. 4500 (void)V.convert(EVTToAPFloatSemantics(VT), 4501 APFloat::rmNearestTiesToEven, &ignored); 4502 return getConstantFP(V, DL, VT); 4503 } 4504 case ISD::FP_TO_SINT: 4505 case ISD::FP_TO_UINT: { 4506 bool ignored; 4507 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4508 // FIXME need to be more flexible about rounding mode. 4509 APFloat::opStatus s = 4510 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4511 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4512 break; 4513 return getConstant(IntVal, DL, VT); 4514 } 4515 case ISD::BITCAST: 4516 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4517 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4518 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4519 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4520 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4521 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4522 break; 4523 case ISD::FP_TO_FP16: { 4524 bool Ignored; 4525 // This can return overflow, underflow, or inexact; we don't care. 4526 // FIXME need to be more flexible about rounding mode. 4527 (void)V.convert(APFloat::IEEEhalf(), 4528 APFloat::rmNearestTiesToEven, &Ignored); 4529 return getConstant(V.bitcastToAPInt(), DL, VT); 4530 } 4531 } 4532 } 4533 4534 // Constant fold unary operations with a vector integer or float operand. 4535 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4536 if (BV->isConstant()) { 4537 switch (Opcode) { 4538 default: 4539 // FIXME: Entirely reasonable to perform folding of other unary 4540 // operations here as the need arises. 
4541 break; 4542 case ISD::FNEG: 4543 case ISD::FABS: 4544 case ISD::FCEIL: 4545 case ISD::FTRUNC: 4546 case ISD::FFLOOR: 4547 case ISD::FP_EXTEND: 4548 case ISD::FP_TO_SINT: 4549 case ISD::FP_TO_UINT: 4550 case ISD::TRUNCATE: 4551 case ISD::ANY_EXTEND: 4552 case ISD::ZERO_EXTEND: 4553 case ISD::SIGN_EXTEND: 4554 case ISD::UINT_TO_FP: 4555 case ISD::SINT_TO_FP: 4556 case ISD::ABS: 4557 case ISD::BITREVERSE: 4558 case ISD::BSWAP: 4559 case ISD::CTLZ: 4560 case ISD::CTLZ_ZERO_UNDEF: 4561 case ISD::CTTZ: 4562 case ISD::CTTZ_ZERO_UNDEF: 4563 case ISD::CTPOP: { 4564 SDValue Ops = { Operand }; 4565 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) 4566 return Fold; 4567 } 4568 } 4569 } 4570 } 4571 4572 unsigned OpOpcode = Operand.getNode()->getOpcode(); 4573 switch (Opcode) { 4574 case ISD::FREEZE: 4575 assert(VT == Operand.getValueType() && "Unexpected VT!"); 4576 break; 4577 case ISD::TokenFactor: 4578 case ISD::MERGE_VALUES: 4579 case ISD::CONCAT_VECTORS: 4580 return Operand; // Factor, merge or concat of one node? No need. 4581 case ISD::BUILD_VECTOR: { 4582 // Attempt to simplify BUILD_VECTOR. 4583 SDValue Ops[] = {Operand}; 4584 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 4585 return V; 4586 break; 4587 } 4588 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); 4589 case ISD::FP_EXTEND: 4590 assert(VT.isFloatingPoint() && 4591 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!"); 4592 if (Operand.getValueType() == VT) return Operand; // noop conversion. 4593 assert((!VT.isVector() || 4594 VT.getVectorNumElements() == 4595 Operand.getValueType().getVectorNumElements()) && 4596 "Vector element count mismatch!"); 4597 assert(Operand.getValueType().bitsLT(VT) && 4598 "Invalid fpext node, dst < src!"); 4599 if (Operand.isUndef()) 4600 return getUNDEF(VT); 4601 break; 4602 case ISD::FP_TO_SINT: 4603 case ISD::FP_TO_UINT: 4604 if (Operand.isUndef()) 4605 return getUNDEF(VT); 4606 break; 4607 case ISD::SINT_TO_FP: 4608 case ISD::UINT_TO_FP: 4609 // [us]itofp(undef) = 0, because the result value is bounded. 4610 if (Operand.isUndef()) 4611 return getConstantFP(0.0, DL, VT); 4612 break; 4613 case ISD::SIGN_EXTEND: 4614 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4615 "Invalid SIGN_EXTEND!"); 4616 assert(VT.isVector() == Operand.getValueType().isVector() && 4617 "SIGN_EXTEND result type type should be vector iff the operand " 4618 "type is vector!"); 4619 if (Operand.getValueType() == VT) return Operand; // noop extension 4620 assert((!VT.isVector() || 4621 VT.getVectorElementCount() == 4622 Operand.getValueType().getVectorElementCount()) && 4623 "Vector element count mismatch!"); 4624 assert(Operand.getValueType().bitsLT(VT) && 4625 "Invalid sext node, dst < src!"); 4626 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) 4627 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4628 else if (OpOpcode == ISD::UNDEF) 4629 // sext(undef) = 0, because the top bits will all be the same. 
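      // (Folding to zero also keeps this consistent with the zext(undef)
      // fold below.)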
4630 return getConstant(0, DL, VT); 4631 break; 4632 case ISD::ZERO_EXTEND: 4633 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4634 "Invalid ZERO_EXTEND!"); 4635 assert(VT.isVector() == Operand.getValueType().isVector() && 4636 "ZERO_EXTEND result type type should be vector iff the operand " 4637 "type is vector!"); 4638 if (Operand.getValueType() == VT) return Operand; // noop extension 4639 assert((!VT.isVector() || 4640 VT.getVectorElementCount() == 4641 Operand.getValueType().getVectorElementCount()) && 4642 "Vector element count mismatch!"); 4643 assert(Operand.getValueType().bitsLT(VT) && 4644 "Invalid zext node, dst < src!"); 4645 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) 4646 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0)); 4647 else if (OpOpcode == ISD::UNDEF) 4648 // zext(undef) = 0, because the top bits will be zero. 4649 return getConstant(0, DL, VT); 4650 break; 4651 case ISD::ANY_EXTEND: 4652 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4653 "Invalid ANY_EXTEND!"); 4654 assert(VT.isVector() == Operand.getValueType().isVector() && 4655 "ANY_EXTEND result type type should be vector iff the operand " 4656 "type is vector!"); 4657 if (Operand.getValueType() == VT) return Operand; // noop extension 4658 assert((!VT.isVector() || 4659 VT.getVectorElementCount() == 4660 Operand.getValueType().getVectorElementCount()) && 4661 "Vector element count mismatch!"); 4662 assert(Operand.getValueType().bitsLT(VT) && 4663 "Invalid anyext node, dst < src!"); 4664 4665 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || 4666 OpOpcode == ISD::ANY_EXTEND) 4667 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x) 4668 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4669 else if (OpOpcode == ISD::UNDEF) 4670 return getUNDEF(VT); 4671 4672 // (ext (trunc x)) -> x 4673 if (OpOpcode == ISD::TRUNCATE) { 4674 SDValue OpOp = Operand.getOperand(0); 4675 if (OpOp.getValueType() == VT) { 4676 transferDbgValues(Operand, OpOp); 4677 return OpOp; 4678 } 4679 } 4680 break; 4681 case ISD::TRUNCATE: 4682 assert(VT.isInteger() && Operand.getValueType().isInteger() && 4683 "Invalid TRUNCATE!"); 4684 assert(VT.isVector() == Operand.getValueType().isVector() && 4685 "TRUNCATE result type type should be vector iff the operand " 4686 "type is vector!"); 4687 if (Operand.getValueType() == VT) return Operand; // noop truncate 4688 assert((!VT.isVector() || 4689 VT.getVectorElementCount() == 4690 Operand.getValueType().getVectorElementCount()) && 4691 "Vector element count mismatch!"); 4692 assert(Operand.getValueType().bitsGT(VT) && 4693 "Invalid truncate node, src < dst!"); 4694 if (OpOpcode == ISD::TRUNCATE) 4695 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4696 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || 4697 OpOpcode == ISD::ANY_EXTEND) { 4698 // If the source is smaller than the dest, we still need an extend. 
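      // E.g. truncating (zext i16 X to i64) down to i32 becomes
      // (zext i16 X to i32); if the inner source is instead wider than the
      // result we re-truncate, and if the sizes match we return it directly.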
4699 if (Operand.getOperand(0).getValueType().getScalarType() 4700 .bitsLT(VT.getScalarType())) 4701 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4702 if (Operand.getOperand(0).getValueType().bitsGT(VT)) 4703 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4704 return Operand.getOperand(0); 4705 } 4706 if (OpOpcode == ISD::UNDEF) 4707 return getUNDEF(VT); 4708 break; 4709 case ISD::ANY_EXTEND_VECTOR_INREG: 4710 case ISD::ZERO_EXTEND_VECTOR_INREG: 4711 case ISD::SIGN_EXTEND_VECTOR_INREG: 4712 assert(VT.isVector() && "This DAG node is restricted to vector types."); 4713 assert(Operand.getValueType().bitsLE(VT) && 4714 "The input must be the same size or smaller than the result."); 4715 assert(VT.getVectorNumElements() < 4716 Operand.getValueType().getVectorNumElements() && 4717 "The destination vector type must have fewer lanes than the input."); 4718 break; 4719 case ISD::ABS: 4720 assert(VT.isInteger() && VT == Operand.getValueType() && 4721 "Invalid ABS!"); 4722 if (OpOpcode == ISD::UNDEF) 4723 return getUNDEF(VT); 4724 break; 4725 case ISD::BSWAP: 4726 assert(VT.isInteger() && VT == Operand.getValueType() && 4727 "Invalid BSWAP!"); 4728 assert((VT.getScalarSizeInBits() % 16 == 0) && 4729 "BSWAP types must be a multiple of 16 bits!"); 4730 if (OpOpcode == ISD::UNDEF) 4731 return getUNDEF(VT); 4732 break; 4733 case ISD::BITREVERSE: 4734 assert(VT.isInteger() && VT == Operand.getValueType() && 4735 "Invalid BITREVERSE!"); 4736 if (OpOpcode == ISD::UNDEF) 4737 return getUNDEF(VT); 4738 break; 4739 case ISD::BITCAST: 4740 // Basic sanity checking. 4741 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 4742 "Cannot BITCAST between types of different sizes!"); 4743 if (VT == Operand.getValueType()) return Operand; // noop conversion. 4744 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4745 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4746 if (OpOpcode == ISD::UNDEF) 4747 return getUNDEF(VT); 4748 break; 4749 case ISD::SCALAR_TO_VECTOR: 4750 assert(VT.isVector() && !Operand.getValueType().isVector() && 4751 (VT.getVectorElementType() == Operand.getValueType() || 4752 (VT.getVectorElementType().isInteger() && 4753 Operand.getValueType().isInteger() && 4754 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4755 "Illegal SCALAR_TO_VECTOR node!"); 4756 if (OpOpcode == ISD::UNDEF) 4757 return getUNDEF(VT); 4758 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4759 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4760 isa<ConstantSDNode>(Operand.getOperand(1)) && 4761 Operand.getConstantOperandVal(1) == 0 && 4762 Operand.getOperand(0).getValueType() == VT) 4763 return Operand.getOperand(0); 4764 break; 4765 case ISD::FNEG: 4766 // Negation of an unknown bag of bits is still completely undefined. 
4767 if (OpOpcode == ISD::UNDEF) 4768 return getUNDEF(VT); 4769 4770 if (OpOpcode == ISD::FNEG) // --X -> X 4771 return Operand.getOperand(0); 4772 break; 4773 case ISD::FABS: 4774 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4775 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4776 break; 4777 } 4778 4779 SDNode *N; 4780 SDVTList VTs = getVTList(VT); 4781 SDValue Ops[] = {Operand}; 4782 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4783 FoldingSetNodeID ID; 4784 AddNodeIDNode(ID, Opcode, VTs, Ops); 4785 void *IP = nullptr; 4786 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4787 E->intersectFlagsWith(Flags); 4788 return SDValue(E, 0); 4789 } 4790 4791 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4792 N->setFlags(Flags); 4793 createOperands(N, Ops); 4794 CSEMap.InsertNode(N, IP); 4795 } else { 4796 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4797 createOperands(N, Ops); 4798 } 4799 4800 InsertNode(N); 4801 SDValue V = SDValue(N, 0); 4802 NewSDValueDbgMsg(V, "Creating new node: ", this); 4803 return V; 4804 } 4805 4806 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, 4807 const APInt &C2) { 4808 switch (Opcode) { 4809 case ISD::ADD: return C1 + C2; 4810 case ISD::SUB: return C1 - C2; 4811 case ISD::MUL: return C1 * C2; 4812 case ISD::AND: return C1 & C2; 4813 case ISD::OR: return C1 | C2; 4814 case ISD::XOR: return C1 ^ C2; 4815 case ISD::SHL: return C1 << C2; 4816 case ISD::SRL: return C1.lshr(C2); 4817 case ISD::SRA: return C1.ashr(C2); 4818 case ISD::ROTL: return C1.rotl(C2); 4819 case ISD::ROTR: return C1.rotr(C2); 4820 case ISD::SMIN: return C1.sle(C2) ? C1 : C2; 4821 case ISD::SMAX: return C1.sge(C2) ? C1 : C2; 4822 case ISD::UMIN: return C1.ule(C2) ? C1 : C2; 4823 case ISD::UMAX: return C1.uge(C2) ? C1 : C2; 4824 case ISD::SADDSAT: return C1.sadd_sat(C2); 4825 case ISD::UADDSAT: return C1.uadd_sat(C2); 4826 case ISD::SSUBSAT: return C1.ssub_sat(C2); 4827 case ISD::USUBSAT: return C1.usub_sat(C2); 4828 case ISD::UDIV: 4829 if (!C2.getBoolValue()) 4830 break; 4831 return C1.udiv(C2); 4832 case ISD::UREM: 4833 if (!C2.getBoolValue()) 4834 break; 4835 return C1.urem(C2); 4836 case ISD::SDIV: 4837 if (!C2.getBoolValue()) 4838 break; 4839 return C1.sdiv(C2); 4840 case ISD::SREM: 4841 if (!C2.getBoolValue()) 4842 break; 4843 return C1.srem(C2); 4844 } 4845 return llvm::None; 4846 } 4847 4848 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4849 const GlobalAddressSDNode *GA, 4850 const SDNode *N2) { 4851 if (GA->getOpcode() != ISD::GlobalAddress) 4852 return SDValue(); 4853 if (!TLI->isOffsetFoldingLegal(GA)) 4854 return SDValue(); 4855 auto *C2 = dyn_cast<ConstantSDNode>(N2); 4856 if (!C2) 4857 return SDValue(); 4858 int64_t Offset = C2->getSExtValue(); 4859 switch (Opcode) { 4860 case ISD::ADD: break; 4861 case ISD::SUB: Offset = -uint64_t(Offset); break; 4862 default: return SDValue(); 4863 } 4864 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, 4865 GA->getOffset() + uint64_t(Offset)); 4866 } 4867 4868 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4869 switch (Opcode) { 4870 case ISD::SDIV: 4871 case ISD::UDIV: 4872 case ISD::SREM: 4873 case ISD::UREM: { 4874 // If a divisor is zero/undef or any element of a divisor vector is 4875 // zero/undef, the whole op is undef. 
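    // FoldValue above reports "no fold" by returning llvm::None (notably for
    // a division or remainder by zero) and otherwise folds directly on APInt,
    // so ADD/SUB/MUL wrap modulo 2^BitWidth while the *SAT opcodes clamp.  A
    // minimal standalone sketch of that pattern (the helper name and opcode
    // subset are illustrative only; it builds against the APInt/Optional
    // support already included in this file):
#if 0
static Optional<APInt> foldBinOpSketch(unsigned Opcode, const APInt &C1,
                                       const APInt &C2) {
  switch (Opcode) {
  case ISD::ADD:
    return C1 + C2;          // Wraps modulo 2^BitWidth.
  case ISD::UADDSAT:
    return C1.uadd_sat(C2);  // Clamps at the unsigned maximum instead.
  case ISD::UDIV:
    if (!C2.getBoolValue())
      break;                 // Division by zero: no fold to report.
    return C1.udiv(C2);
  }
  return None;
}
// foldBinOpSketch(ISD::UADDSAT, APInt(8, 250), APInt(8, 10)) yields 255,
// while ISD::ADD on the same operands wraps around to 4.
#endif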
4876 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4877 SDValue Divisor = Ops[1]; 4878 if (Divisor.isUndef() || isNullConstant(Divisor)) 4879 return true; 4880 4881 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4882 llvm::any_of(Divisor->op_values(), 4883 [](SDValue V) { return V.isUndef() || 4884 isNullConstant(V); }); 4885 // TODO: Handle signed overflow. 4886 } 4887 // TODO: Handle oversized shifts. 4888 default: 4889 return false; 4890 } 4891 } 4892 4893 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4894 EVT VT, ArrayRef<SDValue> Ops) { 4895 // If the opcode is a target-specific ISD node, there's nothing we can 4896 // do here and the operand rules may not line up with the below, so 4897 // bail early. 4898 if (Opcode >= ISD::BUILTIN_OP_END) 4899 return SDValue(); 4900 4901 // For now, the array Ops should only contain two values. 4902 // This enforcement will be removed once this function is merged with 4903 // FoldConstantVectorArithmetic 4904 if (Ops.size() != 2) 4905 return SDValue(); 4906 4907 if (isUndef(Opcode, Ops)) 4908 return getUNDEF(VT); 4909 4910 SDNode *N1 = Ops[0].getNode(); 4911 SDNode *N2 = Ops[1].getNode(); 4912 4913 // Handle the case of two scalars. 4914 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) { 4915 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) { 4916 if (C1->isOpaque() || C2->isOpaque()) 4917 return SDValue(); 4918 4919 Optional<APInt> FoldAttempt = 4920 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()); 4921 if (!FoldAttempt) 4922 return SDValue(); 4923 4924 SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT); 4925 assert((!Folded || !VT.isVector()) && 4926 "Can't fold vectors ops with scalar operands"); 4927 return Folded; 4928 } 4929 } 4930 4931 // fold (add Sym, c) -> Sym+c 4932 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1)) 4933 return FoldSymbolOffset(Opcode, VT, GA, N2); 4934 if (TLI->isCommutativeBinOp(Opcode)) 4935 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2)) 4936 return FoldSymbolOffset(Opcode, VT, GA, N1); 4937 4938 // TODO: All the folds below are performed lane-by-lane and assume a fixed 4939 // vector width, however we should be able to do constant folds involving 4940 // splat vector nodes too. 4941 if (VT.isScalableVector()) 4942 return SDValue(); 4943 4944 // For fixed width vectors, extract each constant element and fold them 4945 // individually. Either input may be an undef value. 4946 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1); 4947 if (!BV1 && !N1->isUndef()) 4948 return SDValue(); 4949 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2); 4950 if (!BV2 && !N2->isUndef()) 4951 return SDValue(); 4952 // If both operands are undef, that's handled the same way as scalars. 4953 if (!BV1 && !BV2) 4954 return SDValue(); 4955 4956 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) && 4957 "Vector binop with different number of elements in operands?"); 4958 4959 EVT SVT = VT.getScalarType(); 4960 EVT LegalSVT = SVT; 4961 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4962 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4963 if (LegalSVT.bitsLT(SVT)) 4964 return SDValue(); 4965 } 4966 SmallVector<SDValue, 4> Outputs; 4967 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands(); 4968 for (unsigned I = 0; I != NumOps; ++I) { 4969 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT); 4970 SDValue V2 = BV2 ? 
BV2->getOperand(I) : getUNDEF(SVT); 4971 if (SVT.isInteger()) { 4972 if (V1->getValueType(0).bitsGT(SVT)) 4973 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1); 4974 if (V2->getValueType(0).bitsGT(SVT)) 4975 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2); 4976 } 4977 4978 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT) 4979 return SDValue(); 4980 4981 // Fold one vector element. 4982 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2); 4983 if (LegalSVT != SVT) 4984 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4985 4986 // Scalar folding only succeeded if the result is a constant or UNDEF. 4987 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4988 ScalarResult.getOpcode() != ISD::ConstantFP) 4989 return SDValue(); 4990 Outputs.push_back(ScalarResult); 4991 } 4992 4993 assert(VT.getVectorNumElements() == Outputs.size() && 4994 "Vector size mismatch!"); 4995 4996 // We may have a vector type but a scalar result. Create a splat. 4997 Outputs.resize(VT.getVectorNumElements(), Outputs.back()); 4998 4999 // Build a big vector out of the scalar elements we generated. 5000 return getBuildVector(VT, SDLoc(), Outputs); 5001 } 5002 5003 // TODO: Merge with FoldConstantArithmetic 5004 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, 5005 const SDLoc &DL, EVT VT, 5006 ArrayRef<SDValue> Ops, 5007 const SDNodeFlags Flags) { 5008 // If the opcode is a target-specific ISD node, there's nothing we can 5009 // do here and the operand rules may not line up with the below, so 5010 // bail early. 5011 if (Opcode >= ISD::BUILTIN_OP_END) 5012 return SDValue(); 5013 5014 if (isUndef(Opcode, Ops)) 5015 return getUNDEF(VT); 5016 5017 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday? 5018 if (!VT.isVector()) 5019 return SDValue(); 5020 5021 // TODO: All the folds below are performed lane-by-lane and assume a fixed 5022 // vector width, however we should be able to do constant folds involving 5023 // splat vector nodes too. 5024 if (VT.isScalableVector()) 5025 return SDValue(); 5026 5027 // From this point onwards all vectors are assumed to be fixed width. 5028 unsigned NumElts = VT.getVectorNumElements(); 5029 5030 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) { 5031 return !Op.getValueType().isVector() || 5032 Op.getValueType().getVectorNumElements() == NumElts; 5033 }; 5034 5035 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { 5036 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); 5037 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || 5038 (BV && BV->isConstant()); 5039 }; 5040 5041 // All operands must be vector types with the same number of elements as 5042 // the result type and must be either UNDEF or a build vector of constant 5043 // or UNDEF scalars. 5044 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) || 5045 !llvm::all_of(Ops, IsScalarOrSameVectorSize)) 5046 return SDValue(); 5047 5048 // If we are comparing vectors, then the result needs to be a i1 boolean 5049 // that is then sign-extended back to the legal result type. 5050 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); 5051 5052 // Find legal integer scalar type for constant promotion and 5053 // ensure that its scalar size is at least as large as source. 
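  // For example, if i8 is not a legal type and integers are promoted to i32,
  // each folded i8 lane below is re-emitted sign-extended to i32 so the
  // resulting BUILD_VECTOR carries operands of the legal scalar type.  The
  // same widening on a raw APInt (values chosen only for illustration):
#if 0
  assert(APInt(8, 0xF0).sext(32) == APInt(32, 0xFFFFFFF0));
#endif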
5054 EVT LegalSVT = VT.getScalarType(); 5055 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 5056 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 5057 if (LegalSVT.bitsLT(VT.getScalarType())) 5058 return SDValue(); 5059 } 5060 5061 // Constant fold each scalar lane separately. 5062 SmallVector<SDValue, 4> ScalarResults; 5063 for (unsigned i = 0; i != NumElts; i++) { 5064 SmallVector<SDValue, 4> ScalarOps; 5065 for (SDValue Op : Ops) { 5066 EVT InSVT = Op.getValueType().getScalarType(); 5067 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op); 5068 if (!InBV) { 5069 // We've checked that this is UNDEF or a constant of some kind. 5070 if (Op.isUndef()) 5071 ScalarOps.push_back(getUNDEF(InSVT)); 5072 else 5073 ScalarOps.push_back(Op); 5074 continue; 5075 } 5076 5077 SDValue ScalarOp = InBV->getOperand(i); 5078 EVT ScalarVT = ScalarOp.getValueType(); 5079 5080 // Build vector (integer) scalar operands may need implicit 5081 // truncation - do this before constant folding. 5082 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 5083 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 5084 5085 ScalarOps.push_back(ScalarOp); 5086 } 5087 5088 // Constant fold the scalar operands. 5089 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 5090 5091 // Legalize the (integer) scalar constant if necessary. 5092 if (LegalSVT != SVT) 5093 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 5094 5095 // Scalar folding only succeeded if the result is a constant or UNDEF. 5096 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 5097 ScalarResult.getOpcode() != ISD::ConstantFP) 5098 return SDValue(); 5099 ScalarResults.push_back(ScalarResult); 5100 } 5101 5102 SDValue V = getBuildVector(VT, DL, ScalarResults); 5103 NewSDValueDbgMsg(V, "New node fold constant vector: ", this); 5104 return V; 5105 } 5106 5107 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL, 5108 EVT VT, SDValue N1, SDValue N2) { 5109 // TODO: We don't do any constant folding for strict FP opcodes here, but we 5110 // should. That will require dealing with a potentially non-default 5111 // rounding mode, checking the "opStatus" return value from the APFloat 5112 // math calculations, and possibly other variations. 5113 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 5114 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 5115 if (N1CFP && N2CFP) { 5116 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF(); 5117 switch (Opcode) { 5118 case ISD::FADD: 5119 C1.add(C2, APFloat::rmNearestTiesToEven); 5120 return getConstantFP(C1, DL, VT); 5121 case ISD::FSUB: 5122 C1.subtract(C2, APFloat::rmNearestTiesToEven); 5123 return getConstantFP(C1, DL, VT); 5124 case ISD::FMUL: 5125 C1.multiply(C2, APFloat::rmNearestTiesToEven); 5126 return getConstantFP(C1, DL, VT); 5127 case ISD::FDIV: 5128 C1.divide(C2, APFloat::rmNearestTiesToEven); 5129 return getConstantFP(C1, DL, VT); 5130 case ISD::FREM: 5131 C1.mod(C2); 5132 return getConstantFP(C1, DL, VT); 5133 case ISD::FCOPYSIGN: 5134 C1.copySign(C2); 5135 return getConstantFP(C1, DL, VT); 5136 default: break; 5137 } 5138 } 5139 if (N1CFP && Opcode == ISD::FP_ROUND) { 5140 APFloat C1 = N1CFP->getValueAPF(); // make copy 5141 bool Unused; 5142 // This can return overflow, underflow, or inexact; we don't care. 5143 // FIXME need to be more flexible about rounding mode. 
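  // The APFloat calls used for these folds mutate their left operand in place
  // and return an opStatus that is intentionally ignored; rounding is always
  // to nearest-even.  A standalone sketch of the same two calls (the choice
  // of half precision as the narrower type is only for illustration):
#if 0
static APFloat foldFAddThenRoundSketch(APFloat A, const APFloat &B) {
  A.add(B, APFloat::rmNearestTiesToEven);  // In-place A += B, status ignored.
  bool LosesInfo;
  A.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &LosesInfo);
  return A;                                // Possible precision loss ignored.
}
#endif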
5144 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, 5145 &Unused); 5146 return getConstantFP(C1, DL, VT); 5147 } 5148 5149 switch (Opcode) { 5150 case ISD::FSUB: 5151 // -0.0 - undef --> undef (consistent with "fneg undef") 5152 if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef()) 5153 return getUNDEF(VT); 5154 LLVM_FALLTHROUGH; 5155 5156 case ISD::FADD: 5157 case ISD::FMUL: 5158 case ISD::FDIV: 5159 case ISD::FREM: 5160 // If both operands are undef, the result is undef. If 1 operand is undef, 5161 // the result is NaN. This should match the behavior of the IR optimizer. 5162 if (N1.isUndef() && N2.isUndef()) 5163 return getUNDEF(VT); 5164 if (N1.isUndef() || N2.isUndef()) 5165 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); 5166 } 5167 return SDValue(); 5168 } 5169 5170 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5171 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 5172 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5173 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 5174 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5175 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5176 5177 // Canonicalize constant to RHS if commutative. 5178 if (TLI->isCommutativeBinOp(Opcode)) { 5179 if (N1C && !N2C) { 5180 std::swap(N1C, N2C); 5181 std::swap(N1, N2); 5182 } else if (N1CFP && !N2CFP) { 5183 std::swap(N1CFP, N2CFP); 5184 std::swap(N1, N2); 5185 } 5186 } 5187 5188 switch (Opcode) { 5189 default: break; 5190 case ISD::TokenFactor: 5191 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 5192 N2.getValueType() == MVT::Other && "Invalid token factor!"); 5193 // Fold trivial token factors. 5194 if (N1.getOpcode() == ISD::EntryToken) return N2; 5195 if (N2.getOpcode() == ISD::EntryToken) return N1; 5196 if (N1 == N2) return N1; 5197 break; 5198 case ISD::BUILD_VECTOR: { 5199 // Attempt to simplify BUILD_VECTOR. 5200 SDValue Ops[] = {N1, N2}; 5201 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5202 return V; 5203 break; 5204 } 5205 case ISD::CONCAT_VECTORS: { 5206 SDValue Ops[] = {N1, N2}; 5207 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5208 return V; 5209 break; 5210 } 5211 case ISD::AND: 5212 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5213 assert(N1.getValueType() == N2.getValueType() && 5214 N1.getValueType() == VT && "Binary operator types must match!"); 5215 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 5216 // worth handling here. 5217 if (N2C && N2C->isNullValue()) 5218 return N2; 5219 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X 5220 return N1; 5221 break; 5222 case ISD::OR: 5223 case ISD::XOR: 5224 case ISD::ADD: 5225 case ISD::SUB: 5226 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5227 assert(N1.getValueType() == N2.getValueType() && 5228 N1.getValueType() == VT && "Binary operator types must match!"); 5229 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so 5230 // it's worth handling here. 
5231 if (N2C && N2C->isNullValue()) 5232 return N1; 5233 break; 5234 case ISD::MUL: 5235 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5236 assert(N1.getValueType() == N2.getValueType() && 5237 N1.getValueType() == VT && "Binary operator types must match!"); 5238 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { 5239 APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue(); 5240 APInt N2CImm = N2C->getAPIntValue(); 5241 return getVScale(DL, VT, MulImm * N2CImm); 5242 } 5243 break; 5244 case ISD::UDIV: 5245 case ISD::UREM: 5246 case ISD::MULHU: 5247 case ISD::MULHS: 5248 case ISD::SDIV: 5249 case ISD::SREM: 5250 case ISD::SMIN: 5251 case ISD::SMAX: 5252 case ISD::UMIN: 5253 case ISD::UMAX: 5254 case ISD::SADDSAT: 5255 case ISD::SSUBSAT: 5256 case ISD::UADDSAT: 5257 case ISD::USUBSAT: 5258 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5259 assert(N1.getValueType() == N2.getValueType() && 5260 N1.getValueType() == VT && "Binary operator types must match!"); 5261 break; 5262 case ISD::FADD: 5263 case ISD::FSUB: 5264 case ISD::FMUL: 5265 case ISD::FDIV: 5266 case ISD::FREM: 5267 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5268 assert(N1.getValueType() == N2.getValueType() && 5269 N1.getValueType() == VT && "Binary operator types must match!"); 5270 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags)) 5271 return V; 5272 break; 5273 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match. 5274 assert(N1.getValueType() == VT && 5275 N1.getValueType().isFloatingPoint() && 5276 N2.getValueType().isFloatingPoint() && 5277 "Invalid FCOPYSIGN!"); 5278 break; 5279 case ISD::SHL: 5280 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { 5281 APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue(); 5282 APInt ShiftImm = N2C->getAPIntValue(); 5283 return getVScale(DL, VT, MulImm << ShiftImm); 5284 } 5285 LLVM_FALLTHROUGH; 5286 case ISD::SRA: 5287 case ISD::SRL: 5288 if (SDValue V = simplifyShift(N1, N2)) 5289 return V; 5290 LLVM_FALLTHROUGH; 5291 case ISD::ROTL: 5292 case ISD::ROTR: 5293 assert(VT == N1.getValueType() && 5294 "Shift operators return type must be the same as their first arg"); 5295 assert(VT.isInteger() && N2.getValueType().isInteger() && 5296 "Shifts only work on integers"); 5297 assert((!VT.isVector() || VT == N2.getValueType()) && 5298 "Vector shift amounts must be in the same as their first arg"); 5299 // Verify that the shift amount VT is big enough to hold valid shift 5300 // amounts. This catches things like trying to shift an i1024 value by an 5301 // i8, which is easy to fall into in generic code that uses 5302 // TLI.getShiftAmount(). 5303 assert(N2.getValueType().getScalarSizeInBits().getFixedSize() >= 5304 Log2_32_Ceil(VT.getScalarSizeInBits().getFixedSize()) && 5305 "Invalid use of small shift amount with oversized value!"); 5306 5307 // Always fold shifts of i1 values so the code generator doesn't need to 5308 // handle them. Since we know the size of the shift has to be less than the 5309 // size of the value, the shift/rotate count is guaranteed to be zero. 
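    // The shift-amount width assertion above only requires that the amount
    // type can represent VT.getScalarSizeInBits() - 1.  For example, shifting
    // an i1024 value needs an amount type of at least Log2_32_Ceil(1024) == 10
    // bits, so an i8 amount would be rejected while i16 is fine.  The same
    // check in isolation (the helper name is illustrative only):
#if 0
static bool isShiftAmountWideEnoughSketch(unsigned ValueBits,
                                          unsigned AmountBits) {
  return AmountBits >= Log2_32_Ceil(ValueBits);
}
// isShiftAmountWideEnoughSketch(1024, 8)  -> false
// isShiftAmountWideEnoughSketch(1024, 16) -> true
#endif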
5310 if (VT == MVT::i1) 5311 return N1; 5312 if (N2C && N2C->isNullValue()) 5313 return N1; 5314 break; 5315 case ISD::FP_ROUND: 5316 assert(VT.isFloatingPoint() && 5317 N1.getValueType().isFloatingPoint() && 5318 VT.bitsLE(N1.getValueType()) && 5319 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && 5320 "Invalid FP_ROUND!"); 5321 if (N1.getValueType() == VT) return N1; // noop conversion. 5322 break; 5323 case ISD::AssertSext: 5324 case ISD::AssertZext: { 5325 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5326 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5327 assert(VT.isInteger() && EVT.isInteger() && 5328 "Cannot *_EXTEND_INREG FP types"); 5329 assert(!EVT.isVector() && 5330 "AssertSExt/AssertZExt type should be the vector element type " 5331 "rather than the vector type!"); 5332 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!"); 5333 if (VT.getScalarType() == EVT) return N1; // noop assertion. 5334 break; 5335 } 5336 case ISD::SIGN_EXTEND_INREG: { 5337 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5338 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5339 assert(VT.isInteger() && EVT.isInteger() && 5340 "Cannot *_EXTEND_INREG FP types"); 5341 assert(EVT.isVector() == VT.isVector() && 5342 "SIGN_EXTEND_INREG type should be vector iff the operand " 5343 "type is vector!"); 5344 assert((!EVT.isVector() || 5345 EVT.getVectorElementCount() == VT.getVectorElementCount()) && 5346 "Vector element counts must match in SIGN_EXTEND_INREG"); 5347 assert(EVT.bitsLE(VT) && "Not extending!"); 5348 if (EVT == VT) return N1; // Not actually extending 5349 5350 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 5351 unsigned FromBits = EVT.getScalarSizeInBits(); 5352 Val <<= Val.getBitWidth() - FromBits; 5353 Val.ashrInPlace(Val.getBitWidth() - FromBits); 5354 return getConstant(Val, DL, ConstantVT); 5355 }; 5356 5357 if (N1C) { 5358 const APInt &Val = N1C->getAPIntValue(); 5359 return SignExtendInReg(Val, VT); 5360 } 5361 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 5362 SmallVector<SDValue, 8> Ops; 5363 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 5364 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5365 SDValue Op = N1.getOperand(i); 5366 if (Op.isUndef()) { 5367 Ops.push_back(getUNDEF(OpVT)); 5368 continue; 5369 } 5370 ConstantSDNode *C = cast<ConstantSDNode>(Op); 5371 APInt Val = C->getAPIntValue(); 5372 Ops.push_back(SignExtendInReg(Val, OpVT)); 5373 } 5374 return getBuildVector(VT, DL, Ops); 5375 } 5376 break; 5377 } 5378 case ISD::EXTRACT_VECTOR_ELT: 5379 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() && 5380 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \ 5381 element type of the vector."); 5382 5383 // Extract from an undefined value or using an undefined index is undefined. 5384 if (N1.isUndef() || N2.isUndef()) 5385 return getUNDEF(VT); 5386 5387 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length 5388 // vectors. For scalable vectors we will provide appropriate support for 5389 // dealing with arbitrary indices. 5390 if (N2C && N1.getValueType().isFixedLengthVector() && 5391 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) 5392 return getUNDEF(VT); 5393 5394 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 5395 // expanding copies of large vectors from registers. This only works for 5396 // fixed length vectors, since we need to know the exact number of 5397 // elements. 
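    // For example, extracting lane 5 from (concat_vectors <4 x i32> A,
    // <4 x i32> B) becomes an extract of lane 1 from B: Factor is 4, the
    // operand index is 5 / 4 == 1 and the new element index is 5 % 4 == 1.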
5398 if (N2C && N1.getOperand(0).getValueType().isFixedLengthVector() && 5399 N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0) { 5400 unsigned Factor = 5401 N1.getOperand(0).getValueType().getVectorNumElements(); 5402 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 5403 N1.getOperand(N2C->getZExtValue() / Factor), 5404 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL)); 5405 } 5406 5407 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while 5408 // lowering is expanding large vector constants. 5409 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR || 5410 N1.getOpcode() == ISD::SPLAT_VECTOR)) { 5411 assert((N1.getOpcode() != ISD::BUILD_VECTOR || 5412 N1.getValueType().isFixedLengthVector()) && 5413 "BUILD_VECTOR used for scalable vectors"); 5414 unsigned Index = 5415 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0; 5416 SDValue Elt = N1.getOperand(Index); 5417 5418 if (VT != Elt.getValueType()) 5419 // If the vector element type is not legal, the BUILD_VECTOR operands 5420 // are promoted and implicitly truncated, and the result implicitly 5421 // extended. Make that explicit here. 5422 Elt = getAnyExtOrTrunc(Elt, DL, VT); 5423 5424 return Elt; 5425 } 5426 5427 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 5428 // operations are lowered to scalars. 5429 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 5430 // If the indices are the same, return the inserted element else 5431 // if the indices are known different, extract the element from 5432 // the original vector. 5433 SDValue N1Op2 = N1.getOperand(2); 5434 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); 5435 5436 if (N1Op2C && N2C) { 5437 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 5438 if (VT == N1.getOperand(1).getValueType()) 5439 return N1.getOperand(1); 5440 else 5441 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 5442 } 5443 5444 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 5445 } 5446 } 5447 5448 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed 5449 // when vector types are scalarized and v1iX is legal. 5450 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx). 5451 // Here we are completely ignoring the extract element index (N2), 5452 // which is fine for fixed width vectors, since any index other than 0 5453 // is undefined anyway. However, this cannot be ignored for scalable 5454 // vectors - in theory we could support this, but we don't want to do this 5455 // without a profitability check. 5456 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5457 N1.getValueType().isFixedLengthVector() && 5458 N1.getValueType().getVectorNumElements() == 1) { 5459 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), 5460 N1.getOperand(1)); 5461 } 5462 break; 5463 case ISD::EXTRACT_ELEMENT: 5464 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 5465 assert(!N1.getValueType().isVector() && !VT.isVector() && 5466 (N1.getValueType().isInteger() == VT.isInteger()) && 5467 N1.getValueType() != VT && 5468 "Wrong types for EXTRACT_ELEMENT!"); 5469 5470 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 5471 // 64-bit integers into 32-bit parts. Instead of building the extract of 5472 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 5473 if (N1.getOpcode() == ISD::BUILD_PAIR) 5474 return N1.getOperand(N2C->getZExtValue()); 5475 5476 // EXTRACT_ELEMENT of a constant int is also very common. 
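    // For example, splitting the i64 constant 0x11223344AABBCCDD into i32
    // halves yields 0xAABBCCDD for element 0 (the low part, independent of
    // endianness) and 0x11223344 for element 1.  The same shift-and-truncate
    // on a raw APInt:
#if 0
    assert(APInt(64, 0x11223344AABBCCDDULL).trunc(32) ==
           APInt(32, 0xAABBCCDD));                       // element 0
    assert(APInt(64, 0x11223344AABBCCDDULL).lshr(32).trunc(32) ==
           APInt(32, 0x11223344));                       // element 1
#endif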
5477 if (N1C) { 5478 unsigned ElementSize = VT.getSizeInBits(); 5479 unsigned Shift = ElementSize * N2C->getZExtValue(); 5480 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift); 5481 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT); 5482 } 5483 break; 5484 case ISD::EXTRACT_SUBVECTOR: 5485 EVT N1VT = N1.getValueType(); 5486 assert(VT.isVector() && N1VT.isVector() && 5487 "Extract subvector VTs must be vectors!"); 5488 assert(VT.getVectorElementType() == N1VT.getVectorElementType() && 5489 "Extract subvector VTs must have the same element type!"); 5490 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) && 5491 "Cannot extract a scalable vector from a fixed length vector!"); 5492 assert((VT.isScalableVector() != N1VT.isScalableVector() || 5493 VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) && 5494 "Extract subvector must be from larger vector to smaller vector!"); 5495 assert(N2C && "Extract subvector index must be a constant"); 5496 assert((VT.isScalableVector() != N1VT.isScalableVector() || 5497 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <= 5498 N1VT.getVectorMinNumElements()) && 5499 "Extract subvector overflow!"); 5500 5501 // Trivial extraction. 5502 if (VT == N1VT) 5503 return N1; 5504 5505 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. 5506 if (N1.isUndef()) 5507 return getUNDEF(VT); 5508 5509 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of 5510 // the concat have the same type as the extract. 5511 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && 5512 N1.getNumOperands() > 0 && VT == N1.getOperand(0).getValueType()) { 5513 unsigned Factor = VT.getVectorNumElements(); 5514 return N1.getOperand(N2C->getZExtValue() / Factor); 5515 } 5516 5517 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created 5518 // during shuffle legalization. 5519 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 5520 VT == N1.getOperand(1).getValueType()) 5521 return N1.getOperand(1); 5522 break; 5523 } 5524 5525 // Perform trivial constant folding. 5526 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2})) 5527 return SV; 5528 5529 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2)) 5530 return V; 5531 5532 // Canonicalize an UNDEF to the RHS, even over a constant. 5533 if (N1.isUndef()) { 5534 if (TLI->isCommutativeBinOp(Opcode)) { 5535 std::swap(N1, N2); 5536 } else { 5537 switch (Opcode) { 5538 case ISD::SIGN_EXTEND_INREG: 5539 case ISD::SUB: 5540 return getUNDEF(VT); // fold op(undef, arg2) -> undef 5541 case ISD::UDIV: 5542 case ISD::SDIV: 5543 case ISD::UREM: 5544 case ISD::SREM: 5545 case ISD::SSUBSAT: 5546 case ISD::USUBSAT: 5547 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 5548 } 5549 } 5550 } 5551 5552 // Fold a bunch of operators when the RHS is undef. 5553 if (N2.isUndef()) { 5554 switch (Opcode) { 5555 case ISD::XOR: 5556 if (N1.isUndef()) 5557 // Handle undef ^ undef -> 0 special case. This is a common 5558 // idiom (misuse). 
5559 return getConstant(0, DL, VT); 5560 LLVM_FALLTHROUGH; 5561 case ISD::ADD: 5562 case ISD::SUB: 5563 case ISD::UDIV: 5564 case ISD::SDIV: 5565 case ISD::UREM: 5566 case ISD::SREM: 5567 return getUNDEF(VT); // fold op(arg1, undef) -> undef 5568 case ISD::MUL: 5569 case ISD::AND: 5570 case ISD::SSUBSAT: 5571 case ISD::USUBSAT: 5572 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 5573 case ISD::OR: 5574 case ISD::SADDSAT: 5575 case ISD::UADDSAT: 5576 return getAllOnesConstant(DL, VT); 5577 } 5578 } 5579 5580 // Memoize this node if possible. 5581 SDNode *N; 5582 SDVTList VTs = getVTList(VT); 5583 SDValue Ops[] = {N1, N2}; 5584 if (VT != MVT::Glue) { 5585 FoldingSetNodeID ID; 5586 AddNodeIDNode(ID, Opcode, VTs, Ops); 5587 void *IP = nullptr; 5588 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5589 E->intersectFlagsWith(Flags); 5590 return SDValue(E, 0); 5591 } 5592 5593 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5594 N->setFlags(Flags); 5595 createOperands(N, Ops); 5596 CSEMap.InsertNode(N, IP); 5597 } else { 5598 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5599 createOperands(N, Ops); 5600 } 5601 5602 InsertNode(N); 5603 SDValue V = SDValue(N, 0); 5604 NewSDValueDbgMsg(V, "Creating new node: ", this); 5605 return V; 5606 } 5607 5608 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5609 SDValue N1, SDValue N2, SDValue N3, 5610 const SDNodeFlags Flags) { 5611 // Perform various simplifications. 5612 switch (Opcode) { 5613 case ISD::FMA: { 5614 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5615 assert(N1.getValueType() == VT && N2.getValueType() == VT && 5616 N3.getValueType() == VT && "FMA types must match!"); 5617 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5618 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5619 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 5620 if (N1CFP && N2CFP && N3CFP) { 5621 APFloat V1 = N1CFP->getValueAPF(); 5622 const APFloat &V2 = N2CFP->getValueAPF(); 5623 const APFloat &V3 = N3CFP->getValueAPF(); 5624 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 5625 return getConstantFP(V1, DL, VT); 5626 } 5627 break; 5628 } 5629 case ISD::BUILD_VECTOR: { 5630 // Attempt to simplify BUILD_VECTOR. 5631 SDValue Ops[] = {N1, N2, N3}; 5632 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5633 return V; 5634 break; 5635 } 5636 case ISD::CONCAT_VECTORS: { 5637 SDValue Ops[] = {N1, N2, N3}; 5638 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5639 return V; 5640 break; 5641 } 5642 case ISD::SETCC: { 5643 assert(VT.isInteger() && "SETCC result type must be an integer!"); 5644 assert(N1.getValueType() == N2.getValueType() && 5645 "SETCC operands must have the same type!"); 5646 assert(VT.isVector() == N1.getValueType().isVector() && 5647 "SETCC type should be vector iff the operand type is vector!"); 5648 assert((!VT.isVector() || VT.getVectorElementCount() == 5649 N1.getValueType().getVectorElementCount()) && 5650 "SETCC vector element counts must match!"); 5651 // Use FoldSetCC to simplify SETCC's. 5652 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 5653 return V; 5654 // Vector constant folding. 
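    // For example, with a <2 x i32> result type, (setcc eq <2 x i32> <1, 2>,
    // <1, 3>) folds lane by lane to the i1 values <true, false>, which are
    // then sign-extended to the result's element type, giving the constant
    // vector <-1, 0>.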
5655 SDValue Ops[] = {N1, N2, N3}; 5656 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 5657 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 5658 return V; 5659 } 5660 break; 5661 } 5662 case ISD::SELECT: 5663 case ISD::VSELECT: 5664 if (SDValue V = simplifySelect(N1, N2, N3)) 5665 return V; 5666 break; 5667 case ISD::VECTOR_SHUFFLE: 5668 llvm_unreachable("should use getVectorShuffle constructor!"); 5669 case ISD::INSERT_VECTOR_ELT: { 5670 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 5671 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except 5672 // for scalable vectors where we will generate appropriate code to 5673 // deal with out-of-bounds cases correctly. 5674 if (N3C && N1.getValueType().isFixedLengthVector() && 5675 N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 5676 return getUNDEF(VT); 5677 5678 // Undefined index can be assumed out-of-bounds, so that's UNDEF too. 5679 if (N3.isUndef()) 5680 return getUNDEF(VT); 5681 5682 // If the inserted element is an UNDEF, just use the input vector. 5683 if (N2.isUndef()) 5684 return N1; 5685 5686 break; 5687 } 5688 case ISD::INSERT_SUBVECTOR: { 5689 // Inserting undef into undef is still undef. 5690 if (N1.isUndef() && N2.isUndef()) 5691 return getUNDEF(VT); 5692 5693 EVT N2VT = N2.getValueType(); 5694 assert(VT == N1.getValueType() && 5695 "Dest and insert subvector source types must match!"); 5696 assert(VT.isVector() && N2VT.isVector() && 5697 "Insert subvector VTs must be vectors!"); 5698 assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) && 5699 "Cannot insert a scalable vector into a fixed length vector!"); 5700 assert((VT.isScalableVector() != N2VT.isScalableVector() || 5701 VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) && 5702 "Insert subvector must be from smaller vector to larger vector!"); 5703 assert(isa<ConstantSDNode>(N3) && 5704 "Insert subvector index must be constant"); 5705 assert((VT.isScalableVector() != N2VT.isScalableVector() || 5706 (N2VT.getVectorMinNumElements() + 5707 cast<ConstantSDNode>(N3)->getZExtValue()) <= 5708 VT.getVectorMinNumElements()) && 5709 "Insert subvector overflow!"); 5710 5711 // Trivial insertion. 5712 if (VT == N2VT) 5713 return N2; 5714 5715 // If this is an insert of an extracted vector into an undef vector, we 5716 // can just use the input to the extract. 5717 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5718 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT) 5719 return N2.getOperand(0); 5720 break; 5721 } 5722 case ISD::BITCAST: 5723 // Fold bit_convert nodes from a type to themselves. 5724 if (N1.getValueType() == VT) 5725 return N1; 5726 break; 5727 } 5728 5729 // Memoize node if it doesn't produce a flag. 
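  // The memoization below is the usual two-step FoldingSet lookup: profile the
  // would-be node, return an existing structurally identical node if there is
  // one, and otherwise insert the newly created node at the position the
  // lookup remembered.  The same pattern on a generic FoldingSet (MyNode is a
  // hypothetical type that implements Profile()):
#if 0
static MyNode *getOrCreateSketch(FoldingSet<MyNode> &Set, unsigned Opcode) {
  FoldingSetNodeID ID;
  ID.AddInteger(Opcode);          // Profile everything that identifies the node.
  void *IP = nullptr;
  if (MyNode *Existing = Set.FindNodeOrInsertPos(ID, IP))
    return Existing;              // Reuse the structurally identical node.
  MyNode *N = new MyNode(Opcode);
  Set.InsertNode(N, IP);          // Insert at the remembered position.
  return N;
}
#endif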
5730 SDNode *N; 5731 SDVTList VTs = getVTList(VT); 5732 SDValue Ops[] = {N1, N2, N3}; 5733 if (VT != MVT::Glue) { 5734 FoldingSetNodeID ID; 5735 AddNodeIDNode(ID, Opcode, VTs, Ops); 5736 void *IP = nullptr; 5737 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5738 E->intersectFlagsWith(Flags); 5739 return SDValue(E, 0); 5740 } 5741 5742 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5743 N->setFlags(Flags); 5744 createOperands(N, Ops); 5745 CSEMap.InsertNode(N, IP); 5746 } else { 5747 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5748 createOperands(N, Ops); 5749 } 5750 5751 InsertNode(N); 5752 SDValue V = SDValue(N, 0); 5753 NewSDValueDbgMsg(V, "Creating new node: ", this); 5754 return V; 5755 } 5756 5757 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5758 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 5759 SDValue Ops[] = { N1, N2, N3, N4 }; 5760 return getNode(Opcode, DL, VT, Ops); 5761 } 5762 5763 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5764 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 5765 SDValue N5) { 5766 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 5767 return getNode(Opcode, DL, VT, Ops); 5768 } 5769 5770 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 5771 /// the incoming stack arguments to be loaded from the stack. 5772 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 5773 SmallVector<SDValue, 8> ArgChains; 5774 5775 // Include the original chain at the beginning of the list. When this is 5776 // used by target LowerCall hooks, this helps legalize find the 5777 // CALLSEQ_BEGIN node. 5778 ArgChains.push_back(Chain); 5779 5780 // Add a chain value for each stack argument. 5781 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 5782 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 5783 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 5784 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 5785 if (FI->getIndex() < 0) 5786 ArgChains.push_back(SDValue(L, 1)); 5787 5788 // Build a tokenfactor for all the chains. 5789 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 5790 } 5791 5792 /// getMemsetValue - Vectorized representation of the memset value 5793 /// operand. 5794 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 5795 const SDLoc &dl) { 5796 assert(!Value.isUndef()); 5797 5798 unsigned NumBits = VT.getScalarSizeInBits(); 5799 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 5800 assert(C->getAPIntValue().getBitWidth() == 8); 5801 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 5802 if (VT.isInteger()) { 5803 bool IsOpaque = VT.getSizeInBits() > 64 || 5804 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); 5805 return DAG.getConstant(Val, dl, VT, false, IsOpaque); 5806 } 5807 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 5808 VT); 5809 } 5810 5811 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 5812 EVT IntVT = VT.getScalarType(); 5813 if (!IntVT.isInteger()) 5814 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 5815 5816 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 5817 if (NumBits > 8) { 5818 // Use a multiplication with 0x010101... to extend the input to the 5819 // required length. 
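  // For example, extending the byte 0xAB to 32 bits multiplies it by
  // 0x01010101, which yields 0xABABABAB.  The same arithmetic in isolation:
#if 0
  APInt Byte(32, 0xAB);                               // zero-extended fill byte
  APInt Magic = APInt::getSplat(32, APInt(8, 0x01));  // 0x01010101
  assert(Byte * Magic == APInt::getSplat(32, APInt(8, 0xAB)));
#endif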
5820 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 5821 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 5822 DAG.getConstant(Magic, dl, IntVT)); 5823 } 5824 5825 if (VT != Value.getValueType() && !VT.isInteger()) 5826 Value = DAG.getBitcast(VT.getScalarType(), Value); 5827 if (VT != Value.getValueType()) 5828 Value = DAG.getSplatBuildVector(VT, dl, Value); 5829 5830 return Value; 5831 } 5832 5833 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 5834 /// used when a memcpy is turned into a memset when the source is a constant 5835 /// string ptr. 5836 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 5837 const TargetLowering &TLI, 5838 const ConstantDataArraySlice &Slice) { 5839 // Handle vector with all elements zero. 5840 if (Slice.Array == nullptr) { 5841 if (VT.isInteger()) 5842 return DAG.getConstant(0, dl, VT); 5843 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 5844 return DAG.getConstantFP(0.0, dl, VT); 5845 else if (VT.isVector()) { 5846 unsigned NumElts = VT.getVectorNumElements(); 5847 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; 5848 return DAG.getNode(ISD::BITCAST, dl, VT, 5849 DAG.getConstant(0, dl, 5850 EVT::getVectorVT(*DAG.getContext(), 5851 EltVT, NumElts))); 5852 } else 5853 llvm_unreachable("Expected type!"); 5854 } 5855 5856 assert(!VT.isVector() && "Can't handle vector type here!"); 5857 unsigned NumVTBits = VT.getSizeInBits(); 5858 unsigned NumVTBytes = NumVTBits / 8; 5859 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 5860 5861 APInt Val(NumVTBits, 0); 5862 if (DAG.getDataLayout().isLittleEndian()) { 5863 for (unsigned i = 0; i != NumBytes; ++i) 5864 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 5865 } else { 5866 for (unsigned i = 0; i != NumBytes; ++i) 5867 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 5868 } 5869 5870 // If the "cost" of materializing the integer immediate is less than the cost 5871 // of a load, then it is cost effective to turn the load into the immediate. 5872 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 5873 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 5874 return DAG.getConstant(Val, dl, VT); 5875 return SDValue(nullptr, 0); 5876 } 5877 5878 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, int64_t Offset, 5879 const SDLoc &DL, 5880 const SDNodeFlags Flags) { 5881 EVT VT = Base.getValueType(); 5882 return getMemBasePlusOffset(Base, getConstant(Offset, DL, VT), DL, Flags); 5883 } 5884 5885 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset, 5886 const SDLoc &DL, 5887 const SDNodeFlags Flags) { 5888 assert(Offset.getValueType().isInteger()); 5889 EVT BasePtrVT = Ptr.getValueType(); 5890 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags); 5891 } 5892 5893 /// Returns true if memcpy source is constant data. 
5894 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 5895 uint64_t SrcDelta = 0; 5896 GlobalAddressSDNode *G = nullptr; 5897 if (Src.getOpcode() == ISD::GlobalAddress) 5898 G = cast<GlobalAddressSDNode>(Src); 5899 else if (Src.getOpcode() == ISD::ADD && 5900 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 5901 Src.getOperand(1).getOpcode() == ISD::Constant) { 5902 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 5903 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 5904 } 5905 if (!G) 5906 return false; 5907 5908 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 5909 SrcDelta + G->getOffset()); 5910 } 5911 5912 static bool shouldLowerMemFuncForSize(const MachineFunction &MF, 5913 SelectionDAG &DAG) { 5914 // On Darwin, -Os means optimize for size without hurting performance, so 5915 // only really optimize for size when -Oz (MinSize) is used. 5916 if (MF.getTarget().getTargetTriple().isOSDarwin()) 5917 return MF.getFunction().hasMinSize(); 5918 return DAG.shouldOptForSize(); 5919 } 5920 5921 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, 5922 SmallVector<SDValue, 32> &OutChains, unsigned From, 5923 unsigned To, SmallVector<SDValue, 16> &OutLoadChains, 5924 SmallVector<SDValue, 16> &OutStoreChains) { 5925 assert(OutLoadChains.size() && "Missing loads in memcpy inlining"); 5926 assert(OutStoreChains.size() && "Missing stores in memcpy inlining"); 5927 SmallVector<SDValue, 16> GluedLoadChains; 5928 for (unsigned i = From; i < To; ++i) { 5929 OutChains.push_back(OutLoadChains[i]); 5930 GluedLoadChains.push_back(OutLoadChains[i]); 5931 } 5932 5933 // Chain for all loads. 5934 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 5935 GluedLoadChains); 5936 5937 for (unsigned i = From; i < To; ++i) { 5938 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]); 5939 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(), 5940 ST->getBasePtr(), ST->getMemoryVT(), 5941 ST->getMemOperand()); 5942 OutChains.push_back(NewStore); 5943 } 5944 } 5945 5946 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5947 SDValue Chain, SDValue Dst, SDValue Src, 5948 uint64_t Size, Align Alignment, 5949 bool isVol, bool AlwaysInline, 5950 MachinePointerInfo DstPtrInfo, 5951 MachinePointerInfo SrcPtrInfo) { 5952 // Turn a memcpy of undef to nop. 5953 // FIXME: We need to honor volatile even is Src is undef. 5954 if (Src.isUndef()) 5955 return Chain; 5956 5957 // Expand memcpy to a series of load and store ops if the size operand falls 5958 // below a certain threshold. 5959 // TODO: In the AlwaysInline case, if the size is big then generate a loop 5960 // rather than maybe a humongous number of loads and stores. 
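  // For example, in an 11-byte copy where the target's findOptimalMemOpLowering
  // answered { MVT::i64, MVT::i32 } (a hypothetical but representative choice),
  // the i64 load/store pair covers bytes 0-7 and the final i32 pair is pulled
  // back by one byte to cover bytes 7-10, re-copying byte 7 rather than
  // emitting extra narrow operations for the 3-byte tail.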
5961 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5962 const DataLayout &DL = DAG.getDataLayout(); 5963 LLVMContext &C = *DAG.getContext(); 5964 std::vector<EVT> MemOps; 5965 bool DstAlignCanChange = false; 5966 MachineFunction &MF = DAG.getMachineFunction(); 5967 MachineFrameInfo &MFI = MF.getFrameInfo(); 5968 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 5969 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5970 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5971 DstAlignCanChange = true; 5972 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src); 5973 if (!SrcAlign || Alignment > *SrcAlign) 5974 SrcAlign = Alignment; 5975 assert(SrcAlign && "SrcAlign must be set"); 5976 ConstantDataArraySlice Slice; 5977 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); 5978 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 5979 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 5980 const MemOp Op = isZeroConstant 5981 ? MemOp::Set(Size, DstAlignCanChange, Alignment, 5982 /*IsZeroMemset*/ true, isVol) 5983 : MemOp::Copy(Size, DstAlignCanChange, Alignment, 5984 *SrcAlign, isVol, CopyFromConstant); 5985 if (!TLI.findOptimalMemOpLowering( 5986 MemOps, Limit, Op, DstPtrInfo.getAddrSpace(), 5987 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes())) 5988 return SDValue(); 5989 5990 if (DstAlignCanChange) { 5991 Type *Ty = MemOps[0].getTypeForEVT(C); 5992 Align NewAlign = DL.getABITypeAlign(Ty); 5993 5994 // Don't promote to an alignment that would require dynamic stack 5995 // realignment. 5996 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 5997 if (!TRI->needsStackRealignment(MF)) 5998 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) 5999 NewAlign = NewAlign / 2; 6000 6001 if (NewAlign > Alignment) { 6002 // Give the stack frame object a larger alignment if needed. 6003 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 6004 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6005 Alignment = NewAlign; 6006 } 6007 } 6008 6009 MachineMemOperand::Flags MMOFlags = 6010 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 6011 SmallVector<SDValue, 16> OutLoadChains; 6012 SmallVector<SDValue, 16> OutStoreChains; 6013 SmallVector<SDValue, 32> OutChains; 6014 unsigned NumMemOps = MemOps.size(); 6015 uint64_t SrcOff = 0, DstOff = 0; 6016 for (unsigned i = 0; i != NumMemOps; ++i) { 6017 EVT VT = MemOps[i]; 6018 unsigned VTSize = VT.getSizeInBits() / 8; 6019 SDValue Value, Store; 6020 6021 if (VTSize > Size) { 6022 // Issuing an unaligned load / store pair that overlaps with the previous 6023 // pair. Adjust the offset accordingly. 6024 assert(i == NumMemOps-1 && i != 0); 6025 SrcOff -= VTSize - Size; 6026 DstOff -= VTSize - Size; 6027 } 6028 6029 if (CopyFromConstant && 6030 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { 6031 // It's unlikely a store of a vector immediate can be done in a single 6032 // instruction. It would require a load from a constantpool first. 6033 // We only handle zero vectors here. 6034 // FIXME: Handle other cases where store of vector immediate is done in 6035 // a single instruction. 6036 ConstantDataArraySlice SubSlice; 6037 if (SrcOff < Slice.Length) { 6038 SubSlice = Slice; 6039 SubSlice.move(SrcOff); 6040 } else { 6041 // This is an out-of-bounds access and hence UB. Pretend we read zero. 
6042 SubSlice.Array = nullptr; 6043 SubSlice.Offset = 0; 6044 SubSlice.Length = VTSize; 6045 } 6046 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); 6047 if (Value.getNode()) { 6048 Store = DAG.getStore( 6049 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6050 DstPtrInfo.getWithOffset(DstOff), Alignment.value(), MMOFlags); 6051 OutChains.push_back(Store); 6052 } 6053 } 6054 6055 if (!Store.getNode()) { 6056 // The type might not be legal for the target. This should only happen 6057 // if the type is smaller than a legal type, as on PPC, so the right 6058 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 6059 // to Load/Store if NVT==VT. 6060 // FIXME does the case above also need this? 6061 EVT NVT = TLI.getTypeToTransformTo(C, VT); 6062 assert(NVT.bitsGE(VT)); 6063 6064 bool isDereferenceable = 6065 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 6066 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 6067 if (isDereferenceable) 6068 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 6069 6070 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 6071 DAG.getMemBasePlusOffset(Src, SrcOff, dl), 6072 SrcPtrInfo.getWithOffset(SrcOff), VT, 6073 commonAlignment(*SrcAlign, SrcOff).value(), 6074 SrcMMOFlags); 6075 OutLoadChains.push_back(Value.getValue(1)); 6076 6077 Store = DAG.getTruncStore( 6078 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6079 DstPtrInfo.getWithOffset(DstOff), VT, Alignment.value(), MMOFlags); 6080 OutStoreChains.push_back(Store); 6081 } 6082 SrcOff += VTSize; 6083 DstOff += VTSize; 6084 Size -= VTSize; 6085 } 6086 6087 unsigned GluedLdStLimit = MaxLdStGlue == 0 ? 6088 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue; 6089 unsigned NumLdStInMemcpy = OutStoreChains.size(); 6090 6091 if (NumLdStInMemcpy) { 6092 // It may be that memcpy might be converted to memset if it's memcpy 6093 // of constants. In such a case, we won't have loads and stores, but 6094 // just stores. In the absence of loads, there is nothing to gang up. 6095 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) { 6096 // If target does not care, just leave as it. 6097 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) { 6098 OutChains.push_back(OutLoadChains[i]); 6099 OutChains.push_back(OutStoreChains[i]); 6100 } 6101 } else { 6102 // Ld/St less than/equal limit set by target. 6103 if (NumLdStInMemcpy <= GluedLdStLimit) { 6104 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 6105 NumLdStInMemcpy, OutLoadChains, 6106 OutStoreChains); 6107 } else { 6108 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit; 6109 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit; 6110 unsigned GlueIter = 0; 6111 6112 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) { 6113 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit; 6114 unsigned IndexTo = NumLdStInMemcpy - GlueIter; 6115 6116 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo, 6117 OutLoadChains, OutStoreChains); 6118 GlueIter += GluedLdStLimit; 6119 } 6120 6121 // Residual ld/st. 
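  // For example, with 10 load/store pairs and a glue limit of 4, the loop
  // above has glued the pair ranges [6, 10) and then [2, 6), working backwards
  // from the end, and RemainingLdStInMemcpy is 10 % 4 == 2, so the residual
  // call below glues the remaining pairs [0, 2).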
6122 if (RemainingLdStInMemcpy) { 6123 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 6124 RemainingLdStInMemcpy, OutLoadChains, 6125 OutStoreChains); 6126 } 6127 } 6128 } 6129 } 6130 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6131 } 6132 6133 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 6134 SDValue Chain, SDValue Dst, SDValue Src, 6135 uint64_t Size, Align Alignment, 6136 bool isVol, bool AlwaysInline, 6137 MachinePointerInfo DstPtrInfo, 6138 MachinePointerInfo SrcPtrInfo) { 6139 // Turn a memmove of undef to nop. 6140 // FIXME: We need to honor volatile even is Src is undef. 6141 if (Src.isUndef()) 6142 return Chain; 6143 6144 // Expand memmove to a series of load and store ops if the size operand falls 6145 // below a certain threshold. 6146 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6147 const DataLayout &DL = DAG.getDataLayout(); 6148 LLVMContext &C = *DAG.getContext(); 6149 std::vector<EVT> MemOps; 6150 bool DstAlignCanChange = false; 6151 MachineFunction &MF = DAG.getMachineFunction(); 6152 MachineFrameInfo &MFI = MF.getFrameInfo(); 6153 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6154 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6155 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6156 DstAlignCanChange = true; 6157 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src); 6158 if (!SrcAlign || Alignment > *SrcAlign) 6159 SrcAlign = Alignment; 6160 assert(SrcAlign && "SrcAlign must be set"); 6161 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 6162 if (!TLI.findOptimalMemOpLowering( 6163 MemOps, Limit, 6164 MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign, 6165 /*IsVolatile*/ true), 6166 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), 6167 MF.getFunction().getAttributes())) 6168 return SDValue(); 6169 6170 if (DstAlignCanChange) { 6171 Type *Ty = MemOps[0].getTypeForEVT(C); 6172 Align NewAlign = DL.getABITypeAlign(Ty); 6173 if (NewAlign > Alignment) { 6174 // Give the stack frame object a larger alignment if needed. 6175 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 6176 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6177 Alignment = NewAlign; 6178 } 6179 } 6180 6181 MachineMemOperand::Flags MMOFlags = 6182 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 6183 uint64_t SrcOff = 0, DstOff = 0; 6184 SmallVector<SDValue, 8> LoadValues; 6185 SmallVector<SDValue, 8> LoadChains; 6186 SmallVector<SDValue, 8> OutChains; 6187 unsigned NumMemOps = MemOps.size(); 6188 for (unsigned i = 0; i < NumMemOps; i++) { 6189 EVT VT = MemOps[i]; 6190 unsigned VTSize = VT.getSizeInBits() / 8; 6191 SDValue Value; 6192 6193 bool isDereferenceable = 6194 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 6195 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 6196 if (isDereferenceable) 6197 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 6198 6199 Value = DAG.getLoad( 6200 VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), 6201 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign->value(), SrcMMOFlags); 6202 LoadValues.push_back(Value); 6203 LoadChains.push_back(Value.getValue(1)); 6204 SrcOff += VTSize; 6205 } 6206 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 6207 OutChains.clear(); 6208 for (unsigned i = 0; i < NumMemOps; i++) { 6209 EVT VT = MemOps[i]; 6210 unsigned VTSize = VT.getSizeInBits() / 8; 6211 SDValue Store; 6212 6213 Store = DAG.getStore( 6214 Chain, dl, LoadValues[i], DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6215 DstPtrInfo.getWithOffset(DstOff), Alignment.value(), MMOFlags); 6216 OutChains.push_back(Store); 6217 DstOff += VTSize; 6218 } 6219 6220 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6221 } 6222 6223 /// Lower the call to 'memset' intrinsic function into a series of store 6224 /// operations. 6225 /// 6226 /// \param DAG Selection DAG where lowered code is placed. 6227 /// \param dl Link to corresponding IR location. 6228 /// \param Chain Control flow dependency. 6229 /// \param Dst Pointer to destination memory location. 6230 /// \param Src Value of byte to write into the memory. 6231 /// \param Size Number of bytes to write. 6232 /// \param Alignment Alignment of the destination in bytes. 6233 /// \param isVol True if destination is volatile. 6234 /// \param DstPtrInfo IR information on the memory pointer. 6235 /// \returns New head in the control flow, if lowering was successful, empty 6236 /// SDValue otherwise. 6237 /// 6238 /// The function tries to replace 'llvm.memset' intrinsic with several store 6239 /// operations and value calculation code. This is usually profitable for small 6240 /// memory size. 6241 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, 6242 SDValue Chain, SDValue Dst, SDValue Src, 6243 uint64_t Size, Align Alignment, bool isVol, 6244 MachinePointerInfo DstPtrInfo) { 6245 // Turn a memset of undef to nop. 6246 // FIXME: We need to honor volatile even is Src is undef. 6247 if (Src.isUndef()) 6248 return Chain; 6249 6250 // Expand memset to a series of load/store ops if the size operand 6251 // falls below a certain threshold. 
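  // For example, a 10-byte memset of the value 0xAB on a target that selects
  // { MVT::i64, MVT::i16 } stores (a hypothetical but representative choice)
  // writes the i64 value 0xABABABABABABABAB at offset 0 and the i16 value
  // 0xABAB at offset 8; the narrower pattern is produced by truncating the
  // widest one when that truncation is free, and recomputed from the fill
  // byte otherwise.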
6252 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6253 std::vector<EVT> MemOps; 6254 bool DstAlignCanChange = false; 6255 MachineFunction &MF = DAG.getMachineFunction(); 6256 MachineFrameInfo &MFI = MF.getFrameInfo(); 6257 bool OptSize = shouldLowerMemFuncForSize(MF, DAG); 6258 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6259 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6260 DstAlignCanChange = true; 6261 bool IsZeroVal = 6262 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 6263 if (!TLI.findOptimalMemOpLowering( 6264 MemOps, TLI.getMaxStoresPerMemset(OptSize), 6265 MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol), 6266 DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes())) 6267 return SDValue(); 6268 6269 if (DstAlignCanChange) { 6270 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 6271 Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty); 6272 if (NewAlign > Alignment) { 6273 // Give the stack frame object a larger alignment if needed. 6274 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) 6275 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6276 Alignment = NewAlign; 6277 } 6278 } 6279 6280 SmallVector<SDValue, 8> OutChains; 6281 uint64_t DstOff = 0; 6282 unsigned NumMemOps = MemOps.size(); 6283 6284 // Find the largest store and generate the bit pattern for it. 6285 EVT LargestVT = MemOps[0]; 6286 for (unsigned i = 1; i < NumMemOps; i++) 6287 if (MemOps[i].bitsGT(LargestVT)) 6288 LargestVT = MemOps[i]; 6289 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 6290 6291 for (unsigned i = 0; i < NumMemOps; i++) { 6292 EVT VT = MemOps[i]; 6293 unsigned VTSize = VT.getSizeInBits() / 8; 6294 if (VTSize > Size) { 6295 // Issuing an unaligned load / store pair that overlaps with the previous 6296 // pair. Adjust the offset accordingly. 6297 assert(i == NumMemOps-1 && i != 0); 6298 DstOff -= VTSize - Size; 6299 } 6300 6301 // If this store is smaller than the largest store see whether we can get 6302 // the smaller value for free with a truncate. 6303 SDValue Value = MemSetValue; 6304 if (VT.bitsLT(LargestVT)) { 6305 if (!LargestVT.isVector() && !VT.isVector() && 6306 TLI.isTruncateFree(LargestVT, VT)) 6307 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 6308 else 6309 Value = getMemsetValue(Src, VT, DAG, dl); 6310 } 6311 assert(Value.getValueType() == VT && "Value with wrong type."); 6312 SDValue Store = DAG.getStore( 6313 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6314 DstPtrInfo.getWithOffset(DstOff), Alignment.value(), 6315 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 6316 OutChains.push_back(Store); 6317 DstOff += VT.getSizeInBits() / 8; 6318 Size -= VTSize; 6319 } 6320 6321 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6322 } 6323 6324 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 6325 unsigned AS) { 6326 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 6327 // pointer operands can be losslessly bitcasted to pointers of address space 0 6328 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 6329 report_fatal_error("cannot lower memory intrinsic in address space " + 6330 Twine(AS)); 6331 } 6332 } 6333 6334 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 6335 SDValue Src, SDValue Size, Align Alignment, 6336 bool isVol, bool AlwaysInline, bool isTailCall, 6337 MachinePointerInfo DstPtrInfo, 6338 MachinePointerInfo SrcPtrInfo) { 6339 // Check to see if we should lower the memcpy to loads and stores first. 6340 // For cases within the target-specified limits, this is the best choice. 6341 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6342 if (ConstantSize) { 6343 // Memcpy with size zero? Just return the original chain. 6344 if (ConstantSize->isNullValue()) 6345 return Chain; 6346 6347 SDValue Result = getMemcpyLoadsAndStores( 6348 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, 6349 isVol, false, DstPtrInfo, SrcPtrInfo); 6350 if (Result.getNode()) 6351 return Result; 6352 } 6353 6354 // Then check to see if we should lower the memcpy with target-specific 6355 // code. If the target chooses to do this, this is the next best. 6356 if (TSI) { 6357 SDValue Result = TSI->EmitTargetCodeForMemcpy( 6358 *this, dl, Chain, Dst, Src, Size, Alignment.value(), isVol, 6359 AlwaysInline, DstPtrInfo, SrcPtrInfo); 6360 if (Result.getNode()) 6361 return Result; 6362 } 6363 6364 // If we really need inline code and the target declined to provide it, 6365 // use a (potentially long) sequence of loads and stores. 6366 if (AlwaysInline) { 6367 assert(ConstantSize && "AlwaysInline requires a constant size!"); 6368 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6369 ConstantSize->getZExtValue(), Alignment, 6370 isVol, true, DstPtrInfo, SrcPtrInfo); 6371 } 6372 6373 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6374 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6375 6376 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 6377 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 6378 // respect volatile, so they may do things like read or write memory 6379 // beyond the given memory regions. But fixing this isn't easy, and most 6380 // people don't care. 6381 6382 // Emit a library call. 
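  // A minimal sketch of the call built below: the arguments are typed as
  // (i8* dst, i8* src, intptr size), mirroring the C prototype
  //   void *memcpy(void *dst, const void *src, size_t n);
  // The call result is discarded (setDiscardResult), so only the output chain
  // of the lowered call is handed back to the caller.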
6383 TargetLowering::ArgListTy Args; 6384 TargetLowering::ArgListEntry Entry; 6385 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6386 Entry.Node = Dst; Args.push_back(Entry); 6387 Entry.Node = Src; Args.push_back(Entry); 6388 6389 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6390 Entry.Node = Size; Args.push_back(Entry); 6391 // FIXME: pass in SDLoc 6392 TargetLowering::CallLoweringInfo CLI(*this); 6393 CLI.setDebugLoc(dl) 6394 .setChain(Chain) 6395 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 6396 Dst.getValueType().getTypeForEVT(*getContext()), 6397 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 6398 TLI->getPointerTy(getDataLayout())), 6399 std::move(Args)) 6400 .setDiscardResult() 6401 .setTailCall(isTailCall); 6402 6403 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6404 return CallResult.second; 6405 } 6406 6407 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, 6408 SDValue Dst, unsigned DstAlign, 6409 SDValue Src, unsigned SrcAlign, 6410 SDValue Size, Type *SizeTy, 6411 unsigned ElemSz, bool isTailCall, 6412 MachinePointerInfo DstPtrInfo, 6413 MachinePointerInfo SrcPtrInfo) { 6414 // Emit a library call. 6415 TargetLowering::ArgListTy Args; 6416 TargetLowering::ArgListEntry Entry; 6417 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6418 Entry.Node = Dst; 6419 Args.push_back(Entry); 6420 6421 Entry.Node = Src; 6422 Args.push_back(Entry); 6423 6424 Entry.Ty = SizeTy; 6425 Entry.Node = Size; 6426 Args.push_back(Entry); 6427 6428 RTLIB::Libcall LibraryCall = 6429 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6430 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6431 report_fatal_error("Unsupported element size"); 6432 6433 TargetLowering::CallLoweringInfo CLI(*this); 6434 CLI.setDebugLoc(dl) 6435 .setChain(Chain) 6436 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6437 Type::getVoidTy(*getContext()), 6438 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6439 TLI->getPointerTy(getDataLayout())), 6440 std::move(Args)) 6441 .setDiscardResult() 6442 .setTailCall(isTailCall); 6443 6444 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6445 return CallResult.second; 6446 } 6447 6448 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 6449 SDValue Src, SDValue Size, Align Alignment, 6450 bool isVol, bool isTailCall, 6451 MachinePointerInfo DstPtrInfo, 6452 MachinePointerInfo SrcPtrInfo) { 6453 // Check to see if we should lower the memmove to loads and stores first. 6454 // For cases within the target-specified limits, this is the best choice. 6455 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6456 if (ConstantSize) { 6457 // Memmove with size zero? Just return the original chain. 6458 if (ConstantSize->isNullValue()) 6459 return Chain; 6460 6461 SDValue Result = getMemmoveLoadsAndStores( 6462 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, 6463 isVol, false, DstPtrInfo, SrcPtrInfo); 6464 if (Result.getNode()) 6465 return Result; 6466 } 6467 6468 // Then check to see if we should lower the memmove with target-specific 6469 // code. If the target chooses to do this, this is the next best. 
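  // As with memcpy above, a target that does not want to handle this
  // particular memmove can return an empty SDValue from
  // EmitTargetCodeForMemmove, and lowering falls through to the libcall path
  // below.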
6470 if (TSI) { 6471 SDValue Result = TSI->EmitTargetCodeForMemmove( 6472 *this, dl, Chain, Dst, Src, Size, Alignment.value(), isVol, DstPtrInfo, 6473 SrcPtrInfo); 6474 if (Result.getNode()) 6475 return Result; 6476 } 6477 6478 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6479 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6480 6481 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 6482 // not be safe. See memcpy above for more details. 6483 6484 // Emit a library call. 6485 TargetLowering::ArgListTy Args; 6486 TargetLowering::ArgListEntry Entry; 6487 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6488 Entry.Node = Dst; Args.push_back(Entry); 6489 Entry.Node = Src; Args.push_back(Entry); 6490 6491 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6492 Entry.Node = Size; Args.push_back(Entry); 6493 // FIXME: pass in SDLoc 6494 TargetLowering::CallLoweringInfo CLI(*this); 6495 CLI.setDebugLoc(dl) 6496 .setChain(Chain) 6497 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 6498 Dst.getValueType().getTypeForEVT(*getContext()), 6499 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 6500 TLI->getPointerTy(getDataLayout())), 6501 std::move(Args)) 6502 .setDiscardResult() 6503 .setTailCall(isTailCall); 6504 6505 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6506 return CallResult.second; 6507 } 6508 6509 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl, 6510 SDValue Dst, unsigned DstAlign, 6511 SDValue Src, unsigned SrcAlign, 6512 SDValue Size, Type *SizeTy, 6513 unsigned ElemSz, bool isTailCall, 6514 MachinePointerInfo DstPtrInfo, 6515 MachinePointerInfo SrcPtrInfo) { 6516 // Emit a library call. 6517 TargetLowering::ArgListTy Args; 6518 TargetLowering::ArgListEntry Entry; 6519 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6520 Entry.Node = Dst; 6521 Args.push_back(Entry); 6522 6523 Entry.Node = Src; 6524 Args.push_back(Entry); 6525 6526 Entry.Ty = SizeTy; 6527 Entry.Node = Size; 6528 Args.push_back(Entry); 6529 6530 RTLIB::Libcall LibraryCall = 6531 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6532 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6533 report_fatal_error("Unsupported element size"); 6534 6535 TargetLowering::CallLoweringInfo CLI(*this); 6536 CLI.setDebugLoc(dl) 6537 .setChain(Chain) 6538 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6539 Type::getVoidTy(*getContext()), 6540 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6541 TLI->getPointerTy(getDataLayout())), 6542 std::move(Args)) 6543 .setDiscardResult() 6544 .setTailCall(isTailCall); 6545 6546 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6547 return CallResult.second; 6548 } 6549 6550 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 6551 SDValue Src, SDValue Size, Align Alignment, 6552 bool isVol, bool isTailCall, 6553 MachinePointerInfo DstPtrInfo) { 6554 // Check to see if we should lower the memset to stores first. 6555 // For cases within the target-specified limits, this is the best choice. 6556 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6557 if (ConstantSize) { 6558 // Memset with size zero? Just return the original chain. 
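    // (A constant zero-length memset has no observable effect, so no nodes
    // need to be created for it.)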
6559 if (ConstantSize->isNullValue()) 6560 return Chain; 6561 6562 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src, 6563 ConstantSize->getZExtValue(), Alignment, 6564 isVol, DstPtrInfo); 6565 6566 if (Result.getNode()) 6567 return Result; 6568 } 6569 6570 // Then check to see if we should lower the memset with target-specific 6571 // code. If the target chooses to do this, this is the next best. 6572 if (TSI) { 6573 SDValue Result = TSI->EmitTargetCodeForMemset( 6574 *this, dl, Chain, Dst, Src, Size, Alignment.value(), isVol, DstPtrInfo); 6575 if (Result.getNode()) 6576 return Result; 6577 } 6578 6579 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6580 6581 // Emit a library call. 6582 TargetLowering::ArgListTy Args; 6583 TargetLowering::ArgListEntry Entry; 6584 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext()); 6585 Args.push_back(Entry); 6586 Entry.Node = Src; 6587 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 6588 Args.push_back(Entry); 6589 Entry.Node = Size; 6590 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6591 Args.push_back(Entry); 6592 6593 // FIXME: pass in SDLoc 6594 TargetLowering::CallLoweringInfo CLI(*this); 6595 CLI.setDebugLoc(dl) 6596 .setChain(Chain) 6597 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 6598 Dst.getValueType().getTypeForEVT(*getContext()), 6599 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 6600 TLI->getPointerTy(getDataLayout())), 6601 std::move(Args)) 6602 .setDiscardResult() 6603 .setTailCall(isTailCall); 6604 6605 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6606 return CallResult.second; 6607 } 6608 6609 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl, 6610 SDValue Dst, unsigned DstAlign, 6611 SDValue Value, SDValue Size, Type *SizeTy, 6612 unsigned ElemSz, bool isTailCall, 6613 MachinePointerInfo DstPtrInfo) { 6614 // Emit a library call. 
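  // Sketch of the resulting call: the element-size-specific libcall (e.g. the
  // _4 variant when ElemSz is 4) takes (intptr dst, i8 value, SizeTy size) and
  // returns void; an unsupported element size is rejected with a fatal error
  // below.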
6615 TargetLowering::ArgListTy Args; 6616 TargetLowering::ArgListEntry Entry; 6617 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6618 Entry.Node = Dst; 6619 Args.push_back(Entry); 6620 6621 Entry.Ty = Type::getInt8Ty(*getContext()); 6622 Entry.Node = Value; 6623 Args.push_back(Entry); 6624 6625 Entry.Ty = SizeTy; 6626 Entry.Node = Size; 6627 Args.push_back(Entry); 6628 6629 RTLIB::Libcall LibraryCall = 6630 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6631 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6632 report_fatal_error("Unsupported element size"); 6633 6634 TargetLowering::CallLoweringInfo CLI(*this); 6635 CLI.setDebugLoc(dl) 6636 .setChain(Chain) 6637 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6638 Type::getVoidTy(*getContext()), 6639 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6640 TLI->getPointerTy(getDataLayout())), 6641 std::move(Args)) 6642 .setDiscardResult() 6643 .setTailCall(isTailCall); 6644 6645 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6646 return CallResult.second; 6647 } 6648 6649 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6650 SDVTList VTList, ArrayRef<SDValue> Ops, 6651 MachineMemOperand *MMO) { 6652 FoldingSetNodeID ID; 6653 ID.AddInteger(MemVT.getRawBits()); 6654 AddNodeIDNode(ID, Opcode, VTList, Ops); 6655 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6656 void* IP = nullptr; 6657 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6658 cast<AtomicSDNode>(E)->refineAlignment(MMO); 6659 return SDValue(E, 0); 6660 } 6661 6662 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6663 VTList, MemVT, MMO); 6664 createOperands(N, Ops); 6665 6666 CSEMap.InsertNode(N, IP); 6667 InsertNode(N); 6668 return SDValue(N, 0); 6669 } 6670 6671 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 6672 EVT MemVT, SDVTList VTs, SDValue Chain, 6673 SDValue Ptr, SDValue Cmp, SDValue Swp, 6674 MachineMemOperand *MMO) { 6675 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 6676 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 6677 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 6678 6679 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 6680 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6681 } 6682 6683 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6684 SDValue Chain, SDValue Ptr, SDValue Val, 6685 MachineMemOperand *MMO) { 6686 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 6687 Opcode == ISD::ATOMIC_LOAD_SUB || 6688 Opcode == ISD::ATOMIC_LOAD_AND || 6689 Opcode == ISD::ATOMIC_LOAD_CLR || 6690 Opcode == ISD::ATOMIC_LOAD_OR || 6691 Opcode == ISD::ATOMIC_LOAD_XOR || 6692 Opcode == ISD::ATOMIC_LOAD_NAND || 6693 Opcode == ISD::ATOMIC_LOAD_MIN || 6694 Opcode == ISD::ATOMIC_LOAD_MAX || 6695 Opcode == ISD::ATOMIC_LOAD_UMIN || 6696 Opcode == ISD::ATOMIC_LOAD_UMAX || 6697 Opcode == ISD::ATOMIC_LOAD_FADD || 6698 Opcode == ISD::ATOMIC_LOAD_FSUB || 6699 Opcode == ISD::ATOMIC_SWAP || 6700 Opcode == ISD::ATOMIC_STORE) && 6701 "Invalid Atomic Op"); 6702 6703 EVT VT = Val.getValueType(); 6704 6705 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? 
getVTList(MVT::Other) : 6706 getVTList(VT, MVT::Other); 6707 SDValue Ops[] = {Chain, Ptr, Val}; 6708 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6709 } 6710 6711 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6712 EVT VT, SDValue Chain, SDValue Ptr, 6713 MachineMemOperand *MMO) { 6714 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 6715 6716 SDVTList VTs = getVTList(VT, MVT::Other); 6717 SDValue Ops[] = {Chain, Ptr}; 6718 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6719 } 6720 6721 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 6722 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 6723 if (Ops.size() == 1) 6724 return Ops[0]; 6725 6726 SmallVector<EVT, 4> VTs; 6727 VTs.reserve(Ops.size()); 6728 for (unsigned i = 0; i < Ops.size(); ++i) 6729 VTs.push_back(Ops[i].getValueType()); 6730 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 6731 } 6732 6733 SDValue SelectionDAG::getMemIntrinsicNode( 6734 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 6735 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, 6736 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) { 6737 if (!Size && MemVT.isScalableVector()) 6738 Size = MemoryLocation::UnknownSize; 6739 else if (!Size) 6740 Size = MemVT.getStoreSize(); 6741 6742 MachineFunction &MF = getMachineFunction(); 6743 MachineMemOperand *MMO = 6744 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo); 6745 6746 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 6747 } 6748 6749 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 6750 SDVTList VTList, 6751 ArrayRef<SDValue> Ops, EVT MemVT, 6752 MachineMemOperand *MMO) { 6753 assert((Opcode == ISD::INTRINSIC_VOID || 6754 Opcode == ISD::INTRINSIC_W_CHAIN || 6755 Opcode == ISD::PREFETCH || 6756 ((int)Opcode <= std::numeric_limits<int>::max() && 6757 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 6758 "Opcode is not a memory-accessing opcode!"); 6759 6760 // Memoize the node unless it returns a flag. 6761 MemIntrinsicSDNode *N; 6762 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6763 FoldingSetNodeID ID; 6764 AddNodeIDNode(ID, Opcode, VTList, Ops); 6765 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 6766 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 6767 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6768 void *IP = nullptr; 6769 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6770 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 6771 return SDValue(E, 0); 6772 } 6773 6774 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6775 VTList, MemVT, MMO); 6776 createOperands(N, Ops); 6777 6778 CSEMap.InsertNode(N, IP); 6779 } else { 6780 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6781 VTList, MemVT, MMO); 6782 createOperands(N, Ops); 6783 } 6784 InsertNode(N); 6785 SDValue V(N, 0); 6786 NewSDValueDbgMsg(V, "Creating new node: ", this); 6787 return V; 6788 } 6789 6790 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, 6791 SDValue Chain, int FrameIndex, 6792 int64_t Size, int64_t Offset) { 6793 const unsigned Opcode = IsStart ? 
ISD::LIFETIME_START : ISD::LIFETIME_END; 6794 const auto VTs = getVTList(MVT::Other); 6795 SDValue Ops[2] = { 6796 Chain, 6797 getFrameIndex(FrameIndex, 6798 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()), 6799 true)}; 6800 6801 FoldingSetNodeID ID; 6802 AddNodeIDNode(ID, Opcode, VTs, Ops); 6803 ID.AddInteger(FrameIndex); 6804 ID.AddInteger(Size); 6805 ID.AddInteger(Offset); 6806 void *IP = nullptr; 6807 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6808 return SDValue(E, 0); 6809 6810 LifetimeSDNode *N = newSDNode<LifetimeSDNode>( 6811 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset); 6812 createOperands(N, Ops); 6813 CSEMap.InsertNode(N, IP); 6814 InsertNode(N); 6815 SDValue V(N, 0); 6816 NewSDValueDbgMsg(V, "Creating new node: ", this); 6817 return V; 6818 } 6819 6820 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6821 /// MachinePointerInfo record from it. This is particularly useful because the 6822 /// code generator has many cases where it doesn't bother passing in a 6823 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6824 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6825 SelectionDAG &DAG, SDValue Ptr, 6826 int64_t Offset = 0) { 6827 // If this is FI+Offset, we can model it. 6828 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 6829 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 6830 FI->getIndex(), Offset); 6831 6832 // If this is (FI+Offset1)+Offset2, we can model it. 6833 if (Ptr.getOpcode() != ISD::ADD || 6834 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 6835 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 6836 return Info; 6837 6838 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 6839 return MachinePointerInfo::getFixedStack( 6840 DAG.getMachineFunction(), FI, 6841 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 6842 } 6843 6844 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6845 /// MachinePointerInfo record from it. This is particularly useful because the 6846 /// code generator has many cases where it doesn't bother passing in a 6847 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6848 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6849 SelectionDAG &DAG, SDValue Ptr, 6850 SDValue OffsetOp) { 6851 // If the 'Offset' value isn't a constant, we can't handle this. 6852 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 6853 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); 6854 if (OffsetOp.isUndef()) 6855 return InferPointerInfo(Info, DAG, Ptr); 6856 return Info; 6857 } 6858 6859 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6860 EVT VT, const SDLoc &dl, SDValue Chain, 6861 SDValue Ptr, SDValue Offset, 6862 MachinePointerInfo PtrInfo, EVT MemVT, 6863 Align Alignment, 6864 MachineMemOperand::Flags MMOFlags, 6865 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6866 assert(Chain.getValueType() == MVT::Other && 6867 "Invalid chain type"); 6868 6869 MMOFlags |= MachineMemOperand::MOLoad; 6870 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 6871 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 6872 // clients. 
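  // For instance, a Ptr of the form (add FrameIndex<1>, Constant<8>) with a
  // null PtrInfo becomes a fixed-stack MachinePointerInfo for frame index 1
  // at offset 8 via InferPointerInfo above; anything it cannot recognize
  // simply keeps the (empty) PtrInfo that was passed in.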
6873 if (PtrInfo.V.isNull()) 6874 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); 6875 6876 uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize()); 6877 MachineFunction &MF = getMachineFunction(); 6878 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, 6879 Alignment, AAInfo, Ranges); 6880 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 6881 } 6882 6883 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6884 EVT VT, const SDLoc &dl, SDValue Chain, 6885 SDValue Ptr, SDValue Offset, EVT MemVT, 6886 MachineMemOperand *MMO) { 6887 if (VT == MemVT) { 6888 ExtType = ISD::NON_EXTLOAD; 6889 } else if (ExtType == ISD::NON_EXTLOAD) { 6890 assert(VT == MemVT && "Non-extending load from different memory type!"); 6891 } else { 6892 // Extending load. 6893 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 6894 "Should only be an extending load, not truncating!"); 6895 assert(VT.isInteger() == MemVT.isInteger() && 6896 "Cannot convert from FP to Int or Int -> FP!"); 6897 assert(VT.isVector() == MemVT.isVector() && 6898 "Cannot use an ext load to convert to or from a vector!"); 6899 assert((!VT.isVector() || 6900 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 6901 "Cannot use an ext load to change the number of vector elements!"); 6902 } 6903 6904 bool Indexed = AM != ISD::UNINDEXED; 6905 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 6906 6907 SDVTList VTs = Indexed ? 6908 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); 6909 SDValue Ops[] = { Chain, Ptr, Offset }; 6910 FoldingSetNodeID ID; 6911 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); 6912 ID.AddInteger(MemVT.getRawBits()); 6913 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( 6914 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); 6915 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6916 void *IP = nullptr; 6917 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6918 cast<LoadSDNode>(E)->refineAlignment(MMO); 6919 return SDValue(E, 0); 6920 } 6921 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6922 ExtType, MemVT, MMO); 6923 createOperands(N, Ops); 6924 6925 CSEMap.InsertNode(N, IP); 6926 InsertNode(N); 6927 SDValue V(N, 0); 6928 NewSDValueDbgMsg(V, "Creating new node: ", this); 6929 return V; 6930 } 6931 6932 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6933 SDValue Ptr, MachinePointerInfo PtrInfo, 6934 MaybeAlign Alignment, 6935 MachineMemOperand::Flags MMOFlags, 6936 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6937 SDValue Undef = getUNDEF(Ptr.getValueType()); 6938 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6939 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); 6940 } 6941 6942 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6943 SDValue Ptr, MachineMemOperand *MMO) { 6944 SDValue Undef = getUNDEF(Ptr.getValueType()); 6945 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6946 VT, MMO); 6947 } 6948 6949 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6950 EVT VT, SDValue Chain, SDValue Ptr, 6951 MachinePointerInfo PtrInfo, EVT MemVT, 6952 MaybeAlign Alignment, 6953 MachineMemOperand::Flags MMOFlags, 6954 const AAMDNodes &AAInfo) { 6955 SDValue Undef = getUNDEF(Ptr.getValueType()); 6956 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, 6957 MemVT, Alignment, MMOFlags, AAInfo); 
6958 } 6959 6960 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6961 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, 6962 MachineMemOperand *MMO) { 6963 SDValue Undef = getUNDEF(Ptr.getValueType()); 6964 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, 6965 MemVT, MMO); 6966 } 6967 6968 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, 6969 SDValue Base, SDValue Offset, 6970 ISD::MemIndexedMode AM) { 6971 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); 6972 assert(LD->getOffset().isUndef() && "Load is already a indexed load!"); 6973 // Don't propagate the invariant or dereferenceable flags. 6974 auto MMOFlags = 6975 LD->getMemOperand()->getFlags() & 6976 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 6977 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 6978 LD->getChain(), Base, Offset, LD->getPointerInfo(), 6979 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 6980 LD->getAAInfo()); 6981 } 6982 6983 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6984 SDValue Ptr, MachinePointerInfo PtrInfo, 6985 Align Alignment, 6986 MachineMemOperand::Flags MMOFlags, 6987 const AAMDNodes &AAInfo) { 6988 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 6989 6990 MMOFlags |= MachineMemOperand::MOStore; 6991 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6992 6993 if (PtrInfo.V.isNull()) 6994 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6995 6996 MachineFunction &MF = getMachineFunction(); 6997 uint64_t Size = 6998 MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize()); 6999 MachineMemOperand *MMO = 7000 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo); 7001 return getStore(Chain, dl, Val, Ptr, MMO); 7002 } 7003 7004 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7005 SDValue Ptr, MachineMemOperand *MMO) { 7006 assert(Chain.getValueType() == MVT::Other && 7007 "Invalid chain type"); 7008 EVT VT = Val.getValueType(); 7009 SDVTList VTs = getVTList(MVT::Other); 7010 SDValue Undef = getUNDEF(Ptr.getValueType()); 7011 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 7012 FoldingSetNodeID ID; 7013 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7014 ID.AddInteger(VT.getRawBits()); 7015 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 7016 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 7017 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7018 void *IP = nullptr; 7019 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7020 cast<StoreSDNode>(E)->refineAlignment(MMO); 7021 return SDValue(E, 0); 7022 } 7023 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7024 ISD::UNINDEXED, false, VT, MMO); 7025 createOperands(N, Ops); 7026 7027 CSEMap.InsertNode(N, IP); 7028 InsertNode(N); 7029 SDValue V(N, 0); 7030 NewSDValueDbgMsg(V, "Creating new node: ", this); 7031 return V; 7032 } 7033 7034 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7035 SDValue Ptr, MachinePointerInfo PtrInfo, 7036 EVT SVT, Align Alignment, 7037 MachineMemOperand::Flags MMOFlags, 7038 const AAMDNodes &AAInfo) { 7039 assert(Chain.getValueType() == MVT::Other && 7040 "Invalid chain type"); 7041 7042 MMOFlags |= MachineMemOperand::MOStore; 7043 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 7044 7045 if (PtrInfo.V.isNull()) 7046 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 7047 7048 MachineFunction &MF = getMachineFunction(); 7049 MachineMemOperand 
*MMO = MF.getMachineMemOperand( 7050 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 7051 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 7052 } 7053 7054 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7055 SDValue Ptr, EVT SVT, 7056 MachineMemOperand *MMO) { 7057 EVT VT = Val.getValueType(); 7058 7059 assert(Chain.getValueType() == MVT::Other && 7060 "Invalid chain type"); 7061 if (VT == SVT) 7062 return getStore(Chain, dl, Val, Ptr, MMO); 7063 7064 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 7065 "Should only be a truncating store, not extending!"); 7066 assert(VT.isInteger() == SVT.isInteger() && 7067 "Can't do FP-INT conversion!"); 7068 assert(VT.isVector() == SVT.isVector() && 7069 "Cannot use trunc store to convert to or from a vector!"); 7070 assert((!VT.isVector() || 7071 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 7072 "Cannot use trunc store to change the number of vector elements!"); 7073 7074 SDVTList VTs = getVTList(MVT::Other); 7075 SDValue Undef = getUNDEF(Ptr.getValueType()); 7076 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 7077 FoldingSetNodeID ID; 7078 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7079 ID.AddInteger(SVT.getRawBits()); 7080 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 7081 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 7082 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7083 void *IP = nullptr; 7084 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7085 cast<StoreSDNode>(E)->refineAlignment(MMO); 7086 return SDValue(E, 0); 7087 } 7088 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7089 ISD::UNINDEXED, true, SVT, MMO); 7090 createOperands(N, Ops); 7091 7092 CSEMap.InsertNode(N, IP); 7093 InsertNode(N); 7094 SDValue V(N, 0); 7095 NewSDValueDbgMsg(V, "Creating new node: ", this); 7096 return V; 7097 } 7098 7099 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 7100 SDValue Base, SDValue Offset, 7101 ISD::MemIndexedMode AM) { 7102 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 7103 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 7104 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 7105 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 7106 FoldingSetNodeID ID; 7107 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7108 ID.AddInteger(ST->getMemoryVT().getRawBits()); 7109 ID.AddInteger(ST->getRawSubclassData()); 7110 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 7111 void *IP = nullptr; 7112 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 7113 return SDValue(E, 0); 7114 7115 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7116 ST->isTruncatingStore(), ST->getMemoryVT(), 7117 ST->getMemOperand()); 7118 createOperands(N, Ops); 7119 7120 CSEMap.InsertNode(N, IP); 7121 InsertNode(N); 7122 SDValue V(N, 0); 7123 NewSDValueDbgMsg(V, "Creating new node: ", this); 7124 return V; 7125 } 7126 7127 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 7128 SDValue Base, SDValue Offset, SDValue Mask, 7129 SDValue PassThru, EVT MemVT, 7130 MachineMemOperand *MMO, 7131 ISD::MemIndexedMode AM, 7132 ISD::LoadExtType ExtTy, bool isExpanding) { 7133 bool Indexed = AM != ISD::UNINDEXED; 7134 assert((Indexed || Offset.isUndef()) && 7135 "Unindexed masked load with an offset!"); 7136 SDVTList VTs = Indexed ? 
getVTList(VT, Base.getValueType(), MVT::Other) 7137 : getVTList(VT, MVT::Other); 7138 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru}; 7139 FoldingSetNodeID ID; 7140 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 7141 ID.AddInteger(MemVT.getRawBits()); 7142 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 7143 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO)); 7144 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7145 void *IP = nullptr; 7146 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7147 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 7148 return SDValue(E, 0); 7149 } 7150 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7151 AM, ExtTy, isExpanding, MemVT, MMO); 7152 createOperands(N, Ops); 7153 7154 CSEMap.InsertNode(N, IP); 7155 InsertNode(N); 7156 SDValue V(N, 0); 7157 NewSDValueDbgMsg(V, "Creating new node: ", this); 7158 return V; 7159 } 7160 7161 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, 7162 SDValue Base, SDValue Offset, 7163 ISD::MemIndexedMode AM) { 7164 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad); 7165 assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!"); 7166 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base, 7167 Offset, LD->getMask(), LD->getPassThru(), 7168 LD->getMemoryVT(), LD->getMemOperand(), AM, 7169 LD->getExtensionType(), LD->isExpandingLoad()); 7170 } 7171 7172 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 7173 SDValue Val, SDValue Base, SDValue Offset, 7174 SDValue Mask, EVT MemVT, 7175 MachineMemOperand *MMO, 7176 ISD::MemIndexedMode AM, bool IsTruncating, 7177 bool IsCompressing) { 7178 assert(Chain.getValueType() == MVT::Other && 7179 "Invalid chain type"); 7180 bool Indexed = AM != ISD::UNINDEXED; 7181 assert((Indexed || Offset.isUndef()) && 7182 "Unindexed masked store with an offset!"); 7183 SDVTList VTs = Indexed ? 
getVTList(Base.getValueType(), MVT::Other) 7184 : getVTList(MVT::Other); 7185 SDValue Ops[] = {Chain, Val, Base, Offset, Mask}; 7186 FoldingSetNodeID ID; 7187 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 7188 ID.AddInteger(MemVT.getRawBits()); 7189 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 7190 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO)); 7191 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7192 void *IP = nullptr; 7193 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7194 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 7195 return SDValue(E, 0); 7196 } 7197 auto *N = 7198 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7199 IsTruncating, IsCompressing, MemVT, MMO); 7200 createOperands(N, Ops); 7201 7202 CSEMap.InsertNode(N, IP); 7203 InsertNode(N); 7204 SDValue V(N, 0); 7205 NewSDValueDbgMsg(V, "Creating new node: ", this); 7206 return V; 7207 } 7208 7209 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, 7210 SDValue Base, SDValue Offset, 7211 ISD::MemIndexedMode AM) { 7212 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore); 7213 assert(ST->getOffset().isUndef() && 7214 "Masked store is already a indexed store!"); 7215 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset, 7216 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(), 7217 AM, ST->isTruncatingStore(), ST->isCompressingStore()); 7218 } 7219 7220 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 7221 ArrayRef<SDValue> Ops, 7222 MachineMemOperand *MMO, 7223 ISD::MemIndexType IndexType) { 7224 assert(Ops.size() == 6 && "Incompatible number of operands"); 7225 7226 FoldingSetNodeID ID; 7227 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 7228 ID.AddInteger(VT.getRawBits()); 7229 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 7230 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7231 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7232 void *IP = nullptr; 7233 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7234 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 7235 return SDValue(E, 0); 7236 } 7237 7238 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7239 VTs, VT, MMO, IndexType); 7240 createOperands(N, Ops); 7241 7242 assert(N->getPassThru().getValueType() == N->getValueType(0) && 7243 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 7244 assert(N->getMask().getValueType().getVectorNumElements() == 7245 N->getValueType(0).getVectorNumElements() && 7246 "Vector width mismatch between mask and data"); 7247 assert(N->getIndex().getValueType().getVectorNumElements() >= 7248 N->getValueType(0).getVectorNumElements() && 7249 "Vector width mismatch between index and data"); 7250 assert(isa<ConstantSDNode>(N->getScale()) && 7251 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7252 "Scale should be a constant power of 2"); 7253 7254 CSEMap.InsertNode(N, IP); 7255 InsertNode(N); 7256 SDValue V(N, 0); 7257 NewSDValueDbgMsg(V, "Creating new node: ", this); 7258 return V; 7259 } 7260 7261 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 7262 ArrayRef<SDValue> Ops, 7263 MachineMemOperand *MMO, 7264 ISD::MemIndexType IndexType) { 7265 assert(Ops.size() == 6 && "Incompatible number of operands"); 7266 7267 FoldingSetNodeID ID; 7268 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 7269 ID.AddInteger(VT.getRawBits()); 7270 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 
7271 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7272 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7273 void *IP = nullptr; 7274 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7275 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 7276 return SDValue(E, 0); 7277 } 7278 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7279 VTs, VT, MMO, IndexType); 7280 createOperands(N, Ops); 7281 7282 assert(N->getMask().getValueType().getVectorNumElements() == 7283 N->getValue().getValueType().getVectorNumElements() && 7284 "Vector width mismatch between mask and data"); 7285 assert(N->getIndex().getValueType().getVectorNumElements() >= 7286 N->getValue().getValueType().getVectorNumElements() && 7287 "Vector width mismatch between index and data"); 7288 assert(isa<ConstantSDNode>(N->getScale()) && 7289 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7290 "Scale should be a constant power of 2"); 7291 7292 CSEMap.InsertNode(N, IP); 7293 InsertNode(N); 7294 SDValue V(N, 0); 7295 NewSDValueDbgMsg(V, "Creating new node: ", this); 7296 return V; 7297 } 7298 7299 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) { 7300 // select undef, T, F --> T (if T is a constant), otherwise F 7301 // select, ?, undef, F --> F 7302 // select, ?, T, undef --> T 7303 if (Cond.isUndef()) 7304 return isConstantValueOfAnyType(T) ? T : F; 7305 if (T.isUndef()) 7306 return F; 7307 if (F.isUndef()) 7308 return T; 7309 7310 // select true, T, F --> T 7311 // select false, T, F --> F 7312 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond)) 7313 return CondC->isNullValue() ? F : T; 7314 7315 // TODO: This should simplify VSELECT with constant condition using something 7316 // like this (but check boolean contents to be complete?): 7317 // if (ISD::isBuildVectorAllOnes(Cond.getNode())) 7318 // return T; 7319 // if (ISD::isBuildVectorAllZeros(Cond.getNode())) 7320 // return F; 7321 7322 // select ?, T, T --> T 7323 if (T == F) 7324 return T; 7325 7326 return SDValue(); 7327 } 7328 7329 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) { 7330 // shift undef, Y --> 0 (can always assume that the undef value is 0) 7331 if (X.isUndef()) 7332 return getConstant(0, SDLoc(X.getNode()), X.getValueType()); 7333 // shift X, undef --> undef (because it may shift by the bitwidth) 7334 if (Y.isUndef()) 7335 return getUNDEF(X.getValueType()); 7336 7337 // shift 0, Y --> 0 7338 // shift X, 0 --> X 7339 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y)) 7340 return X; 7341 7342 // shift X, C >= bitwidth(X) --> undef 7343 // All vector elements must be too big (or undef) to avoid partial undefs. 7344 auto isShiftTooBig = [X](ConstantSDNode *Val) { 7345 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits()); 7346 }; 7347 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true)) 7348 return getUNDEF(X.getValueType()); 7349 7350 return SDValue(); 7351 } 7352 7353 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, 7354 SDNodeFlags Flags) { 7355 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand 7356 // (an undef operand can be chosen to be Nan/Inf), then the result of this 7357 // operation is poison. That result can be relaxed to undef. 
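  // For example, with the 'nnan' flag both (fadd nnan X, undef) and
  // (fadd nnan X, <NaN splat>) fold to undef below; 'ninf' behaves the same
  // way for an infinity or undef operand.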
7358 ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true); 7359 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true); 7360 bool HasNan = (XC && XC->getValueAPF().isNaN()) || 7361 (YC && YC->getValueAPF().isNaN()); 7362 bool HasInf = (XC && XC->getValueAPF().isInfinity()) || 7363 (YC && YC->getValueAPF().isInfinity()); 7364 7365 if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef())) 7366 return getUNDEF(X.getValueType()); 7367 7368 if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef())) 7369 return getUNDEF(X.getValueType()); 7370 7371 if (!YC) 7372 return SDValue(); 7373 7374 // X + -0.0 --> X 7375 if (Opcode == ISD::FADD) 7376 if (YC->getValueAPF().isNegZero()) 7377 return X; 7378 7379 // X - +0.0 --> X 7380 if (Opcode == ISD::FSUB) 7381 if (YC->getValueAPF().isPosZero()) 7382 return X; 7383 7384 // X * 1.0 --> X 7385 // X / 1.0 --> X 7386 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV) 7387 if (YC->getValueAPF().isExactlyValue(1.0)) 7388 return X; 7389 7390 return SDValue(); 7391 } 7392 7393 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 7394 SDValue Ptr, SDValue SV, unsigned Align) { 7395 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 7396 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 7397 } 7398 7399 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7400 ArrayRef<SDUse> Ops) { 7401 switch (Ops.size()) { 7402 case 0: return getNode(Opcode, DL, VT); 7403 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 7404 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 7405 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 7406 default: break; 7407 } 7408 7409 // Copy from an SDUse array into an SDValue array for use with 7410 // the regular getNode logic. 7411 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 7412 return getNode(Opcode, DL, VT, NewOps); 7413 } 7414 7415 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7416 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7417 unsigned NumOps = Ops.size(); 7418 switch (NumOps) { 7419 case 0: return getNode(Opcode, DL, VT); 7420 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 7421 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 7422 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags); 7423 default: break; 7424 } 7425 7426 switch (Opcode) { 7427 default: break; 7428 case ISD::BUILD_VECTOR: 7429 // Attempt to simplify BUILD_VECTOR. 7430 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 7431 return V; 7432 break; 7433 case ISD::CONCAT_VECTORS: 7434 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 7435 return V; 7436 break; 7437 case ISD::SELECT_CC: 7438 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 7439 assert(Ops[0].getValueType() == Ops[1].getValueType() && 7440 "LHS and RHS of condition must have same type!"); 7441 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7442 "True and False arms of SelectCC must have same type!"); 7443 assert(Ops[2].getValueType() == VT && 7444 "select_cc node must be of same type as true and false value!"); 7445 break; 7446 case ISD::BR_CC: 7447 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 7448 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7449 "LHS/RHS of comparison should match types!"); 7450 break; 7451 } 7452 7453 // Memoize nodes. 
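  // Only nodes that do not produce MVT::Glue are memoized: a FoldingSetNodeID
  // is built from the opcode, result type and operands, and an identical
  // pre-existing node is returned instead of allocating a new one. Glue
  // results are deliberately never shared.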
7454 SDNode *N; 7455 SDVTList VTs = getVTList(VT); 7456 7457 if (VT != MVT::Glue) { 7458 FoldingSetNodeID ID; 7459 AddNodeIDNode(ID, Opcode, VTs, Ops); 7460 void *IP = nullptr; 7461 7462 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7463 return SDValue(E, 0); 7464 7465 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7466 createOperands(N, Ops); 7467 7468 CSEMap.InsertNode(N, IP); 7469 } else { 7470 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7471 createOperands(N, Ops); 7472 } 7473 7474 N->setFlags(Flags); 7475 InsertNode(N); 7476 SDValue V(N, 0); 7477 NewSDValueDbgMsg(V, "Creating new node: ", this); 7478 return V; 7479 } 7480 7481 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7482 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 7483 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 7484 } 7485 7486 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7487 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7488 if (VTList.NumVTs == 1) 7489 return getNode(Opcode, DL, VTList.VTs[0], Ops); 7490 7491 switch (Opcode) { 7492 case ISD::STRICT_FP_EXTEND: 7493 assert(VTList.NumVTs == 2 && Ops.size() == 2 && 7494 "Invalid STRICT_FP_EXTEND!"); 7495 assert(VTList.VTs[0].isFloatingPoint() && 7496 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!"); 7497 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7498 "STRICT_FP_EXTEND result type should be vector iff the operand " 7499 "type is vector!"); 7500 assert((!VTList.VTs[0].isVector() || 7501 VTList.VTs[0].getVectorNumElements() == 7502 Ops[1].getValueType().getVectorNumElements()) && 7503 "Vector element count mismatch!"); 7504 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) && 7505 "Invalid fpext node, dst <= src!"); 7506 break; 7507 case ISD::STRICT_FP_ROUND: 7508 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!"); 7509 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7510 "STRICT_FP_ROUND result type should be vector iff the operand " 7511 "type is vector!"); 7512 assert((!VTList.VTs[0].isVector() || 7513 VTList.VTs[0].getVectorNumElements() == 7514 Ops[1].getValueType().getVectorNumElements()) && 7515 "Vector element count mismatch!"); 7516 assert(VTList.VTs[0].isFloatingPoint() && 7517 Ops[1].getValueType().isFloatingPoint() && 7518 VTList.VTs[0].bitsLT(Ops[1].getValueType()) && 7519 isa<ConstantSDNode>(Ops[2]) && 7520 (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 || 7521 cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) && 7522 "Invalid STRICT_FP_ROUND!"); 7523 break; 7524 #if 0 7525 // FIXME: figure out how to safely handle things like 7526 // int foo(int x) { return 1 << (x & 255); } 7527 // int bar() { return foo(256); } 7528 case ISD::SRA_PARTS: 7529 case ISD::SRL_PARTS: 7530 case ISD::SHL_PARTS: 7531 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 7532 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 7533 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7534 else if (N3.getOpcode() == ISD::AND) 7535 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 7536 // If the and is only masking out bits that cannot effect the shift, 7537 // eliminate the and. 7538 unsigned NumBits = VT.getScalarSizeInBits()*2; 7539 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 7540 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7541 } 7542 break; 7543 #endif 7544 } 7545 7546 // Memoize the node unless it returns a flag. 
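  // "Returns a flag" means the last result type in VTList is MVT::Glue; such
  // nodes are created fresh every time, while all others go through the CSE
  // map exactly as in the single-result overload above.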
7547 SDNode *N; 7548 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 7549 FoldingSetNodeID ID; 7550 AddNodeIDNode(ID, Opcode, VTList, Ops); 7551 void *IP = nullptr; 7552 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7553 return SDValue(E, 0); 7554 7555 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7556 createOperands(N, Ops); 7557 CSEMap.InsertNode(N, IP); 7558 } else { 7559 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7560 createOperands(N, Ops); 7561 } 7562 7563 N->setFlags(Flags); 7564 InsertNode(N); 7565 SDValue V(N, 0); 7566 NewSDValueDbgMsg(V, "Creating new node: ", this); 7567 return V; 7568 } 7569 7570 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7571 SDVTList VTList) { 7572 return getNode(Opcode, DL, VTList, None); 7573 } 7574 7575 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7576 SDValue N1) { 7577 SDValue Ops[] = { N1 }; 7578 return getNode(Opcode, DL, VTList, Ops); 7579 } 7580 7581 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7582 SDValue N1, SDValue N2) { 7583 SDValue Ops[] = { N1, N2 }; 7584 return getNode(Opcode, DL, VTList, Ops); 7585 } 7586 7587 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7588 SDValue N1, SDValue N2, SDValue N3) { 7589 SDValue Ops[] = { N1, N2, N3 }; 7590 return getNode(Opcode, DL, VTList, Ops); 7591 } 7592 7593 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7594 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 7595 SDValue Ops[] = { N1, N2, N3, N4 }; 7596 return getNode(Opcode, DL, VTList, Ops); 7597 } 7598 7599 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7600 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 7601 SDValue N5) { 7602 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 7603 return getNode(Opcode, DL, VTList, Ops); 7604 } 7605 7606 SDVTList SelectionDAG::getVTList(EVT VT) { 7607 return makeVTList(SDNode::getValueTypeList(VT), 1); 7608 } 7609 7610 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 7611 FoldingSetNodeID ID; 7612 ID.AddInteger(2U); 7613 ID.AddInteger(VT1.getRawBits()); 7614 ID.AddInteger(VT2.getRawBits()); 7615 7616 void *IP = nullptr; 7617 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7618 if (!Result) { 7619 EVT *Array = Allocator.Allocate<EVT>(2); 7620 Array[0] = VT1; 7621 Array[1] = VT2; 7622 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 7623 VTListMap.InsertNode(Result, IP); 7624 } 7625 return Result->getSDVTList(); 7626 } 7627 7628 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 7629 FoldingSetNodeID ID; 7630 ID.AddInteger(3U); 7631 ID.AddInteger(VT1.getRawBits()); 7632 ID.AddInteger(VT2.getRawBits()); 7633 ID.AddInteger(VT3.getRawBits()); 7634 7635 void *IP = nullptr; 7636 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7637 if (!Result) { 7638 EVT *Array = Allocator.Allocate<EVT>(3); 7639 Array[0] = VT1; 7640 Array[1] = VT2; 7641 Array[2] = VT3; 7642 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 7643 VTListMap.InsertNode(Result, IP); 7644 } 7645 return Result->getSDVTList(); 7646 } 7647 7648 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 7649 FoldingSetNodeID ID; 7650 ID.AddInteger(4U); 7651 ID.AddInteger(VT1.getRawBits()); 7652 ID.AddInteger(VT2.getRawBits()); 7653 ID.AddInteger(VT3.getRawBits()); 7654 ID.AddInteger(VT4.getRawBits()); 7655 7656 void *IP = 
nullptr; 7657 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7658 if (!Result) { 7659 EVT *Array = Allocator.Allocate<EVT>(4); 7660 Array[0] = VT1; 7661 Array[1] = VT2; 7662 Array[2] = VT3; 7663 Array[3] = VT4; 7664 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 7665 VTListMap.InsertNode(Result, IP); 7666 } 7667 return Result->getSDVTList(); 7668 } 7669 7670 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 7671 unsigned NumVTs = VTs.size(); 7672 FoldingSetNodeID ID; 7673 ID.AddInteger(NumVTs); 7674 for (unsigned index = 0; index < NumVTs; index++) { 7675 ID.AddInteger(VTs[index].getRawBits()); 7676 } 7677 7678 void *IP = nullptr; 7679 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7680 if (!Result) { 7681 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 7682 llvm::copy(VTs, Array); 7683 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 7684 VTListMap.InsertNode(Result, IP); 7685 } 7686 return Result->getSDVTList(); 7687 } 7688 7689 7690 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 7691 /// specified operands. If the resultant node already exists in the DAG, 7692 /// this does not modify the specified node, instead it returns the node that 7693 /// already exists. If the resultant node does not exist in the DAG, the 7694 /// input node is returned. As a degenerate case, if you specify the same 7695 /// input operands as the node already has, the input node is returned. 7696 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 7697 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 7698 7699 // Check to see if there is no change. 7700 if (Op == N->getOperand(0)) return N; 7701 7702 // See if the modified node already exists. 7703 void *InsertPos = nullptr; 7704 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 7705 return Existing; 7706 7707 // Nope it doesn't. Remove the node from its current place in the maps. 7708 if (InsertPos) 7709 if (!RemoveNodeFromCSEMaps(N)) 7710 InsertPos = nullptr; 7711 7712 // Now we update the operands. 7713 N->OperandList[0].set(Op); 7714 7715 updateDivergence(N); 7716 // If this gets put into a CSE map, add it. 7717 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7718 return N; 7719 } 7720 7721 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 7722 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 7723 7724 // Check to see if there is no change. 7725 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 7726 return N; // No operands changed, just return the input node. 7727 7728 // See if the modified node already exists. 7729 void *InsertPos = nullptr; 7730 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 7731 return Existing; 7732 7733 // Nope it doesn't. Remove the node from its current place in the maps. 7734 if (InsertPos) 7735 if (!RemoveNodeFromCSEMaps(N)) 7736 InsertPos = nullptr; 7737 7738 // Now we update the operands. 7739 if (N->OperandList[0] != Op1) 7740 N->OperandList[0].set(Op1); 7741 if (N->OperandList[1] != Op2) 7742 N->OperandList[1].set(Op2); 7743 7744 updateDivergence(N); 7745 // If this gets put into a CSE map, add it. 
7746 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7747 return N; 7748 } 7749 7750 SDNode *SelectionDAG:: 7751 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 7752 SDValue Ops[] = { Op1, Op2, Op3 }; 7753 return UpdateNodeOperands(N, Ops); 7754 } 7755 7756 SDNode *SelectionDAG:: 7757 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7758 SDValue Op3, SDValue Op4) { 7759 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 7760 return UpdateNodeOperands(N, Ops); 7761 } 7762 7763 SDNode *SelectionDAG:: 7764 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7765 SDValue Op3, SDValue Op4, SDValue Op5) { 7766 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 7767 return UpdateNodeOperands(N, Ops); 7768 } 7769 7770 SDNode *SelectionDAG:: 7771 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 7772 unsigned NumOps = Ops.size(); 7773 assert(N->getNumOperands() == NumOps && 7774 "Update with wrong number of operands"); 7775 7776 // If no operands changed just return the input node. 7777 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 7778 return N; 7779 7780 // See if the modified node already exists. 7781 void *InsertPos = nullptr; 7782 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 7783 return Existing; 7784 7785 // Nope it doesn't. Remove the node from its current place in the maps. 7786 if (InsertPos) 7787 if (!RemoveNodeFromCSEMaps(N)) 7788 InsertPos = nullptr; 7789 7790 // Now we update the operands. 7791 for (unsigned i = 0; i != NumOps; ++i) 7792 if (N->OperandList[i] != Ops[i]) 7793 N->OperandList[i].set(Ops[i]); 7794 7795 updateDivergence(N); 7796 // If this gets put into a CSE map, add it. 7797 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7798 return N; 7799 } 7800 7801 /// DropOperands - Release the operands and set this node to have 7802 /// zero operands. 7803 void SDNode::DropOperands() { 7804 // Unlike the code in MorphNodeTo that does this, we don't need to 7805 // watch for dead nodes here. 7806 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 7807 SDUse &Use = *I++; 7808 Use.set(SDValue()); 7809 } 7810 } 7811 7812 void SelectionDAG::setNodeMemRefs(MachineSDNode *N, 7813 ArrayRef<MachineMemOperand *> NewMemRefs) { 7814 if (NewMemRefs.empty()) { 7815 N->clearMemRefs(); 7816 return; 7817 } 7818 7819 // Check if we can avoid allocating by storing a single reference directly. 7820 if (NewMemRefs.size() == 1) { 7821 N->MemRefs = NewMemRefs[0]; 7822 N->NumMemRefs = 1; 7823 return; 7824 } 7825 7826 MachineMemOperand **MemRefsBuffer = 7827 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size()); 7828 llvm::copy(NewMemRefs, MemRefsBuffer); 7829 N->MemRefs = MemRefsBuffer; 7830 N->NumMemRefs = static_cast<int>(NewMemRefs.size()); 7831 } 7832 7833 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 7834 /// machine opcode. 
7835 /// 7836 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7837 EVT VT) { 7838 SDVTList VTs = getVTList(VT); 7839 return SelectNodeTo(N, MachineOpc, VTs, None); 7840 } 7841 7842 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7843 EVT VT, SDValue Op1) { 7844 SDVTList VTs = getVTList(VT); 7845 SDValue Ops[] = { Op1 }; 7846 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7847 } 7848 7849 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7850 EVT VT, SDValue Op1, 7851 SDValue Op2) { 7852 SDVTList VTs = getVTList(VT); 7853 SDValue Ops[] = { Op1, Op2 }; 7854 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7855 } 7856 7857 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7858 EVT VT, SDValue Op1, 7859 SDValue Op2, SDValue Op3) { 7860 SDVTList VTs = getVTList(VT); 7861 SDValue Ops[] = { Op1, Op2, Op3 }; 7862 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7863 } 7864 7865 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7866 EVT VT, ArrayRef<SDValue> Ops) { 7867 SDVTList VTs = getVTList(VT); 7868 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7869 } 7870 7871 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7872 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 7873 SDVTList VTs = getVTList(VT1, VT2); 7874 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7875 } 7876 7877 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7878 EVT VT1, EVT VT2) { 7879 SDVTList VTs = getVTList(VT1, VT2); 7880 return SelectNodeTo(N, MachineOpc, VTs, None); 7881 } 7882 7883 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7884 EVT VT1, EVT VT2, EVT VT3, 7885 ArrayRef<SDValue> Ops) { 7886 SDVTList VTs = getVTList(VT1, VT2, VT3); 7887 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7888 } 7889 7890 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7891 EVT VT1, EVT VT2, 7892 SDValue Op1, SDValue Op2) { 7893 SDVTList VTs = getVTList(VT1, VT2); 7894 SDValue Ops[] = { Op1, Op2 }; 7895 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7896 } 7897 7898 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7899 SDVTList VTs, ArrayRef<SDValue> Ops) { 7900 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 7901 // Reset the NodeID to -1. 7902 New->setNodeId(-1); 7903 if (New != N) { 7904 ReplaceAllUsesWith(N, New); 7905 RemoveDeadNode(N); 7906 } 7907 return New; 7908 } 7909 7910 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away 7911 /// the line number information on the merged node since it is not possible to 7912 /// preserve the information that the operation is associated with multiple lines. 7913 /// This will make the debugger work better at -O0, where there is a higher 7914 /// probability of having other instructions associated with that line. 7915 /// 7916 /// For IROrder, we keep the smaller of the two. 7917 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { 7918 DebugLoc NLoc = N->getDebugLoc(); 7919 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) { 7920 N->setDebugLoc(DebugLoc()); 7921 } 7922 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); 7923 N->setIROrder(Order); 7924 return N; 7925 } 7926 7927 /// MorphNodeTo - This *mutates* the specified node to have the specified 7928 /// return type, opcode, and operands. 7929 /// 7930 /// Note that MorphNodeTo returns the resultant node.
If there is already a 7931 /// node of the specified opcode and operands, it returns that node instead of 7932 /// the current one. Note that the SDLoc need not be the same. 7933 /// 7934 /// Using MorphNodeTo is faster than creating a new node and swapping it in 7935 /// with ReplaceAllUsesWith both because it often avoids allocating a new 7936 /// node, and because it doesn't require CSE recalculation for any of 7937 /// the node's users. 7938 /// 7939 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG. 7940 /// As a consequence it isn't appropriate to use from within the DAG combiner or 7941 /// the legalizer which maintain worklists that would need to be updated when 7942 /// deleting things. 7943 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, 7944 SDVTList VTs, ArrayRef<SDValue> Ops) { 7945 // If an identical node already exists, use it. 7946 void *IP = nullptr; 7947 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { 7948 FoldingSetNodeID ID; 7949 AddNodeIDNode(ID, Opc, VTs, Ops); 7950 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) 7951 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); 7952 } 7953 7954 if (!RemoveNodeFromCSEMaps(N)) 7955 IP = nullptr; 7956 7957 // Start the morphing. 7958 N->NodeType = Opc; 7959 N->ValueList = VTs.VTs; 7960 N->NumValues = VTs.NumVTs; 7961 7962 // Clear the operands list, updating used nodes to remove this from their 7963 // use list. Keep track of any operands that become dead as a result. 7964 SmallPtrSet<SDNode*, 16> DeadNodeSet; 7965 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 7966 SDUse &Use = *I++; 7967 SDNode *Used = Use.getNode(); 7968 Use.set(SDValue()); 7969 if (Used->use_empty()) 7970 DeadNodeSet.insert(Used); 7971 } 7972 7973 // For MachineNode, initialize the memory references information. 7974 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) 7975 MN->clearMemRefs(); 7976 7977 // Swap for an appropriately sized array from the recycler. 7978 removeOperands(N); 7979 createOperands(N, Ops); 7980 7981 // Delete any nodes that are still dead after adding the uses for the 7982 // new operands. 7983 if (!DeadNodeSet.empty()) { 7984 SmallVector<SDNode *, 16> DeadNodes; 7985 for (SDNode *N : DeadNodeSet) 7986 if (N->use_empty()) 7987 DeadNodes.push_back(N); 7988 RemoveDeadNodes(DeadNodes); 7989 } 7990 7991 if (IP) 7992 CSEMap.InsertNode(N, IP); // Memoize the new node. 7993 return N; 7994 } 7995 7996 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) { 7997 unsigned OrigOpc = Node->getOpcode(); 7998 unsigned NewOpc; 7999 switch (OrigOpc) { 8000 default: 8001 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!"); 8002 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 8003 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break; 8004 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 8005 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break; 8006 #include "llvm/IR/ConstrainedOps.def" 8007 } 8008 8009 assert(Node->getNumValues() == 2 && "Unexpected number of results!"); 8010 8011 // We're taking this node out of the chain, so we need to re-link things. 
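// For example (illustrative node numbering only), a node
//   t10: f32,ch = strict_fadd t0, t3, t4
// is morphed below into
//   t10: f32 = fadd t3, t4
// and all users of the old chain result t10:1 are rewired to the incoming
// chain t0.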
8012 SDValue InputChain = Node->getOperand(0); 8013 SDValue OutputChain = SDValue(Node, 1); 8014 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 8015 8016 SmallVector<SDValue, 3> Ops; 8017 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) 8018 Ops.push_back(Node->getOperand(i)); 8019 8020 SDVTList VTs = getVTList(Node->getValueType(0)); 8021 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops); 8022 8023 // MorphNodeTo can operate in two ways: if an existing node with the 8024 // specified operands exists, it can just return it. Otherwise, it 8025 // updates the node in place to have the requested operands. 8026 if (Res == Node) { 8027 // If we updated the node in place, reset the node ID. To the isel, 8028 // this should be just like a newly allocated machine node. 8029 Res->setNodeId(-1); 8030 } else { 8031 ReplaceAllUsesWith(Node, Res); 8032 RemoveDeadNode(Node); 8033 } 8034 8035 return Res; 8036 } 8037 8038 /// getMachineNode - These are used for target selectors to create a new node 8039 /// with specified return type(s), MachineInstr opcode, and operands. 8040 /// 8041 /// Note that getMachineNode returns the resultant node. If there is already a 8042 /// node of the specified opcode and operands, it returns that node instead of 8043 /// the current one. 8044 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8045 EVT VT) { 8046 SDVTList VTs = getVTList(VT); 8047 return getMachineNode(Opcode, dl, VTs, None); 8048 } 8049 8050 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8051 EVT VT, SDValue Op1) { 8052 SDVTList VTs = getVTList(VT); 8053 SDValue Ops[] = { Op1 }; 8054 return getMachineNode(Opcode, dl, VTs, Ops); 8055 } 8056 8057 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8058 EVT VT, SDValue Op1, SDValue Op2) { 8059 SDVTList VTs = getVTList(VT); 8060 SDValue Ops[] = { Op1, Op2 }; 8061 return getMachineNode(Opcode, dl, VTs, Ops); 8062 } 8063 8064 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8065 EVT VT, SDValue Op1, SDValue Op2, 8066 SDValue Op3) { 8067 SDVTList VTs = getVTList(VT); 8068 SDValue Ops[] = { Op1, Op2, Op3 }; 8069 return getMachineNode(Opcode, dl, VTs, Ops); 8070 } 8071 8072 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8073 EVT VT, ArrayRef<SDValue> Ops) { 8074 SDVTList VTs = getVTList(VT); 8075 return getMachineNode(Opcode, dl, VTs, Ops); 8076 } 8077 8078 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8079 EVT VT1, EVT VT2, SDValue Op1, 8080 SDValue Op2) { 8081 SDVTList VTs = getVTList(VT1, VT2); 8082 SDValue Ops[] = { Op1, Op2 }; 8083 return getMachineNode(Opcode, dl, VTs, Ops); 8084 } 8085 8086 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8087 EVT VT1, EVT VT2, SDValue Op1, 8088 SDValue Op2, SDValue Op3) { 8089 SDVTList VTs = getVTList(VT1, VT2); 8090 SDValue Ops[] = { Op1, Op2, Op3 }; 8091 return getMachineNode(Opcode, dl, VTs, Ops); 8092 } 8093 8094 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8095 EVT VT1, EVT VT2, 8096 ArrayRef<SDValue> Ops) { 8097 SDVTList VTs = getVTList(VT1, VT2); 8098 return getMachineNode(Opcode, dl, VTs, Ops); 8099 } 8100 8101 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8102 EVT VT1, EVT VT2, EVT VT3, 8103 SDValue Op1, SDValue Op2) { 8104 SDVTList VTs = getVTList(VT1, VT2, VT3); 8105 SDValue Ops[] = { Op1, Op2 }; 8106 return 
getMachineNode(Opcode, dl, VTs, Ops); 8107 } 8108 8109 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8110 EVT VT1, EVT VT2, EVT VT3, 8111 SDValue Op1, SDValue Op2, 8112 SDValue Op3) { 8113 SDVTList VTs = getVTList(VT1, VT2, VT3); 8114 SDValue Ops[] = { Op1, Op2, Op3 }; 8115 return getMachineNode(Opcode, dl, VTs, Ops); 8116 } 8117 8118 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8119 EVT VT1, EVT VT2, EVT VT3, 8120 ArrayRef<SDValue> Ops) { 8121 SDVTList VTs = getVTList(VT1, VT2, VT3); 8122 return getMachineNode(Opcode, dl, VTs, Ops); 8123 } 8124 8125 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8126 ArrayRef<EVT> ResultTys, 8127 ArrayRef<SDValue> Ops) { 8128 SDVTList VTs = getVTList(ResultTys); 8129 return getMachineNode(Opcode, dl, VTs, Ops); 8130 } 8131 8132 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 8133 SDVTList VTs, 8134 ArrayRef<SDValue> Ops) { 8135 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 8136 MachineSDNode *N; 8137 void *IP = nullptr; 8138 8139 if (DoCSE) { 8140 FoldingSetNodeID ID; 8141 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 8142 IP = nullptr; 8143 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 8144 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 8145 } 8146 } 8147 8148 // Allocate a new MachineSDNode. 8149 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 8150 createOperands(N, Ops); 8151 8152 if (DoCSE) 8153 CSEMap.InsertNode(N, IP); 8154 8155 InsertNode(N); 8156 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this); 8157 return N; 8158 } 8159 8160 /// getTargetExtractSubreg - A convenience function for creating 8161 /// TargetOpcode::EXTRACT_SUBREG nodes. 8162 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8163 SDValue Operand) { 8164 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8165 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 8166 VT, Operand, SRIdxVal); 8167 return SDValue(Subreg, 0); 8168 } 8169 8170 /// getTargetInsertSubreg - A convenience function for creating 8171 /// TargetOpcode::INSERT_SUBREG nodes. 8172 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8173 SDValue Operand, SDValue Subreg) { 8174 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8175 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 8176 VT, Operand, Subreg, SRIdxVal); 8177 return SDValue(Result, 0); 8178 } 8179 8180 /// getNodeIfExists - Get the specified node if it's already available, or 8181 /// else return NULL. 8182 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 8183 ArrayRef<SDValue> Ops, 8184 const SDNodeFlags Flags) { 8185 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 8186 FoldingSetNodeID ID; 8187 AddNodeIDNode(ID, Opcode, VTList, Ops); 8188 void *IP = nullptr; 8189 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 8190 E->intersectFlagsWith(Flags); 8191 return E; 8192 } 8193 } 8194 return nullptr; 8195 } 8196 8197 /// getDbgValue - Creates a SDDbgValue node. 
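/// The overloads below differ only in how the value's location is described:
/// an SDNode result, a constant, a frame index, or a virtual register. A
/// hedged usage sketch (DAG, Var, Expr, N, DL and Order are assumed to already
/// be in scope):
/// \code
///   SDDbgValue *DV = DAG.getDbgValue(Var, Expr, N, /*R=*/0,
///                                    /*IsIndirect=*/false, DL, Order);
///   DAG.AddDbgValue(DV, N, /*isParameter=*/false);
/// \endcode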
8198 /// 8199 /// SDNode 8200 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 8201 SDNode *N, unsigned R, bool IsIndirect, 8202 const DebugLoc &DL, unsigned O) { 8203 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8204 "Expected inlined-at fields to agree"); 8205 return new (DbgInfo->getAlloc()) 8206 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 8207 } 8208 8209 /// Constant 8210 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 8211 DIExpression *Expr, 8212 const Value *C, 8213 const DebugLoc &DL, unsigned O) { 8214 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8215 "Expected inlined-at fields to agree"); 8216 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 8217 } 8218 8219 /// FrameIndex 8220 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 8221 DIExpression *Expr, unsigned FI, 8222 bool IsIndirect, 8223 const DebugLoc &DL, 8224 unsigned O) { 8225 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8226 "Expected inlined-at fields to agree"); 8227 return new (DbgInfo->getAlloc()) 8228 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX); 8229 } 8230 8231 /// VReg 8232 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, 8233 DIExpression *Expr, 8234 unsigned VReg, bool IsIndirect, 8235 const DebugLoc &DL, unsigned O) { 8236 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8237 "Expected inlined-at fields to agree"); 8238 return new (DbgInfo->getAlloc()) 8239 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG); 8240 } 8241 8242 void SelectionDAG::transferDbgValues(SDValue From, SDValue To, 8243 unsigned OffsetInBits, unsigned SizeInBits, 8244 bool InvalidateDbg) { 8245 SDNode *FromNode = From.getNode(); 8246 SDNode *ToNode = To.getNode(); 8247 assert(FromNode && ToNode && "Can't modify dbg values"); 8248 8249 // PR35338 8250 // TODO: assert(From != To && "Redundant dbg value transfer"); 8251 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); 8252 if (From == To || FromNode == ToNode) 8253 return; 8254 8255 if (!FromNode->getHasDebugValue()) 8256 return; 8257 8258 SmallVector<SDDbgValue *, 2> ClonedDVs; 8259 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { 8260 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated()) 8261 continue; 8262 8263 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); 8264 8265 // Just transfer the dbg value attached to From. 8266 if (Dbg->getResNo() != From.getResNo()) 8267 continue; 8268 8269 DIVariable *Var = Dbg->getVariable(); 8270 auto *Expr = Dbg->getExpression(); 8271 // If a fragment is requested, update the expression. 8272 if (SizeInBits) { 8273 // When splitting a larger (e.g., sign-extended) value whose 8274 // lower bits are described with an SDDbgValue, do not attempt 8275 // to transfer the SDDbgValue to the upper bits. 8276 if (auto FI = Expr->getFragmentInfo()) 8277 if (OffsetInBits + SizeInBits > FI->SizeInBits) 8278 continue; 8279 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, 8280 SizeInBits); 8281 if (!Fragment) 8282 continue; 8283 Expr = *Fragment; 8284 } 8285 // Clone the SDDbgValue and move it to To. 
8286 SDDbgValue *Clone = getDbgValue( 8287 Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(), 8288 std::max(ToNode->getIROrder(), Dbg->getOrder())); 8289 ClonedDVs.push_back(Clone); 8290 8291 if (InvalidateDbg) { 8292 // Invalidate value and indicate the SDDbgValue should not be emitted. 8293 Dbg->setIsInvalidated(); 8294 Dbg->setIsEmitted(); 8295 } 8296 } 8297 8298 for (SDDbgValue *Dbg : ClonedDVs) 8299 AddDbgValue(Dbg, ToNode, false); 8300 } 8301 8302 void SelectionDAG::salvageDebugInfo(SDNode &N) { 8303 if (!N.getHasDebugValue()) 8304 return; 8305 8306 SmallVector<SDDbgValue *, 2> ClonedDVs; 8307 for (auto DV : GetDbgValues(&N)) { 8308 if (DV->isInvalidated()) 8309 continue; 8310 switch (N.getOpcode()) { 8311 default: 8312 break; 8313 case ISD::ADD: 8314 SDValue N0 = N.getOperand(0); 8315 SDValue N1 = N.getOperand(1); 8316 if (!isConstantIntBuildVectorOrConstantInt(N0) && 8317 isConstantIntBuildVectorOrConstantInt(N1)) { 8318 uint64_t Offset = N.getConstantOperandVal(1); 8319 // Rewrite an ADD constant node into a DIExpression. Since we are 8320 // performing arithmetic to compute the variable's *value* in the 8321 // DIExpression, we need to mark the expression with a 8322 // DW_OP_stack_value. 8323 auto *DIExpr = DV->getExpression(); 8324 DIExpr = 8325 DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset); 8326 SDDbgValue *Clone = 8327 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(), 8328 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder()); 8329 ClonedDVs.push_back(Clone); 8330 DV->setIsInvalidated(); 8331 DV->setIsEmitted(); 8332 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; 8333 N0.getNode()->dumprFull(this); 8334 dbgs() << " into " << *DIExpr << '\n'); 8335 } 8336 } 8337 } 8338 8339 for (SDDbgValue *Dbg : ClonedDVs) 8340 AddDbgValue(Dbg, Dbg->getSDNode(), false); 8341 } 8342 8343 /// Creates a SDDbgLabel node. 8344 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label, 8345 const DebugLoc &DL, unsigned O) { 8346 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && 8347 "Expected inlined-at fields to agree"); 8348 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); 8349 } 8350 8351 namespace { 8352 8353 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 8354 /// pointed to by a use iterator is deleted, increment the use iterator 8355 /// so that it doesn't dangle. 8356 /// 8357 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 8358 SDNode::use_iterator &UI; 8359 SDNode::use_iterator &UE; 8360 8361 void NodeDeleted(SDNode *N, SDNode *E) override { 8362 // Increment the iterator as needed. 8363 while (UI != UE && N == *UI) 8364 ++UI; 8365 } 8366 8367 public: 8368 RAUWUpdateListener(SelectionDAG &d, 8369 SDNode::use_iterator &ui, 8370 SDNode::use_iterator &ue) 8371 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 8372 }; 8373 8374 } // end anonymous namespace 8375 8376 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8377 /// This can cause recursive merging of nodes in the DAG. 8378 /// 8379 /// This version assumes From has a single result value. 
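///
/// A minimal, illustrative sketch (DAG and N are assumed; N is a single-result
/// ISD::ADD node): folding (add x, 0) into x for every user of the add:
/// \code
///   SDValue Add(N, 0);
///   SDValue X = N->getOperand(0);
///   DAG.ReplaceAllUsesWith(Add, X); // every user of the add now uses X
/// \endcode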
8380 /// 8381 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { 8382 SDNode *From = FromN.getNode(); 8383 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && 8384 "Cannot replace with this method!"); 8385 assert(From != To.getNode() && "Cannot replace uses of with self"); 8386 8387 // Preserve Debug Values 8388 transferDbgValues(FromN, To); 8389 8390 // Iterate over all the existing uses of From. New uses will be added 8391 // to the beginning of the use list, which we avoid visiting. 8392 // This specifically avoids visiting uses of From that arise while the 8393 // replacement is happening, because any such uses would be the result 8394 // of CSE: If an existing node looks like From after one of its operands 8395 // is replaced by To, we don't want to replace all of its users with To 8396 // too. See PR3018 for more info. 8397 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8398 RAUWUpdateListener Listener(*this, UI, UE); 8399 while (UI != UE) { 8400 SDNode *User = *UI; 8401 8402 // This node is about to morph, remove its old self from the CSE maps. 8403 RemoveNodeFromCSEMaps(User); 8404 8405 // A user can appear in a use list multiple times, and when this 8406 // happens the uses are usually next to each other in the list. 8407 // To help reduce the number of CSE recomputations, process all 8408 // the uses of this user that we can find this way. 8409 do { 8410 SDUse &Use = UI.getUse(); 8411 ++UI; 8412 Use.set(To); 8413 if (To->isDivergent() != From->isDivergent()) 8414 updateDivergence(User); 8415 } while (UI != UE && *UI == User); 8416 // Now that we have modified User, add it back to the CSE maps. If it 8417 // already exists there, recursively merge the results together. 8418 AddModifiedNodeToCSEMaps(User); 8419 } 8420 8421 // If we just RAUW'd the root, take note. 8422 if (FromN == getRoot()) 8423 setRoot(To); 8424 } 8425 8426 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8427 /// This can cause recursive merging of nodes in the DAG. 8428 /// 8429 /// This version assumes that for each value of From, there is a 8430 /// corresponding value in To in the same position with the same type. 8431 /// 8432 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) { 8433 #ifndef NDEBUG 8434 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8435 assert((!From->hasAnyUseOfValue(i) || 8436 From->getValueType(i) == To->getValueType(i)) && 8437 "Cannot use this version of ReplaceAllUsesWith!"); 8438 #endif 8439 8440 // Handle the trivial case. 8441 if (From == To) 8442 return; 8443 8444 // Preserve Debug Info. Only do this if there's a use. 8445 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8446 if (From->hasAnyUseOfValue(i)) { 8447 assert((i < To->getNumValues()) && "Invalid To location"); 8448 transferDbgValues(SDValue(From, i), SDValue(To, i)); 8449 } 8450 8451 // Iterate over just the existing users of From. See the comments in 8452 // the ReplaceAllUsesWith above. 8453 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8454 RAUWUpdateListener Listener(*this, UI, UE); 8455 while (UI != UE) { 8456 SDNode *User = *UI; 8457 8458 // This node is about to morph, remove its old self from the CSE maps. 8459 RemoveNodeFromCSEMaps(User); 8460 8461 // A user can appear in a use list multiple times, and when this 8462 // happens the uses are usually next to each other in the list.
8463 // To help reduce the number of CSE recomputations, process all 8464 // the uses of this user that we can find this way. 8465 do { 8466 SDUse &Use = UI.getUse(); 8467 ++UI; 8468 Use.setNode(To); 8469 if (To->isDivergent() != From->isDivergent()) 8470 updateDivergence(User); 8471 } while (UI != UE && *UI == User); 8472 8473 // Now that we have modified User, add it back to the CSE maps. If it 8474 // already exists there, recursively merge the results together. 8475 AddModifiedNodeToCSEMaps(User); 8476 } 8477 8478 // If we just RAUW'd the root, take note. 8479 if (From == getRoot().getNode()) 8480 setRoot(SDValue(To, getRoot().getResNo())); 8481 } 8482 8483 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8484 /// This can cause recursive merging of nodes in the DAG. 8485 /// 8486 /// This version can replace From with any result values. To must match the 8487 /// number and types of values returned by From. 8488 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 8489 if (From->getNumValues() == 1) // Handle the simple case efficiently. 8490 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 8491 8492 // Preserve Debug Info. 8493 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8494 transferDbgValues(SDValue(From, i), To[i]); 8495 8496 // Iterate over just the existing users of From. See the comments in 8497 // the ReplaceAllUsesWith above. 8498 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8499 RAUWUpdateListener Listener(*this, UI, UE); 8500 while (UI != UE) { 8501 SDNode *User = *UI; 8502 8503 // This node is about to morph, remove its old self from the CSE maps. 8504 RemoveNodeFromCSEMaps(User); 8505 8506 // A user can appear in a use list multiple times, and when this happens the 8507 // uses are usually next to each other in the list. To help reduce the 8508 // number of CSE and divergence recomputations, process all the uses of this 8509 // user that we can find this way. 8510 bool To_IsDivergent = false; 8511 do { 8512 SDUse &Use = UI.getUse(); 8513 const SDValue &ToOp = To[Use.getResNo()]; 8514 ++UI; 8515 Use.set(ToOp); 8516 To_IsDivergent |= ToOp->isDivergent(); 8517 } while (UI != UE && *UI == User); 8518 8519 if (To_IsDivergent != From->isDivergent()) 8520 updateDivergence(User); 8521 8522 // Now that we have modified User, add it back to the CSE maps. If it 8523 // already exists there, recursively merge the results together. 8524 AddModifiedNodeToCSEMaps(User); 8525 } 8526 8527 // If we just RAUW'd the root, take note. 8528 if (From == getRoot().getNode()) 8529 setRoot(SDValue(To[getRoot().getResNo()])); 8530 } 8531 8532 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 8533 /// uses of other values produced by From.getNode() alone. The Deleted 8534 /// vector is handled the same way as for ReplaceAllUsesWith. 8535 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 8536 // Handle the really simple, really trivial case efficiently. 8537 if (From == To) return; 8538 8539 // Handle the simple, trivial, case efficiently. 8540 if (From.getNode()->getNumValues() == 1) { 8541 ReplaceAllUsesWith(From, To); 8542 return; 8543 } 8544 8545 // Preserve Debug Info. 8546 transferDbgValues(From, To); 8547 8548 // Iterate over just the existing users of From. See the comments in 8549 // the ReplaceAllUsesWith above. 
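// For example (illustrative): if From is the chain result (result number 1)
// of a load and To is a TokenFactor, only the chain users of the load are
// rewritten below; users of the loaded value (result number 0) are skipped.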
8550 SDNode::use_iterator UI = From.getNode()->use_begin(), 8551 UE = From.getNode()->use_end(); 8552 RAUWUpdateListener Listener(*this, UI, UE); 8553 while (UI != UE) { 8554 SDNode *User = *UI; 8555 bool UserRemovedFromCSEMaps = false; 8556 8557 // A user can appear in a use list multiple times, and when this 8558 // happens the uses are usually next to each other in the list. 8559 // To help reduce the number of CSE recomputations, process all 8560 // the uses of this user that we can find this way. 8561 do { 8562 SDUse &Use = UI.getUse(); 8563 8564 // Skip uses of different values from the same node. 8565 if (Use.getResNo() != From.getResNo()) { 8566 ++UI; 8567 continue; 8568 } 8569 8570 // If this node hasn't been modified yet, it's still in the CSE maps, 8571 // so remove its old self from the CSE maps. 8572 if (!UserRemovedFromCSEMaps) { 8573 RemoveNodeFromCSEMaps(User); 8574 UserRemovedFromCSEMaps = true; 8575 } 8576 8577 ++UI; 8578 Use.set(To); 8579 if (To->isDivergent() != From->isDivergent()) 8580 updateDivergence(User); 8581 } while (UI != UE && *UI == User); 8582 // We are iterating over all uses of the From node, so if a use 8583 // doesn't use the specific value, no changes are made. 8584 if (!UserRemovedFromCSEMaps) 8585 continue; 8586 8587 // Now that we have modified User, add it back to the CSE maps. If it 8588 // already exists there, recursively merge the results together. 8589 AddModifiedNodeToCSEMaps(User); 8590 } 8591 8592 // If we just RAUW'd the root, take note. 8593 if (From == getRoot()) 8594 setRoot(To); 8595 } 8596 8597 namespace { 8598 8599 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 8600 /// to record information about a use. 8601 struct UseMemo { 8602 SDNode *User; 8603 unsigned Index; 8604 SDUse *Use; 8605 }; 8606 8607 /// operator< - Sort Memos by User. 
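/// Sorting by the User pointer groups all the uses belonging to one user node
/// next to each other, so ReplaceAllUsesOfValuesWith can remove that user from
/// the CSE maps once, rewrite all of its relevant uses, and re-add it once.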
8608 bool operator<(const UseMemo &L, const UseMemo &R) { 8609 return (intptr_t)L.User < (intptr_t)R.User; 8610 } 8611 8612 } // end anonymous namespace 8613 8614 void SelectionDAG::updateDivergence(SDNode * N) 8615 { 8616 if (TLI->isSDNodeAlwaysUniform(N)) 8617 return; 8618 bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 8619 for (auto &Op : N->ops()) { 8620 if (Op.Val.getValueType() != MVT::Other) 8621 IsDivergent |= Op.getNode()->isDivergent(); 8622 } 8623 if (N->SDNodeBits.IsDivergent != IsDivergent) { 8624 N->SDNodeBits.IsDivergent = IsDivergent; 8625 for (auto U : N->uses()) { 8626 updateDivergence(U); 8627 } 8628 } 8629 } 8630 8631 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) { 8632 DenseMap<SDNode *, unsigned> Degree; 8633 Order.reserve(AllNodes.size()); 8634 for (auto &N : allnodes()) { 8635 unsigned NOps = N.getNumOperands(); 8636 Degree[&N] = NOps; 8637 if (0 == NOps) 8638 Order.push_back(&N); 8639 } 8640 for (size_t I = 0; I != Order.size(); ++I) { 8641 SDNode *N = Order[I]; 8642 for (auto U : N->uses()) { 8643 unsigned &UnsortedOps = Degree[U]; 8644 if (0 == --UnsortedOps) 8645 Order.push_back(U); 8646 } 8647 } 8648 } 8649 8650 #ifndef NDEBUG 8651 void SelectionDAG::VerifyDAGDiverence() { 8652 std::vector<SDNode *> TopoOrder; 8653 CreateTopologicalOrder(TopoOrder); 8654 const TargetLowering &TLI = getTargetLoweringInfo(); 8655 DenseMap<const SDNode *, bool> DivergenceMap; 8656 for (auto &N : allnodes()) { 8657 DivergenceMap[&N] = false; 8658 } 8659 for (auto N : TopoOrder) { 8660 bool IsDivergent = DivergenceMap[N]; 8661 bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA); 8662 for (auto &Op : N->ops()) { 8663 if (Op.Val.getValueType() != MVT::Other) 8664 IsSDNodeDivergent |= DivergenceMap[Op.getNode()]; 8665 } 8666 if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) { 8667 DivergenceMap[N] = true; 8668 } 8669 } 8670 for (auto &N : allnodes()) { 8671 (void)N; 8672 assert(DivergenceMap[&N] == N.isDivergent() && 8673 "Divergence bit inconsistency detected\n"); 8674 } 8675 } 8676 #endif 8677 8678 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving 8679 /// uses of other values produced by From.getNode() alone. The same value 8680 /// may appear in both the From and To list. The Deleted vector is 8681 /// handled the same way as for ReplaceAllUsesWith. 8682 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From, 8683 const SDValue *To, 8684 unsigned Num){ 8685 // Handle the simple, trivial case efficiently. 8686 if (Num == 1) 8687 return ReplaceAllUsesOfValueWith(*From, *To); 8688 8689 transferDbgValues(*From, *To); 8690 8691 // Read up all the uses and make records of them. This helps 8692 // processing new uses that are introduced during the 8693 // replacement process. 8694 SmallVector<UseMemo, 4> Uses; 8695 for (unsigned i = 0; i != Num; ++i) { 8696 unsigned FromResNo = From[i].getResNo(); 8697 SDNode *FromNode = From[i].getNode(); 8698 for (SDNode::use_iterator UI = FromNode->use_begin(), 8699 E = FromNode->use_end(); UI != E; ++UI) { 8700 SDUse &Use = UI.getUse(); 8701 if (Use.getResNo() == FromResNo) { 8702 UseMemo Memo = { *UI, i, &Use }; 8703 Uses.push_back(Memo); 8704 } 8705 } 8706 } 8707 8708 // Sort the uses, so that all the uses from a given User are together. 8709 llvm::sort(Uses); 8710 8711 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size(); 8712 UseIndex != UseIndexEnd; ) { 8713 // We know that this user uses some value of From. 
If it is the right 8714 // value, update it. 8715 SDNode *User = Uses[UseIndex].User; 8716 8717 // This node is about to morph, remove its old self from the CSE maps. 8718 RemoveNodeFromCSEMaps(User); 8719 8720 // The Uses array is sorted, so all the uses for a given User 8721 // are next to each other in the list. 8722 // To help reduce the number of CSE recomputations, process all 8723 // the uses of this user that we can find this way. 8724 do { 8725 unsigned i = Uses[UseIndex].Index; 8726 SDUse &Use = *Uses[UseIndex].Use; 8727 ++UseIndex; 8728 8729 Use.set(To[i]); 8730 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User); 8731 8732 // Now that we have modified User, add it back to the CSE maps. If it 8733 // already exists there, recursively merge the results together. 8734 AddModifiedNodeToCSEMaps(User); 8735 } 8736 } 8737 8738 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG 8739 /// based on their topological order. It returns the number of nodes that were 8740 /// assigned an id (the ids are dense and start at zero). 8741 unsigned SelectionDAG::AssignTopologicalOrder() { 8742 unsigned DAGSize = 0; 8743 8744 // SortedPos tracks the progress of the algorithm. Nodes before it are 8745 // sorted, nodes after it are unsorted. When the algorithm completes 8746 // it is at the end of the list. 8747 allnodes_iterator SortedPos = allnodes_begin(); 8748 8749 // Visit all the nodes. Move nodes with no operands to the front of 8750 // the list immediately. Annotate nodes that do have operands with their 8751 // operand count. Before we do this, the Node Id fields of the nodes 8752 // may contain arbitrary values. After, the Node Id fields for nodes 8753 // before SortedPos will contain the topological sort index, and the 8754 // Node Id fields for nodes at SortedPos and after will contain the 8755 // count of outstanding operands. 8756 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) { 8757 SDNode *N = &*I++; 8758 checkForCycles(N, this); 8759 unsigned Degree = N->getNumOperands(); 8760 if (Degree == 0) { 8761 // A node with no operands, add it to the result array immediately. 8762 N->setNodeId(DAGSize++); 8763 allnodes_iterator Q(N); 8764 if (Q != SortedPos) 8765 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q)); 8766 assert(SortedPos != AllNodes.end() && "Overran node list"); 8767 ++SortedPos; 8768 } else { 8769 // Temporarily use the Node Id as scratch space for the degree count. 8770 N->setNodeId(Degree); 8771 } 8772 } 8773 8774 // Visit all the nodes. As we iterate, move nodes into sorted order, 8775 // such that by the time the end is reached all nodes will be sorted. 8776 for (SDNode &Node : allnodes()) { 8777 SDNode *N = &Node; 8778 checkForCycles(N, this); 8779 // N is in sorted position, so all its uses have one less operand 8780 // that needs to be sorted. 8781 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 8782 UI != UE; ++UI) { 8783 SDNode *P = *UI; 8784 unsigned Degree = P->getNodeId(); 8785 assert(Degree != 0 && "Invalid node degree"); 8786 --Degree; 8787 if (Degree == 0) { 8788 // All of P's operands are sorted, so P may be sorted now. 8789 P->setNodeId(DAGSize++); 8790 if (P->getIterator() != SortedPos) 8791 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P)); 8792 assert(SortedPos != AllNodes.end() && "Overran node list"); 8793 ++SortedPos; 8794 } else { 8795 // Update P's outstanding operand count.
8796 P->setNodeId(Degree); 8797 } 8798 } 8799 if (Node.getIterator() == SortedPos) { 8800 #ifndef NDEBUG 8801 allnodes_iterator I(N); 8802 SDNode *S = &*++I; 8803 dbgs() << "Overran sorted position:\n"; 8804 S->dumprFull(this); dbgs() << "\n"; 8805 dbgs() << "Checking if this is due to cycles\n"; 8806 checkForCycles(this, true); 8807 #endif 8808 llvm_unreachable(nullptr); 8809 } 8810 } 8811 8812 assert(SortedPos == AllNodes.end() && 8813 "Topological sort incomplete!"); 8814 assert(AllNodes.front().getOpcode() == ISD::EntryToken && 8815 "First node in topological sort is not the entry token!"); 8816 assert(AllNodes.front().getNodeId() == 0 && 8817 "First node in topological sort has non-zero id!"); 8818 assert(AllNodes.front().getNumOperands() == 0 && 8819 "First node in topological sort has operands!"); 8820 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 && 8821 "Last node in topologic sort has unexpected id!"); 8822 assert(AllNodes.back().use_empty() && 8823 "Last node in topologic sort has users!"); 8824 assert(DAGSize == allnodes_size() && "Node count mismatch!"); 8825 return DAGSize; 8826 } 8827 8828 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the 8829 /// value is produced by SD. 8830 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) { 8831 if (SD) { 8832 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue()); 8833 SD->setHasDebugValue(true); 8834 } 8835 DbgInfo->add(DB, SD, isParameter); 8836 } 8837 8838 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { 8839 DbgInfo->add(DB); 8840 } 8841 8842 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, 8843 SDValue NewMemOp) { 8844 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node"); 8845 // The new memory operation must have the same position as the old load in 8846 // terms of memory dependency. Create a TokenFactor for the old load and new 8847 // memory operation and update uses of the old load's output chain to use that 8848 // TokenFactor. 
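// Illustrative shape of the rewrite (node names are hypothetical):
//   before: users -> (OldLoad:1)
//   after:  users -> TokenFactor(OldLoad:1, NewMemOp:1)
// so anything that was ordered after the old load is now also ordered after
// the new memory operation.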
8849 SDValue OldChain = SDValue(OldLoad, 1); 8850 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 8851 if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1)) 8852 return NewChain; 8853 8854 SDValue TokenFactor = 8855 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 8856 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 8857 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 8858 return TokenFactor; 8859 } 8860 8861 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op, 8862 Function **OutFunction) { 8863 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol"); 8864 8865 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 8866 auto *Module = MF->getFunction().getParent(); 8867 auto *Function = Module->getFunction(Symbol); 8868 8869 if (OutFunction != nullptr) 8870 *OutFunction = Function; 8871 8872 if (Function != nullptr) { 8873 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace()); 8874 return getGlobalAddress(Function, SDLoc(Op), PtrTy); 8875 } 8876 8877 std::string ErrorStr; 8878 raw_string_ostream ErrorFormatter(ErrorStr); 8879 8880 ErrorFormatter << "Undefined external symbol "; 8881 ErrorFormatter << '"' << Symbol << '"'; 8882 ErrorFormatter.flush(); 8883 8884 report_fatal_error(ErrorStr); 8885 } 8886 8887 //===----------------------------------------------------------------------===// 8888 // SDNode Class 8889 //===----------------------------------------------------------------------===// 8890 8891 bool llvm::isNullConstant(SDValue V) { 8892 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8893 return Const != nullptr && Const->isNullValue(); 8894 } 8895 8896 bool llvm::isNullFPConstant(SDValue V) { 8897 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 8898 return Const != nullptr && Const->isZero() && !Const->isNegative(); 8899 } 8900 8901 bool llvm::isAllOnesConstant(SDValue V) { 8902 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8903 return Const != nullptr && Const->isAllOnesValue(); 8904 } 8905 8906 bool llvm::isOneConstant(SDValue V) { 8907 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8908 return Const != nullptr && Const->isOne(); 8909 } 8910 8911 SDValue llvm::peekThroughBitcasts(SDValue V) { 8912 while (V.getOpcode() == ISD::BITCAST) 8913 V = V.getOperand(0); 8914 return V; 8915 } 8916 8917 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { 8918 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) 8919 V = V.getOperand(0); 8920 return V; 8921 } 8922 8923 SDValue llvm::peekThroughExtractSubvectors(SDValue V) { 8924 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) 8925 V = V.getOperand(0); 8926 return V; 8927 } 8928 8929 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) { 8930 if (V.getOpcode() != ISD::XOR) 8931 return false; 8932 V = peekThroughBitcasts(V.getOperand(1)); 8933 unsigned NumBits = V.getScalarValueSizeInBits(); 8934 ConstantSDNode *C = 8935 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true); 8936 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits); 8937 } 8938 8939 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs, 8940 bool AllowTruncation) { 8941 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8942 return CN; 8943 8944 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8945 BitVector UndefElements; 8946 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 8947 8948 // BuildVectors can truncate their operands. 
Ignore that case here unless 8949 // AllowTruncation is set. 8950 if (CN && (UndefElements.none() || AllowUndefs)) { 8951 EVT CVT = CN->getValueType(0); 8952 EVT NSVT = N.getValueType().getScalarType(); 8953 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 8954 if (AllowTruncation || (CVT == NSVT)) 8955 return CN; 8956 } 8957 } 8958 8959 return nullptr; 8960 } 8961 8962 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts, 8963 bool AllowUndefs, 8964 bool AllowTruncation) { 8965 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8966 return CN; 8967 8968 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8969 BitVector UndefElements; 8970 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements); 8971 8972 // BuildVectors can truncate their operands. Ignore that case here unless 8973 // AllowTruncation is set. 8974 if (CN && (UndefElements.none() || AllowUndefs)) { 8975 EVT CVT = CN->getValueType(0); 8976 EVT NSVT = N.getValueType().getScalarType(); 8977 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 8978 if (AllowTruncation || (CVT == NSVT)) 8979 return CN; 8980 } 8981 } 8982 8983 return nullptr; 8984 } 8985 8986 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) { 8987 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 8988 return CN; 8989 8990 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8991 BitVector UndefElements; 8992 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 8993 if (CN && (UndefElements.none() || AllowUndefs)) 8994 return CN; 8995 } 8996 8997 return nullptr; 8998 } 8999 9000 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, 9001 const APInt &DemandedElts, 9002 bool AllowUndefs) { 9003 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 9004 return CN; 9005 9006 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9007 BitVector UndefElements; 9008 ConstantFPSDNode *CN = 9009 BV->getConstantFPSplatNode(DemandedElts, &UndefElements); 9010 if (CN && (UndefElements.none() || AllowUndefs)) 9011 return CN; 9012 } 9013 9014 return nullptr; 9015 } 9016 9017 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) { 9018 // TODO: may want to use peekThroughBitcast() here. 9019 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs); 9020 return C && C->isNullValue(); 9021 } 9022 9023 bool llvm::isOneOrOneSplat(SDValue N) { 9024 // TODO: may want to use peekThroughBitcast() here. 
9025 unsigned BitWidth = N.getScalarValueSizeInBits(); 9026 ConstantSDNode *C = isConstOrConstSplat(N); 9027 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth; 9028 } 9029 9030 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) { 9031 N = peekThroughBitcasts(N); 9032 unsigned BitWidth = N.getScalarValueSizeInBits(); 9033 ConstantSDNode *C = isConstOrConstSplat(N); 9034 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth; 9035 } 9036 9037 HandleSDNode::~HandleSDNode() { 9038 DropOperands(); 9039 } 9040 9041 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 9042 const DebugLoc &DL, 9043 const GlobalValue *GA, EVT VT, 9044 int64_t o, unsigned TF) 9045 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 9046 TheGlobal = GA; 9047 } 9048 9049 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 9050 EVT VT, unsigned SrcAS, 9051 unsigned DestAS) 9052 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 9053 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 9054 9055 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 9056 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 9057 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 9058 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 9059 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 9060 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 9061 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 9062 9063 // We check here that the size of the memory operand fits within the size of 9064 // the MMO. This is because the MMO might indicate only a possible address 9065 // range instead of specifying the affected memory addresses precisely. 9066 // TODO: Make MachineMemOperands aware of scalable vectors. 9067 assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() && 9068 "Size mismatch!"); 9069 } 9070 9071 /// Profile - Gather unique data for the node. 9072 /// 9073 void SDNode::Profile(FoldingSetNodeID &ID) const { 9074 AddNodeIDNode(ID, this); 9075 } 9076 9077 namespace { 9078 9079 struct EVTArray { 9080 std::vector<EVT> VTs; 9081 9082 EVTArray() { 9083 VTs.reserve(MVT::LAST_VALUETYPE); 9084 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 9085 VTs.push_back(MVT((MVT::SimpleValueType)i)); 9086 } 9087 }; 9088 9089 } // end anonymous namespace 9090 9091 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 9092 static ManagedStatic<EVTArray> SimpleVTArray; 9093 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 9094 9095 /// getValueTypeList - Return a pointer to the specified value type. 9096 /// 9097 const EVT *SDNode::getValueTypeList(EVT VT) { 9098 if (VT.isExtended()) { 9099 sys::SmartScopedLock<true> Lock(*VTMutex); 9100 return &(*EVTs->insert(VT).first); 9101 } else { 9102 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 9103 "Value type out of range!"); 9104 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 9105 } 9106 } 9107 9108 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 9109 /// indicated value. This method ignores uses of other values defined by this 9110 /// operation. 
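///
/// For example (illustrative), for a load node Ld, hasNUsesOfValue(1, 0) asks
/// whether exactly one user consumes the loaded value, regardless of how many
/// users the chain result Ld:1 has:
/// \code
///   if (Ld->hasNUsesOfValue(1, 0)) {
///     // Exactly one user of the loaded value (result 0).
///   }
/// \endcode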
9111 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { 9112 assert(Value < getNumValues() && "Bad value!"); 9113 9114 // TODO: Only iterate over uses of a given value of the node 9115 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { 9116 if (UI.getUse().getResNo() == Value) { 9117 if (NUses == 0) 9118 return false; 9119 --NUses; 9120 } 9121 } 9122 9123 // Found exactly the right number of uses? 9124 return NUses == 0; 9125 } 9126 9127 /// hasAnyUseOfValue - Return true if there are any use of the indicated 9128 /// value. This method ignores uses of other values defined by this operation. 9129 bool SDNode::hasAnyUseOfValue(unsigned Value) const { 9130 assert(Value < getNumValues() && "Bad value!"); 9131 9132 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) 9133 if (UI.getUse().getResNo() == Value) 9134 return true; 9135 9136 return false; 9137 } 9138 9139 /// isOnlyUserOf - Return true if this node is the only use of N. 9140 bool SDNode::isOnlyUserOf(const SDNode *N) const { 9141 bool Seen = false; 9142 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 9143 SDNode *User = *I; 9144 if (User == this) 9145 Seen = true; 9146 else 9147 return false; 9148 } 9149 9150 return Seen; 9151 } 9152 9153 /// Return true if the only users of N are contained in Nodes. 9154 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) { 9155 bool Seen = false; 9156 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 9157 SDNode *User = *I; 9158 if (llvm::any_of(Nodes, 9159 [&User](const SDNode *Node) { return User == Node; })) 9160 Seen = true; 9161 else 9162 return false; 9163 } 9164 9165 return Seen; 9166 } 9167 9168 /// isOperand - Return true if this node is an operand of N. 9169 bool SDValue::isOperandOf(const SDNode *N) const { 9170 return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; }); 9171 } 9172 9173 bool SDNode::isOperandOf(const SDNode *N) const { 9174 return any_of(N->op_values(), 9175 [this](SDValue Op) { return this == Op.getNode(); }); 9176 } 9177 9178 /// reachesChainWithoutSideEffects - Return true if this operand (which must 9179 /// be a chain) reaches the specified operand without crossing any 9180 /// side-effecting instructions on any chain path. In practice, this looks 9181 /// through token factors and non-volatile loads. In order to remain efficient, 9182 /// this only looks a couple of nodes in, it does not do an exhaustive search. 9183 /// 9184 /// Note that we only need to examine chains when we're searching for 9185 /// side-effects; SelectionDAG requires that all side-effects are represented 9186 /// by chains, even if another operand would force a specific ordering. This 9187 /// constraint is necessary to allow transformations like splitting loads. 9188 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, 9189 unsigned Depth) const { 9190 if (*this == Dest) return true; 9191 9192 // Don't search too deeply, we just want to be able to see through 9193 // TokenFactor's etc. 9194 if (Depth == 0) return false; 9195 9196 // If this is a token factor, all inputs to the TF happen in parallel. 9197 if (getOpcode() == ISD::TokenFactor) { 9198 // First, try a shallow search. 9199 if (is_contained((*this)->ops(), Dest)) { 9200 // We found the chain we want as an operand of this TokenFactor. 
9201 // Essentially, we reach the chain without side-effects if we could 9202 // serialize the TokenFactor into a simple chain of operations with 9203 // Dest as the last operation. This is automatically true if the 9204 // chain has one use: there are no other ordering constraints. 9205 // If the chain has more than one use, we give up: some other 9206 // use of Dest might force a side-effect between Dest and the current 9207 // node. 9208 if (Dest.hasOneUse()) 9209 return true; 9210 } 9211 // Next, try a deep search: check whether every operand of the TokenFactor 9212 // reaches Dest. 9213 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 9214 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 9215 }); 9216 } 9217 9218 // Loads don't have side effects, look through them. 9219 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 9220 if (Ld->isUnordered()) 9221 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 9222 } 9223 return false; 9224 } 9225 9226 bool SDNode::hasPredecessor(const SDNode *N) const { 9227 SmallPtrSet<const SDNode *, 32> Visited; 9228 SmallVector<const SDNode *, 16> Worklist; 9229 Worklist.push_back(this); 9230 return hasPredecessorHelper(N, Visited, Worklist); 9231 } 9232 9233 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 9234 this->Flags.intersectWith(Flags); 9235 } 9236 9237 SDValue 9238 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, 9239 ArrayRef<ISD::NodeType> CandidateBinOps, 9240 bool AllowPartials) { 9241 // The pattern must end in an extract from index 0. 9242 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || 9243 !isNullConstant(Extract->getOperand(1))) 9244 return SDValue(); 9245 9246 // Match against one of the candidate binary ops. 9247 SDValue Op = Extract->getOperand(0); 9248 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) { 9249 return Op.getOpcode() == unsigned(BinOp); 9250 })) 9251 return SDValue(); 9252 9253 // Floating-point reductions may require relaxed constraints on the final step 9254 // of the reduction because they may reorder intermediate operations. 9255 unsigned CandidateBinOp = Op.getOpcode(); 9256 if (Op.getValueType().isFloatingPoint()) { 9257 SDNodeFlags Flags = Op->getFlags(); 9258 switch (CandidateBinOp) { 9259 case ISD::FADD: 9260 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation()) 9261 return SDValue(); 9262 break; 9263 default: 9264 llvm_unreachable("Unhandled FP opcode for binop reduction"); 9265 } 9266 } 9267 9268 // Matching failed - attempt to see if we did enough stages that a partial 9269 // reduction from a subvector is possible. 9270 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) { 9271 if (!AllowPartials || !Op) 9272 return SDValue(); 9273 EVT OpVT = Op.getValueType(); 9274 EVT OpSVT = OpVT.getScalarType(); 9275 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts); 9276 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0)) 9277 return SDValue(); 9278 BinOp = (ISD::NodeType)CandidateBinOp; 9279 return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op, 9280 getVectorIdxConstant(0, SDLoc(Op))); 9281 }; 9282 9283 // At each stage, we're looking for something that looks like: 9284 // %s = shufflevector <8 x i32> %op, <8 x i32> undef, 9285 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, 9286 // i32 undef, i32 undef, i32 undef, i32 undef> 9287 // %a = binop <8 x i32> %op, %s 9288 // Where the mask changes according to the stage. E.g. 
for a 3-stage pyramid, 9289 // we expect something like: 9290 // <4,5,6,7,u,u,u,u> 9291 // <2,3,u,u,u,u,u,u> 9292 // <1,u,u,u,u,u,u,u> 9293 // While a partial reduction match would be: 9294 // <2,3,u,u,u,u,u,u> 9295 // <1,u,u,u,u,u,u,u> 9296 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements()); 9297 SDValue PrevOp; 9298 for (unsigned i = 0; i < Stages; ++i) { 9299 unsigned MaskEnd = (1 << i); 9300 9301 if (Op.getOpcode() != CandidateBinOp) 9302 return PartialReduction(PrevOp, MaskEnd); 9303 9304 SDValue Op0 = Op.getOperand(0); 9305 SDValue Op1 = Op.getOperand(1); 9306 9307 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0); 9308 if (Shuffle) { 9309 Op = Op1; 9310 } else { 9311 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1); 9312 Op = Op0; 9313 } 9314 9315 // The first operand of the shuffle should be the same as the other operand 9316 // of the binop. 9317 if (!Shuffle || Shuffle->getOperand(0) != Op) 9318 return PartialReduction(PrevOp, MaskEnd); 9319 9320 // Verify the shuffle has the expected (at this stage of the pyramid) mask. 9321 for (int Index = 0; Index < (int)MaskEnd; ++Index) 9322 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index)) 9323 return PartialReduction(PrevOp, MaskEnd); 9324 9325 PrevOp = Op; 9326 } 9327 9328 BinOp = (ISD::NodeType)CandidateBinOp; 9329 return Op; 9330 } 9331 9332 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 9333 assert(N->getNumValues() == 1 && 9334 "Can't unroll a vector with multiple results!"); 9335 9336 EVT VT = N->getValueType(0); 9337 unsigned NE = VT.getVectorNumElements(); 9338 EVT EltVT = VT.getVectorElementType(); 9339 SDLoc dl(N); 9340 9341 SmallVector<SDValue, 8> Scalars; 9342 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 9343 9344 // If ResNE is 0, fully unroll the vector op. 9345 if (ResNE == 0) 9346 ResNE = NE; 9347 else if (NE > ResNE) 9348 NE = ResNE; 9349 9350 unsigned i; 9351 for (i= 0; i != NE; ++i) { 9352 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 9353 SDValue Operand = N->getOperand(j); 9354 EVT OperandVT = Operand.getValueType(); 9355 if (OperandVT.isVector()) { 9356 // A vector operand; extract a single element. 9357 EVT OperandEltVT = OperandVT.getVectorElementType(); 9358 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, 9359 Operand, getVectorIdxConstant(i, dl)); 9360 } else { 9361 // A scalar operand; just use it as is. 
9362 Operands[j] = Operand; 9363 } 9364 } 9365 9366 switch (N->getOpcode()) { 9367 default: { 9368 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 9369 N->getFlags())); 9370 break; 9371 } 9372 case ISD::VSELECT: 9373 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 9374 break; 9375 case ISD::SHL: 9376 case ISD::SRA: 9377 case ISD::SRL: 9378 case ISD::ROTL: 9379 case ISD::ROTR: 9380 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 9381 getShiftAmountOperand(Operands[0].getValueType(), 9382 Operands[1]))); 9383 break; 9384 case ISD::SIGN_EXTEND_INREG: { 9385 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 9386 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 9387 Operands[0], 9388 getValueType(ExtVT))); 9389 } 9390 } 9391 } 9392 9393 for (; i < ResNE; ++i) 9394 Scalars.push_back(getUNDEF(EltVT)); 9395 9396 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 9397 return getBuildVector(VecVT, dl, Scalars); 9398 } 9399 9400 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( 9401 SDNode *N, unsigned ResNE) { 9402 unsigned Opcode = N->getOpcode(); 9403 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO || 9404 Opcode == ISD::USUBO || Opcode == ISD::SSUBO || 9405 Opcode == ISD::UMULO || Opcode == ISD::SMULO) && 9406 "Expected an overflow opcode"); 9407 9408 EVT ResVT = N->getValueType(0); 9409 EVT OvVT = N->getValueType(1); 9410 EVT ResEltVT = ResVT.getVectorElementType(); 9411 EVT OvEltVT = OvVT.getVectorElementType(); 9412 SDLoc dl(N); 9413 9414 // If ResNE is 0, fully unroll the vector op. 9415 unsigned NE = ResVT.getVectorNumElements(); 9416 if (ResNE == 0) 9417 ResNE = NE; 9418 else if (NE > ResNE) 9419 NE = ResNE; 9420 9421 SmallVector<SDValue, 8> LHSScalars; 9422 SmallVector<SDValue, 8> RHSScalars; 9423 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); 9424 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); 9425 9426 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); 9427 SDVTList VTs = getVTList(ResEltVT, SVT); 9428 SmallVector<SDValue, 8> ResScalars; 9429 SmallVector<SDValue, 8> OvScalars; 9430 for (unsigned i = 0; i < NE; ++i) { 9431 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); 9432 SDValue Ov = 9433 getSelect(dl, OvEltVT, Res.getValue(1), 9434 getBoolConstant(true, dl, OvEltVT, ResVT), 9435 getConstant(0, dl, OvEltVT)); 9436 9437 ResScalars.push_back(Res); 9438 OvScalars.push_back(Ov); 9439 } 9440 9441 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); 9442 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); 9443 9444 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE); 9445 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE); 9446 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars), 9447 getBuildVector(NewOvVT, dl, OvScalars)); 9448 } 9449 9450 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 9451 LoadSDNode *Base, 9452 unsigned Bytes, 9453 int Dist) const { 9454 if (LD->isVolatile() || Base->isVolatile()) 9455 return false; 9456 // TODO: probably too restrictive for atomics, revisit 9457 if (!LD->isSimple()) 9458 return false; 9459 if (LD->isIndexed() || Base->isIndexed()) 9460 return false; 9461 if (LD->getChain() != Base->getChain()) 9462 return false; 9463 EVT VT = LD->getValueType(0); 9464 if (VT.getSizeInBits() / 8 != Bytes) 9465 return false; 9466 9467 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); 9468 auto LocDecomp = BaseIndexOffset::match(LD, *this); 
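// If both addresses decompose to the same base (and index), the loads are
// consecutive exactly when their constant offsets differ by Dist * Bytes;
// e.g., illustratively, Base+4 relative to Base+0 with Bytes = 4 matches
// Dist = 1.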
  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlign - Infer alignment of a load / store address. Return None if
/// it cannot be inferred.
MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV = nullptr;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    KnownBits Known(PtrWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    if (AlignBits)
      return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = INT_MIN;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != INT_MIN) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
  }

  return None;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
/// type, dependent on an enveloping VT that has been split into two identical
/// pieces. Sets the HiIsEmpty flag when the hi type has zero storage size.
std::pair<EVT, EVT>
SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
                                       bool *HiIsEmpty) const {
  EVT EltTp = VT.getVectorElementType();
  bool IsScalable = VT.isScalableVector();
  // Examples:
  // custom VL=8 with enveloping VL=8/8 yields 8/0 (hi empty)
  // custom VL=9 with enveloping VL=8/8 yields 8/1
  // custom VL=10 with enveloping VL=8/8 yields 8/2
  // etc.
  unsigned VTNumElts = VT.getVectorNumElements();
  unsigned EnvNumElts = EnvVT.getVectorNumElements();
  EVT LoVT, HiVT;
  if (VTNumElts > EnvNumElts) {
    LoVT = EnvVT;
    HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts,
                            IsScalable);
    *HiIsEmpty = false;
  } else {
    // Flag that the hi type has zero storage size, but return the split
    // envelope type (this would be easier if vector types with zero elements
    // were allowed).
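    // Lo keeps all of VT's elements; Hi is only a placeholder type whose
    // contents callers are expected to ignore when *HiIsEmpty is set.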
    LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts, IsScalable);
    HiVT = EnvVT;
    *HiIsEmpty = true;
  }
  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
             N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo =
      getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
  return std::make_pair(Lo, Hi);
}

/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
  EVT VT = N.getValueType();
  EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                NextPowerOf2(VT.getVectorNumElements()));
  return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
                 getVectorIdxConstant(0, DL));
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count,
                                         EVT EltVT) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();
  if (EltVT == EVT())
    EltVT = VT.getVectorElementType();
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op,
                           getVectorIdxConstant(i, SL)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned int NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ?
        NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  assert(getNumOperands() == DemandedElts.getBitWidth() &&
         "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip Chain. It does not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}

SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}