//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}
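// Illustrative sketch (not part of the original file): clients observe DAG
// mutations by subclassing SelectionDAG::DAGUpdateListener, whose default
// callbacks (defined below) do nothing.  The listener's constructor links it
// into the DAG's listener chain and its destructor unlinks it, so a typical
// use is a scoped helper such as:
//
//   struct WorklistRemover : SelectionDAG::DAGUpdateListener {
//     SmallPtrSetImpl<SDNode *> &Worklist;
//     WorklistRemover(SelectionDAG &DAG, SmallPtrSetImpl<SDNode *> &WL)
//         : DAGUpdateListener(DAG), Worklist(WL) {}
//     void NodeDeleted(SDNode *N, SDNode *E) override { Worklist.erase(N); }
//   };
//
// The names WorklistRemover and Worklist are hypothetical examples only.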
// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
//                             ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;
  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}
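// Worked example of the bit-counting trick above (illustrative comment, not
// from the original file): after type legalization a v4i8 build_vector may
// hold its elements as i32 constants.  With EltSize == 8, the all-ones check
// accepts an element of 0x000000FF (countTrailingOnes() == 8 >= 8) and
// rejects 0x0000007F (countTrailingOnes() == 7); the high 24 bits are
// irrelevant because only the low EltSize bits survive in the vector.  The
// all-zeros check uses countTrailingZeros() in exactly the same way.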
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}
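// Illustrative use of the matchers above (a sketch, not code from this file):
// a combine that should only fire when every element of a constant or
// constant build_vector is a power of two could be written as
//
//   auto IsPow2 = [](ConstantSDNode *C) {
//     return C && C->getAPIntValue().isPowerOf2();
//   };
//   if (ISD::matchUnaryPredicate(N->getOperand(1), IsPow2))
//     ...
//
// When AllowUndefs is true the predicate is also invoked with a null
// ConstantSDNode for undef lanes, so predicates must tolerate nullptr.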
ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
  switch (VecReduceOpcode) {
  default:
    llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_SEQ_FADD:
    return ISD::FADD;
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_SEQ_FMUL:
    return ISD::FMUL;
  case ISD::VECREDUCE_ADD:
    return ISD::ADD;
  case ISD::VECREDUCE_MUL:
    return ISD::MUL;
  case ISD::VECREDUCE_AND:
    return ISD::AND;
  case ISD::VECREDUCE_OR:
    return ISD::OR;
  case ISD::VECREDUCE_XOR:
    return ISD::XOR;
  case ISD::VECREDUCE_SMAX:
    return ISD::SMAX;
  case ISD::VECREDUCE_SMIN:
    return ISD::SMIN;
  case ISD::VECREDUCE_UMAX:
    return ISD::UMAX;
  case ISD::VECREDUCE_UMIN:
    return ISD::UMIN;
  case ISD::VECREDUCE_FMAX:
    return ISD::FMAXNUM;
  case ISD::VECREDUCE_FMIN:
    return ISD::FMINNUM;
  }
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  return getSetCCInverseImpl(Op, Type.isInteger());
}

ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
                                               bool isIntegerLike) {
  return getSetCCInverseImpl(Op, isIntegerLike);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if the result is an unsigned comparison. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
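// Worked examples for the bit manipulations above (illustrative comment, not
// from the original file).  The low bits of ISD::CondCode encode E (bit 0),
// G (bit 1), L (bit 2) and U (bit 3):
//   getSetCCSwappedOperands: swapping the L and G bits turns "x < y" into
//     "y > x", e.g. SETLT -> SETGT and SETULE -> SETUGE.
//   getSetCCInverse: for integers, flipping L, G and E maps SETLT to SETGE;
//     for FP, flipping U as well maps SETOLT to SETUGE.
//   getSetCCOrOperation: OR-ing the bits of SETLT and SETGT yields SETNE,
//     while SETUGT | SETULT produces SETUNE, which the code above
//     canonicalizes to SETNE for integer types.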
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlign().value());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}
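// Illustrative sketch of how the profiling helpers above drive CSE when a
// node is created (this mirrors the pattern used by getGlobalAddress() and
// the other factory functions later in this file):
//
//   FoldingSetNodeID ID;
//   AddNodeIDNode(ID, Opc, getVTList(VT), Ops);
//   ID.AddPointer(GV);                       // node-specific payload
//   void *IP = nullptr;
//   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
//     return SDValue(E, 0);                  // reuse the identical node
//   auto *N = newSDNode<GlobalAddressSDNode>(/* ... */);
//   CSEMap.InsertNode(N, IP);                // publish at insertion slot IP
//   InsertNode(N);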
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}
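// Illustrative note on the deletion protocol above (not from the original
// file): HandleSDNode serves as a temporary artificial use.  A minimal
// sketch of protecting a value across a call that may delete nodes:
//
//   HandleSDNode Handle(Val);      // keeps Val's node alive
//   DAG.RemoveDeadNode(SomeNode);  // may cascade deletions
//   Val = Handle.getValue();       // re-read; the node may have been RAUWed
//
// Val and SomeNode are hypothetical names for this example.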
#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it.  Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

Align SelectionDAG::getEVTAlign(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlign(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis *Divergence,
                        ProfileSummaryInfo *PSIin,
                        BlockFrequencyInfo *BFIin) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
  PSI = PSIin;
  BFI = BFIin;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

bool SelectionDAG::shouldOptForSize() const {
  return MF->getFunction().hasOptSize() ||
      llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location.  Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

std::pair<SDValue, SDValue>
SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
                                       const SDLoc &DL, EVT VT) {
  assert(!VT.bitsEq(Op.getValueType()) &&
         "Strict no-op FP extend/round not allowed.");
  SDValue Res =
      VT.bitsGT(Op.getValueType())
          ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
          : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
                    {Chain, Op, getIntPtrConstant(0, DL)});

  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}
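// For example (illustrative): getSExtOrTrunc(Op, DL, MVT::i32) emits
// (sign_extend Op) when Op is i16 and (truncate Op) when Op is i64; when the
// types already match, getNode() treats the same-type truncate as a no-op
// and simply returns Op.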
SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  EVT OpVT = Op.getValueType();
  assert(VT.isInteger() && OpVT.isInteger() &&
         "Cannot getZeroExtendInReg FP types");
  assert(VT.isVector() == OpVT.isVector() &&
         "getZeroExtendInReg type should be vector iff the operand "
         "type is vector!");
  assert((!VT.isVector() ||
          VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
         "Vector element counts must match in getZeroExtendInReg");
  assert(VT.bitsLE(OpVT) && "Not extending!");
  if (OpVT == VT)
    return Op;
  APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
                                   VT.getScalarSizeInBits());
  return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
}

SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}
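// Worked example for getZeroExtendInReg above (illustrative): with an i32
// operand and VT == i8, the mask is APInt::getLowBitsSet(32, 8) == 0xFF, so
// the result is (and Op, 0xFF) -- the value zero-extended from its low
// 8 bits while staying in an i32 register.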
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V =
        getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isScalableVector())
    Result = getSplatVector(VT, DL, Result);
  else if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}
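// Worked example of the expansion path above (illustrative): requesting a
// v2i64 splat of the constant 0x00000001FFFFFFFF on a 32-bit target where
// i64 expands to i32 yields ViaEltVT == i32 and ViaVecVT == v4i32.  The
// value is split little-endian-first into the parts {0xFFFFFFFF,
// 0x00000001}, those parts are replicated once per requested element, and
// the resulting v4i32 BUILD_VECTOR is bitcast back to v2i64.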
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
                                             const SDLoc &DL, bool LegalTypes) {
  assert(VT.isInteger() && "Shift amount is not an integer type!");
  EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
  return getConstant(Val, DL, ShiftVT);
}

SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
                                           bool isTarget) {
  return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0,
  // and we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isScalableVector())
    Result = getSplatVector(VT, DL, Result);
  else if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16 || EltVT == MVT::bf16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (!Alignment)
    Alignment = shouldOptForSize()
                    ? getDataLayout().getABITypeAlign(C->getType())
                    : getDataLayout().getPrefTypeAlign(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment->value());
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
  return V;
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      MaybeAlign Alignment, int Offset,
                                      bool isTarget, unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (!Alignment)
    Alignment = getDataLayout().getPrefTypeAlign(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment->value());
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
                           ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}
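// Illustrative example for the shuffle canonicalizations below (not from the
// original file): getVectorShuffle(v4i32, dl, A, A, <0,5,2,7>) sees both
// inputs are the same node, so it replaces N2 with undef and rewrites the
// mask to <0,1,2,3>; that mask is then recognized as the identity shuffle
// and the call simply returns A.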
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  if (TLI->hasVectorBlend()) {
    // If shuffling a splat, try to blend the splat instead. We do this here so
    // that even when this arises during lowering we don't have to re-handle it.
    auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      if (!Splat)
        return;

      for (int i = 0; i < NElts; ++i) {
        if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
          continue;

        // If this input comes from undef, mark it as such.
        if (UndefElements[MaskVec[i] - Offset]) {
          MaskVec[i] = -1;
          continue;
        }

        // If we can blend a non-undef lane, use that instead.
        if (!UndefElements[i])
          MaskVec[i] = i + Offset;
      }
    };
    if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
      BlendSplat(N1BV, 0);
    if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
      BlendSplat(N2BV, NElts);
  }

  // Canonicalize all index into lhs, -> shuffle lhs, undef
  // Canonicalize all index into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If Identity shuffle return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
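  // (Every lane of a splat holds the same value, so any permutation of its
  // lanes -- with no lanes taken from the other, undef, operand -- produces
  // the very same vector.)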
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts. We check that these don't change the number
    // (and size) of elements and just change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
          V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements match or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it.  This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  llvm::copy(MaskVec, MaskAlloc);

  auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
                                           dl.getDebugLoc(), MaskAlloc);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
  EVT VT = SV.getValueType(0);
  SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
  ShuffleVectorSDNode::commuteMask(MaskVec);

  SDValue Op0 = SV.getOperand(0);
  SDValue Op1 = SV.getOperand(1);
  return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
  ID.AddInteger(RegNo);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
  N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
  ID.AddPointer(RegMask);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
                                 MCSymbol *Label) {
  return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
}

SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
                                   SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
  ID.AddPointer(Label);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N =
      newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset, bool isTarget,
                                      unsigned TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
  ID.AddPointer(V);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<SrcValueSDNode>(V);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
  ID.AddPointer(MD);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<MDNodeSDNode>(MD);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
  if (VT == V.getValueType())
    return V;

  return getNode(ISD::BITCAST, SDLoc(V), VT, V);
}

SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VT, SrcAS, DestAS);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFreeze(SDValue V) {
  return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
}

/// getShiftAmountOperand - Return the specified value cast to
/// the target's desired shift amount type.
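/// For instance, on a target whose shift amount type for i64 shifts is i8
/// (an illustrative assumption, not taken from this source), an i32 shift
/// amount would be truncated to i8 here, and an i1 amount zero-extended.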
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
}

SDValue SelectionDAG::expandVAArg(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  const MaybeAlign MA(Node->getConstantOperandVal(3));

  SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
                               Tmp2, MachinePointerInfo(V));
  SDValue VAList = VAListLoad;
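  // If the argument is overaligned, round the va_list pointer up to the
  // required boundary first: VAList = (VAList + Align - 1) & ~(Align - 1).
  // E.g. with a 16-byte alignment, a pointer value of 0x...18 becomes 0x...20.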
  if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
    VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
                     getConstant(MA->value() - 1, dl, VAList.getValueType()));

    VAList =
        getNode(ISD::AND, dl, VAList.getValueType(), VAList,
                getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
  }

  // Increment the pointer, VAList, to the next vaarg.
  Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
                 getConstant(getDataLayout().getTypeAllocSize(
                                 VT.getTypeForEVT(*getContext())),
                             dl, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer.
  Tmp1 =
      getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
  // Load the actual argument out of the pointer VAList.
  return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
}

SDValue SelectionDAG::expandVACopy(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
  // This defaults to loading a pointer from the input and storing it to the
  // output, returning the chain.
  const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
  const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
  SDValue Tmp1 =
      getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
              Node->getOperand(2), MachinePointerInfo(VS));
  return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
                  MachinePointerInfo(VD));
}

Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
  const DataLayout &DL = getDataLayout();
  Type *Ty = VT.getTypeForEVT(*getContext());
  Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);

  if (TLI->isTypeLegal(VT) || !VT.isVector())
    return RedAlign;

  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  const Align StackAlign = TFI->getStackAlign();

  // See if we can choose a smaller ABI alignment in cases where it's an
  // illegal vector type that will get broken down.
  if (RedAlign > StackAlign) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
                                NumIntermediates, RegisterVT);
    Ty = IntermediateVT.getTypeForEVT(*getContext());
    Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
    if (RedAlign2 < RedAlign)
      RedAlign = RedAlign2;
  }

  return RedAlign;
}

SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
  MachineFrameInfo &MFI = MF->getFrameInfo();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  int StackID = 0;
  if (Bytes.isScalable())
    StackID = TFI->getStackIDForScalableVectors();
  // The stack id gives an indication of whether the object is scalable or
  // not, so it's safe to pass in the minimum size here.
  int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
                                       false, nullptr, StackID);
  return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  Type *Ty = VT.getTypeForEVT(*getContext());
  Align StackAlign =
      std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
  return CreateStackTemporary(VT.getStoreSize(), StackAlign);
}

SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  TypeSize VT1Size = VT1.getStoreSize();
  TypeSize VT2Size = VT2.getStoreSize();
  assert(VT1Size.isScalable() == VT2Size.isScalable() &&
         "Don't know how to choose the maximum size when creating a stack "
         "temporary");
  TypeSize Bytes =
      VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;

  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const DataLayout &DL = getDataLayout();
  Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
  return CreateStackTemporary(Bytes, Align);
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
                                ISD::CondCode Cond, const SDLoc &dl) {
  EVT OpVT = N1.getValueType();

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!OpVT.isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (OpVT.isInteger()) {
    // For EQ and NE, we can always pick a value for the undef to make the
    // predicate pass or fail, so we can return undef.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    // icmp eq/ne X, undef -> undef.
    if ((N1.isUndef() || N2.isUndef()) &&
        (Cond == ISD::SETEQ || Cond == ISD::SETNE))
      return getUNDEF(VT);

    // If both operands are undef, we can return undef for int comparison.
    // icmp undef, undef -> undef.
    if (N1.isUndef() && N2.isUndef())
      return getUNDEF(VT);

    // icmp X, X -> true/false
    // icmp X, undef -> true/false because undef could be X.
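    // (E.g. fold (setcc X, X, setge) -> true and (setcc X, X, setult) ->
    // false, whatever X is.)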
    if (N1 == N2)
      return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getBoolConstant(C1 == C2, dl, VT, OpVT);
      case ISD::SETNE:  return getBoolConstant(C1 != C2, dl, VT, OpVT);
      case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
      case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
      case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
      case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
      case ISD::SETLT:  return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
      case ISD::SETGT:  return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
      case ISD::SETLE:  return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
      case ISD::SETGE:  return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
      }
    }
  }

  auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);

  if (N1CFP && N2CFP) {
    APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
    switch (Cond) {
    default: break;
    case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
                                             VT, OpVT);
    case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                        return getUNDEF(VT);
                      LLVM_FALLTHROUGH;
    case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpEqual, dl, VT, OpVT);
    case ISD::SETO:   return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETUO:  return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
                                             OpVT);
    case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
                                             R==APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpUnordered, dl, VT,
                                             OpVT);
    case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
                                             VT, OpVT);
    case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
                                             OpVT);
    }
  } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
    // Ensure that the constant occurs on the RHS.
    ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
    if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
      return SDValue();
    return getSetCC(dl, VT, N2, N1, SwappedCond);
  } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
             (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
    // If an operand is known to be a NaN (or an undef that could be a NaN), we
    // can fold it.
    // Choosing NaN for the undef will always make unordered comparisons
    // succeed and ordered comparisons fail.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    switch (ISD::getUnorderedFlavor(Cond)) {
    default:
      llvm_unreachable("Unknown flavor!");
    case 0: // Known false.
      return getBoolConstant(false, dl, VT, OpVT);
    case 1: // Known true.
      return getBoolConstant(true, dl, VT, OpVT);
    case 2: // Undefined.
      return getUNDEF(VT);
    }
  }

  // Could not fold it.
  return SDValue();
}

/// See if the specified operand can be simplified with the knowledge that only
/// the bits specified by DemandedBits are used.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
  EVT VT = V.getValueType();

  if (VT.isScalableVector())
    return SDValue();

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return GetDemandedBits(V, DemandedBits, DemandedElts);
}

/// See if the specified operand can be simplified with the knowledge that only
/// the bits specified by DemandedBits are used in the elements specified by
/// DemandedElts.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
                                      const APInt &DemandedElts) {
  switch (V.getOpcode()) {
  default:
    return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
                                                *this, 0);
  case ISD::Constant: {
    const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
    APInt NewVal = CVal & DemandedBits;
    if (NewVal != CVal)
      return getConstant(NewVal, SDLoc(V), V.getValueType());
    break;
  }
  case ISD::SRL:
    // Only look at single-use SRLs.
    if (!V.getNode()->hasOneUse())
      break;
    if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
      // See if we can recursively simplify the LHS.
      unsigned Amt = RHSC->getZExtValue();

      // Watch out for shift count overflow though.
      if (Amt >= DemandedBits.getBitWidth())
        break;
      APInt SrcDemandedBits = DemandedBits << Amt;
      if (SDValue SimplifyLHS =
              GetDemandedBits(V.getOperand(0), SrcDemandedBits))
        return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
                       V.getOperand(1));
    }
    break;
  }
  return SDValue();
}

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
/// use this predicate to simplify operations downstream.
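/// (Implemented as a MaskedValueIsZero query against the sign-bit mask; e.g.
/// for an i8 value this asks whether bit 7 -- mask 0x80 -- is known zero.)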
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
/// this predicate to simplify operations downstream.  Mask is known to be zero
/// for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
                                     unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
/// DemandedElts.  We use this predicate to simplify operations downstream.
/// Mask is known to be zero for bits that V cannot have.
bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
                                     const APInt &DemandedElts,
                                     unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
}

/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
                                        unsigned Depth) const {
  return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
}
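// For example (illustrative): if V is (zext i8 %x to i32), then
// MaskedValueIsZero(V, 0xFFFFFF00) is true, because the zero-extension
// guarantees the top 24 bits of V are zero.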
/// isSplatValue - Return true if the vector V has the same value
/// across all DemandedElts. For scalable vectors it does not make
/// sense to specify which elements are demanded or undefined, therefore
/// they are simply ignored.
bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
                                APInt &UndefElts) {
  EVT VT = V.getValueType();
  assert(VT.isVector() && "Vector type expected");

  if (!VT.isScalableVector() && !DemandedElts)
    return false; // No demanded elts, better to assume we don't know anything.

  // Deal with some common cases here that work for both fixed and scalable
  // vector types.
  switch (V.getOpcode()) {
  case ISD::SPLAT_VECTOR:
    UndefElts = V.getOperand(0).isUndef()
                    ? APInt::getAllOnesValue(DemandedElts.getBitWidth())
                    : APInt(DemandedElts.getBitWidth(), 0);
    return true;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND: {
    APInt UndefLHS, UndefRHS;
    SDValue LHS = V.getOperand(0);
    SDValue RHS = V.getOperand(1);
    if (isSplatValue(LHS, DemandedElts, UndefLHS) &&
        isSplatValue(RHS, DemandedElts, UndefRHS)) {
      UndefElts = UndefLHS | UndefRHS;
      return true;
    }
    break;
  }
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    return isSplatValue(V.getOperand(0), DemandedElts, UndefElts);
  }

  // We don't support other cases than those above for scalable vectors at
  // the moment.
  if (VT.isScalableVector())
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
  UndefElts = APInt::getNullValue(NumElts);

  switch (V.getOpcode()) {
  case ISD::BUILD_VECTOR: {
    SDValue Scl;
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue Op = V.getOperand(i);
      if (Op.isUndef()) {
        UndefElts.setBit(i);
        continue;
      }
      if (!DemandedElts[i])
        continue;
      if (Scl && Scl != Op)
        return false;
      Scl = Op;
    }
    return true;
  }
  case ISD::VECTOR_SHUFFLE: {
    // Check if this is a shuffle node doing a splat.
    // TODO: Do we need to handle shuffle(splat, undef, mask)?
    int SplatIndex = -1;
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
    for (int i = 0; i != (int)NumElts; ++i) {
      int M = Mask[i];
      if (M < 0) {
        UndefElts.setBit(i);
        continue;
      }
      if (!DemandedElts[i])
        continue;
      if (0 <= SplatIndex && SplatIndex != M)
        return false;
      SplatIndex = M;
    }
    return true;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = V.getOperand(0);
    uint64_t Idx = V.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt UndefSrcElts;
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
    if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts)) {
      UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
      return true;
    }
    break;
  }
  }

  return false;
}

/// Helper wrapper to main isSplatValue function.
bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
  EVT VT = V.getValueType();
  assert(VT.isVector() && "Vector type expected");

  APInt UndefElts;
  APInt DemandedElts;

  // For now we don't support this with scalable vectors.
  if (!VT.isScalableVector())
    DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  return isSplatValue(V, DemandedElts, UndefElts) &&
         (AllowUndefs || !UndefElts);
}
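// Illustrative use: isSplatValue(build_vector(x, x, x, x)) returns true, and
// with AllowUndefs set, build_vector(x, undef, x, x) is also accepted as a
// splat since the undef lane merely leaves UndefElts non-empty.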
SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
  V = peekThroughExtractSubvectors(V);

  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();
  switch (Opcode) {
  default: {
    APInt UndefElts;
    APInt DemandedElts;

    if (!VT.isScalableVector())
      DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());

    if (isSplatValue(V, DemandedElts, UndefElts)) {
      if (VT.isScalableVector()) {
        // DemandedElts and UndefElts are ignored for scalable vectors, since
        // the only supported cases are SPLAT_VECTOR nodes.
        SplatIdx = 0;
      } else {
        // Handle case where all demanded elements are UNDEF.
        if (DemandedElts.isSubsetOf(UndefElts)) {
          SplatIdx = 0;
          return getUNDEF(VT);
        }
        SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
      }
      return V;
    }
    break;
  }
  case ISD::SPLAT_VECTOR:
    SplatIdx = 0;
    return V;
  case ISD::VECTOR_SHUFFLE: {
    if (VT.isScalableVector())
      return SDValue();

    // Check if this is a shuffle node doing a splat.
    // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
    // getTargetVShiftNode currently struggles without the splat source.
    auto *SVN = cast<ShuffleVectorSDNode>(V);
    if (!SVN->isSplat())
      break;
    int Idx = SVN->getSplatIndex();
    int NumElts = V.getValueType().getVectorNumElements();
    SplatIdx = Idx % NumElts;
    return V.getOperand(Idx / NumElts);
  }
  }

  return SDValue();
}

SDValue SelectionDAG::getSplatValue(SDValue V) {
  int SplatIdx;
  if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx))
    return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V),
                   SrcVector.getValueType().getScalarType(), SrcVector,
                   getVectorIdxConstant(SplatIdx, SDLoc(V)));
  return SDValue();
}

const APInt *
SelectionDAG::getValidShiftAmountConstant(SDValue V,
                                          const APInt &DemandedElts) const {
  assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
          V.getOpcode() == ISD::SRA) &&
         "Unknown shift node");
  unsigned BitWidth = V.getScalarValueSizeInBits();
  if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
    // Shifting more than the bitwidth is not valid.
    const APInt &ShAmt = SA->getAPIntValue();
    if (ShAmt.ult(BitWidth))
      return &ShAmt;
  }
  return nullptr;
}

const APInt *SelectionDAG::getValidMinimumShiftAmountConstant(
    SDValue V, const APInt &DemandedElts) const {
  assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
          V.getOpcode() == ISD::SRA) &&
         "Unknown shift node");
  if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
    return ValidAmt;
  unsigned BitWidth = V.getScalarValueSizeInBits();
  auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
  if (!BV)
    return nullptr;
  const APInt *MinShAmt = nullptr;
  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!SA)
      return nullptr;
    // Shifting more than the bitwidth is not valid.
    const APInt &ShAmt = SA->getAPIntValue();
    if (ShAmt.uge(BitWidth))
      return nullptr;
    if (MinShAmt && MinShAmt->ule(ShAmt))
      continue;
    MinShAmt = &ShAmt;
  }
  return MinShAmt;
}

const APInt *SelectionDAG::getValidMaximumShiftAmountConstant(
    SDValue V, const APInt &DemandedElts) const {
  assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
          V.getOpcode() == ISD::SRA) &&
         "Unknown shift node");
  if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
    return ValidAmt;
  unsigned BitWidth = V.getScalarValueSizeInBits();
  auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
  if (!BV)
    return nullptr;
  const APInt *MaxShAmt = nullptr;
  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!SA)
      return nullptr;
    // Shifting more than the bitwidth is not valid.
    const APInt &ShAmt = SA->getAPIntValue();
    if (ShAmt.uge(BitWidth))
      return nullptr;
    if (MaxShAmt && MaxShAmt->uge(ShAmt))
      continue;
    MaxShAmt = &ShAmt;
  }
  return MaxShAmt;
}

/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. For vectors, the known bits are those that are shared by
/// every vector element.
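/// For example (illustrative): for a BUILD_VECTOR of the i8 constants 3
/// (0b00000011) and 7 (0b00000111), the shared knowledge is Known.One = 0x03
/// and Known.Zero = 0xF8; bit 2 differs between elements and stays unknown.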
KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
  EVT VT = Op.getValueType();

  // TODO: Until we have a plan for how to represent demanded elements for
  // scalable vectors, we can just bail out for now.
  if (Op.getValueType().isScalableVector()) {
    unsigned BitWidth = Op.getScalarValueSizeInBits();
    return KnownBits(BitWidth);
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return computeKnownBits(Op, DemandedElts, Depth);
}

/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. The DemandedElts argument allows us to only collect the
/// known bits that are shared by the requested vector elements.
KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
                                         unsigned Depth) const {
  unsigned BitWidth = Op.getScalarValueSizeInBits();

  KnownBits Known(BitWidth);   // Don't know anything.

  // TODO: Until we have a plan for how to represent demanded elements for
  // scalable vectors, we can just bail out for now.
  if (Op.getValueType().isScalableVector())
    return Known;

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
    // We know all of the bits for a constant!
    Known.One = C->getAPIntValue();
    Known.Zero = ~Known.One;
    return Known;
  }
  if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
    // We know all of the bits for a constant FP!
    Known.One = C->getValueAPF().bitcastToAPInt();
    Known.Zero = ~Known.One;
    return Known;
  }

  if (Depth >= MaxRecursionDepth)
    return Known;  // Limit search depth.

  KnownBits Known2;
  unsigned NumElts = DemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  if (!DemandedElts)
    return Known;  // No demanded elts, better to assume we don't know anything.

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded vector element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;

      SDValue SrcOp = Op.getOperand(i);
      Known2 = computeKnownBits(SrcOp, Depth + 1);

      // BUILD_VECTOR can implicitly truncate sources, we must handle this.
      if (SrcOp.getValueSizeInBits() != BitWidth) {
        assert(SrcOp.getValueSizeInBits() > BitWidth &&
               "Expected BUILD_VECTOR implicit truncation");
        Known2 = Known2.trunc(BitWidth);
      }

      // Known bits are the values that are shared by every demanded element.
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;

      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  case ISD::VECTOR_SHUFFLE: {
    // Collect the known bits that are shared by every vector element
    // referenced by the shuffle.
    APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
    Known.Zero.setAllBits(); Known.One.setAllBits();
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
    assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;

      int M = SVN->getMaskElt(i);
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        Known.resetAll();
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }

      if ((unsigned)M < NumElts)
        DemandedLHS.setBit((unsigned)M % NumElts);
      else
        DemandedRHS.setBit((unsigned)M % NumElts);
    }
    // Known bits are the values that are shared by every demanded element.
    if (!!DemandedLHS) {
      SDValue LHS = Op.getOperand(0);
      Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    if (!!DemandedRHS) {
      SDValue RHS = Op.getOperand(1);
      Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    // Split DemandedElts and test each of the demanded subvectors.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; i != NumSubVectors; ++i) {
      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
      if (!!DemandedSub) {
        SDValue Sub = Op.getOperand(i);
        Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
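    // (E.g. inserting a 2-element subvector at index 2 of a 4-element vector:
    // lanes 2-3 are demanded from Sub, lanes 0-1 from Src.)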
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    Known.One.setAllBits();
    Known.Zero.setAllBits();
    if (!!DemandedSubElts) {
      Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
      if (Known.isUnknown())
        break; // early-out.
    }
    if (!!DemandedSrcElts) {
      Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    // Bail until we can represent demanded elements for scalable vectors.
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
    Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
    break;
  }
  case ISD::SCALAR_TO_VECTOR: {
    // We know as much about scalar_to_vector as we know about its source,
    // which becomes the first element of the otherwise unknown vector.
    if (DemandedElts != 1)
      break;

    SDValue N0 = Op.getOperand(0);
    Known = computeKnownBits(N0, Depth + 1);
    if (N0.getValueSizeInBits() != BitWidth)
      Known = Known.trunc(BitWidth);

    break;
  }
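  // (Illustrative example for the BITCAST case below: bitcasting <2 x i8> to
  // i16 on a little-endian target takes element 0 as the low byte and
  // element 1 as the high byte of the result, so each element's known bits
  // are shifted into the corresponding output positions.)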
  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    EVT SubVT = N0.getValueType();
    unsigned SubBitWidth = SubVT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
      break;

    // Fast handling of 'identity' bitcasts.
    if (BitWidth == SubBitWidth) {
      Known = computeKnownBits(N0, DemandedElts, Depth + 1);
      break;
    }

    bool IsLE = getDataLayout().isLittleEndian();

    // Bitcast 'small element' vector to 'large element' scalar/vector.
    if ((BitWidth % SubBitWidth) == 0) {
      assert(N0.getValueType().isVector() && "Expected bitcast from vector");

      // Collect known bits for the (larger) output by collecting the known
      // bits from each set of sub elements and shift these into place.
      // We need to separately call computeKnownBits for each set of
      // sub elements as the knownbits for each is likely to be different.
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts(NumElts * SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);

      for (unsigned i = 0; i != SubScale; ++i) {
        Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
                                  Depth + 1);
        unsigned Shifts = IsLE ? i : SubScale - 1 - i;
        Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
        Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
      }
    }

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SubBitWidth % BitWidth) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");

      // Collect known bits for the (smaller) output by collecting the known
      // bits from the overlapping larger input elements and extracting the
      // sub sections we actually care about.
      unsigned SubScale = SubBitWidth / BitWidth;
      APInt SubDemandedElts(NumElts / SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i / SubScale);

      Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);

      Known.Zero.setAllBits(); Known.One.setAllBits();
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Shifts = IsLE ? i : NumElts - 1 - i;
          unsigned Offset = (Shifts % SubScale) * BitWidth;
          Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
          Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
          // If we don't know any bits, early out.
          if (Known.isUnknown())
            break;
        }
    }
    break;
  }
  case ISD::AND:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known &= Known2;
    break;
  case ISD::OR:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known |= Known2;
    break;
  case ISD::XOR:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known ^= Known2;
    break;
  case ISD::MUL: {
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = KnownBits::computeForMul(Known, Known2);
    break;
  }
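  // (Illustrative example for the UDIV reasoning below, on i8: if the
  // numerator is known <= 0x3F (two leading zeros) and the denominator has
  // bit 2 known set (so it is at least 4), the quotient gains at least two
  // more leading zeros; e.g. 63 / 4 = 15 <= 0x0F.)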
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case ISD::SELECT:
  case ISD::VSELECT:
    Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SELECT_CC:
    Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.
    // If we know the result of a setcc has the top bits zero, use this info.
    // We know that we have an integer-based boolean since these operations
    // are only available for integer.
    if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  case ISD::SETCC:
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: {
    unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::shl(Known, Known2);

    // Minimum shift low bits are known zero.
    if (const APInt *ShMinAmt =
            getValidMinimumShiftAmountConstant(Op, DemandedElts))
      Known.Zero.setLowBits(ShMinAmt->getZExtValue());
    break;
  case ISD::SRL:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::lshr(Known, Known2);

    // Minimum shift high bits are known zero.
    if (const APInt *ShMinAmt =
            getValidMinimumShiftAmountConstant(Op, DemandedElts))
      Known.Zero.setHighBits(ShMinAmt->getZExtValue());
    break;
  case ISD::SRA:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::ashr(Known, Known2);
    // TODO: Add minimum shift high known sign bits.
    break;
  case ISD::FSHL:
  case ISD::FSHR:
    if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
      unsigned Amt = C->getAPIntValue().urem(BitWidth);

      // For fshl, 0-shift returns the 1st arg.
      // For fshr, 0-shift returns the 2nd arg.
      if (Amt == 0) {
        Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
                                 DemandedElts, Depth + 1);
        break;
      }

      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
      if (Opcode == ISD::FSHL) {
        Known.One <<= Amt;
        Known.Zero <<= Amt;
        Known2.One.lshrInPlace(BitWidth - Amt);
        Known2.Zero.lshrInPlace(BitWidth - Amt);
      } else {
        Known.One <<= BitWidth - Amt;
        Known.Zero <<= BitWidth - Amt;
        Known2.One.lshrInPlace(Amt);
        Known2.Zero.lshrInPlace(Amt);
      }
      Known.One |= Known2.One;
      Known.Zero |= Known2.Zero;
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = EVT.getScalarSizeInBits();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignMask = APInt::getSignMask(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignMask = InSignMask.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignMask;

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known.One &= InputDemandedBits;
    Known.Zero &= InputDemandedBits;

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
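    // (E.g. sign_extend_inreg of an i32 from i8 where bit 7 of the input is
    // known zero: the top 24 bits of the result are known zero too.)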
    if (Known.Zero.intersects(InSignMask)) {        // Input sign bit known clear
      Known.Zero |= NewBits;
      Known.One  &= ~NewBits;
    } else if (Known.One.intersects(InSignMask)) {  // Input sign bit known set
      Known.One  |= NewBits;
      Known.Zero &= ~NewBits;
    } else {                                        // Input sign bit unknown
      Known.Zero &= ~NewBits;
      Known.One  &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleTZ = Known2.countMaxTrailingZeros();
    unsigned LowBits = Log2_32(PossibleTZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleLZ = Known2.countMaxLeadingZeros();
    unsigned LowBits = Log2_32(PossibleLZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTPOP: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we know some of the bits are zero, they can't be one.
    unsigned PossibleOnes = Known2.countMaxPopulation();
    Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
    break;
  }
  case ISD::PARITY: {
    // Parity returns 0 everywhere but the LSB.
    Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
    if (ISD::isNON_EXTLoad(LD) && Cst) {
      // Determine any common known bits from the loaded constant pool value.
      Type *CstTy = Cst->getType();
      if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
        // If it's a vector splat, then we can (quickly) reuse the scalar path.
        // NOTE: We assume all elements match and none are UNDEF.
        if (CstTy->isVectorTy()) {
          if (const Constant *Splat = Cst->getSplatValue()) {
            Cst = Splat;
            CstTy = Cst->getType();
          }
        }
        // TODO - do we need to handle different bitwidths?
        if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
          // Iterate across all vector elements finding common known bits.
          Known.One.setAllBits();
          Known.Zero.setAllBits();
          for (unsigned i = 0; i != NumElts; ++i) {
            if (!DemandedElts[i])
              continue;
            if (Constant *Elt = Cst->getAggregateElement(i)) {
              if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                const APInt &Value = CInt->getValue();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
              if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                APInt Value = CFP->getValueAPF().bitcastToAPInt();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
            }
            Known.One.clearAllBits();
            Known.Zero.clearAllBits();
            break;
          }
        } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
          if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
            const APInt &Value = CInt->getValue();
            Known.One = Value;
            Known.Zero = ~Value;
          } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
            APInt Value = CFP->getValueAPF().bitcastToAPInt();
            Known.One = Value;
            Known.Zero = ~Value;
          }
        }
      }
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
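      // (E.g. a zextload of i8 into i32: the top 24 bits of the loaded value
      // are known zero.)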
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      if (LD->getExtensionType() == ISD::NON_EXTLOAD)
        computeKnownBitsFromRangeMetadata(*Ranges, Known);
    }
    break;
  }
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::ZERO_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    Known = Known.anyext(BitWidth);
    break;
  }
  case ISD::ANY_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.anyext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    Known = computeKnownBits(Op.getOperand(0), Depth+1);
    Known.Zero |= (~InMask);
    Known.One  &= (~Known.Zero);
    break;
  }
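  // (An AssertAlign node guarantees a pointer's alignment; e.g. an assertion
  // of align 8 means the three low bits are known zero.)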
3206 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3207 TargetLowering::ZeroOrOneBooleanContent && 3208 BitWidth > 1) 3209 Known.Zero.setBitsFrom(1); 3210 break; 3211 } 3212 LLVM_FALLTHROUGH; 3213 case ISD::SUB: 3214 case ISD::SUBC: { 3215 assert(Op.getResNo() == 0 && 3216 "We only compute knownbits for the difference here."); 3217 3218 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3219 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3220 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false, 3221 Known, Known2); 3222 break; 3223 } 3224 case ISD::UADDO: 3225 case ISD::SADDO: 3226 case ISD::ADDCARRY: 3227 if (Op.getResNo() == 1) { 3228 // If we know the result of a setcc has the top bits zero, use this info. 3229 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3230 TargetLowering::ZeroOrOneBooleanContent && 3231 BitWidth > 1) 3232 Known.Zero.setBitsFrom(1); 3233 break; 3234 } 3235 LLVM_FALLTHROUGH; 3236 case ISD::ADD: 3237 case ISD::ADDC: 3238 case ISD::ADDE: { 3239 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here."); 3240 3241 // With ADDE and ADDCARRY, a carry bit may be added in. 3242 KnownBits Carry(1); 3243 if (Opcode == ISD::ADDE) 3244 // Can't track carry from glue, set carry to unknown. 3245 Carry.resetAll(); 3246 else if (Opcode == ISD::ADDCARRY) 3247 // TODO: Compute known bits for the carry operand. Not sure if it is worth 3248 // the trouble (how often will we find a known carry bit). And I haven't 3249 // tested this very much yet, but something like this might work: 3250 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1); 3251 // Carry = Carry.zextOrTrunc(1, false); 3252 Carry.resetAll(); 3253 else 3254 Carry.setAllZero(); 3255 3256 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3257 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3258 Known = KnownBits::computeForAddCarry(Known, Known2, Carry); 3259 break; 3260 } 3261 case ISD::SREM: 3262 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 3263 const APInt &RA = Rem->getAPIntValue().abs(); 3264 if (RA.isPowerOf2()) { 3265 APInt LowBits = RA - 1; 3266 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3267 3268 // The low bits of the first operand are unchanged by the srem. 3269 Known.Zero = Known2.Zero & LowBits; 3270 Known.One = Known2.One & LowBits; 3271 3272 // If the first operand is non-negative or has all low bits zero, then 3273 // the upper bits are all zero. 3274 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero)) 3275 Known.Zero |= ~LowBits; 3276 3277 // If the first operand is negative and not all low bits are zero, then 3278 // the upper bits are all one. 3279 if (Known2.isNegative() && LowBits.intersects(Known2.One)) 3280 Known.One |= ~LowBits; 3281 assert((Known.Zero & Known.One) == 0&&"Bits known to be one AND zero?"); 3282 } 3283 } 3284 break; 3285 case ISD::UREM: { 3286 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) { 3287 const APInt &RA = Rem->getAPIntValue(); 3288 if (RA.isPowerOf2()) { 3289 APInt LowBits = (RA - 1); 3290 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3291 3292 // The upper bits are all zero, the lower ones are unchanged. 
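// Worked example: for (urem X, 8), RA == 8 and LowBits == 7, so the result
// keeps the low three known bits of X and all higher bits are known zero.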
3293 Known.Zero = Known2.Zero | ~LowBits;
3294 Known.One = Known2.One & LowBits;
3295 break;
3296 }
3297 }
3298
3299 // Since the result is less than or equal to either operand, any leading
3300 // zero bits in either operand must also exist in the result.
3301 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3302 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3303
3304 uint32_t Leaders =
3305 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
3306 Known.resetAll();
3307 Known.Zero.setHighBits(Leaders);
3308 break;
3309 }
3310 case ISD::EXTRACT_ELEMENT: {
3311 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3312 const unsigned Index = Op.getConstantOperandVal(1);
3313 const unsigned EltBitWidth = Op.getValueSizeInBits();
3314
3315 // Remove the low part of the known bits mask.
3316 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3317 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3318
3319 // Remove the high part of the known bits mask.
3320 Known = Known.trunc(EltBitWidth);
3321 break;
3322 }
3323 case ISD::EXTRACT_VECTOR_ELT: {
3324 SDValue InVec = Op.getOperand(0);
3325 SDValue EltNo = Op.getOperand(1);
3326 EVT VecVT = InVec.getValueType();
3327 // computeKnownBits not yet implemented for scalable vectors.
3328 if (VecVT.isScalableVector())
3329 break;
3330 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3331 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3332
3333 // If BitWidth > EltBitWidth the value is any-extended, so we do not know
3334 // anything about the extended bits.
3335 if (BitWidth > EltBitWidth)
3336 Known = Known.trunc(EltBitWidth);
3337
3338 // If we know the element index, just demand that vector element, else for
3339 // an unknown element index, ignore DemandedElts and demand them all.
3340 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3341 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3342 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3343 DemandedSrcElts =
3344 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3345
3346 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
3347 if (BitWidth > EltBitWidth)
3348 Known = Known.anyext(BitWidth);
3349 break;
3350 }
3351 case ISD::INSERT_VECTOR_ELT: {
3352 // If we know the element index, split the demand between the
3353 // source vector and the inserted element, otherwise assume we need
3354 // the original demanded vector elements and the value.
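// For example: inserting at constant index 2 of a v4i32 means the inserted
// scalar only matters if element 2 is demanded, and the source vector is
// then queried with element 2 cleared from the demanded set.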
3355 SDValue InVec = Op.getOperand(0); 3356 SDValue InVal = Op.getOperand(1); 3357 SDValue EltNo = Op.getOperand(2); 3358 bool DemandedVal = true; 3359 APInt DemandedVecElts = DemandedElts; 3360 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3361 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3362 unsigned EltIdx = CEltNo->getZExtValue(); 3363 DemandedVal = !!DemandedElts[EltIdx]; 3364 DemandedVecElts.clearBit(EltIdx); 3365 } 3366 Known.One.setAllBits(); 3367 Known.Zero.setAllBits(); 3368 if (DemandedVal) { 3369 Known2 = computeKnownBits(InVal, Depth + 1); 3370 Known.One &= Known2.One.zextOrTrunc(BitWidth); 3371 Known.Zero &= Known2.Zero.zextOrTrunc(BitWidth); 3372 } 3373 if (!!DemandedVecElts) { 3374 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1); 3375 Known.One &= Known2.One; 3376 Known.Zero &= Known2.Zero; 3377 } 3378 break; 3379 } 3380 case ISD::BITREVERSE: { 3381 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3382 Known = Known2.reverseBits(); 3383 break; 3384 } 3385 case ISD::BSWAP: { 3386 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3387 Known = Known2.byteSwap(); 3388 break; 3389 } 3390 case ISD::ABS: { 3391 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3392 Known = Known2.abs(); 3393 break; 3394 } 3395 case ISD::UMIN: { 3396 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3397 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3398 Known = KnownBits::umin(Known, Known2); 3399 break; 3400 } 3401 case ISD::UMAX: { 3402 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3403 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3404 Known = KnownBits::umax(Known, Known2); 3405 break; 3406 } 3407 case ISD::SMIN: 3408 case ISD::SMAX: { 3409 // If we have a clamp pattern, we know that the number of sign bits will be 3410 // the minimum of the clamp min/max range. 3411 bool IsMax = (Opcode == ISD::SMAX); 3412 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3413 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3414 if (Op.getOperand(0).getOpcode() == (IsMax ? 
ISD::SMIN : ISD::SMAX)) 3415 CstHigh = 3416 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3417 if (CstLow && CstHigh) { 3418 if (!IsMax) 3419 std::swap(CstLow, CstHigh); 3420 3421 const APInt &ValueLow = CstLow->getAPIntValue(); 3422 const APInt &ValueHigh = CstHigh->getAPIntValue(); 3423 if (ValueLow.sle(ValueHigh)) { 3424 unsigned LowSignBits = ValueLow.getNumSignBits(); 3425 unsigned HighSignBits = ValueHigh.getNumSignBits(); 3426 unsigned MinSignBits = std::min(LowSignBits, HighSignBits); 3427 if (ValueLow.isNegative() && ValueHigh.isNegative()) { 3428 Known.One.setHighBits(MinSignBits); 3429 break; 3430 } 3431 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) { 3432 Known.Zero.setHighBits(MinSignBits); 3433 break; 3434 } 3435 } 3436 } 3437 3438 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3439 if (Known.isUnknown()) break; // Early-out 3440 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3441 if (IsMax) 3442 Known = KnownBits::smax(Known, Known2); 3443 else 3444 Known = KnownBits::smin(Known, Known2); 3445 break; 3446 } 3447 case ISD::FrameIndex: 3448 case ISD::TargetFrameIndex: 3449 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(), 3450 Known, getMachineFunction()); 3451 break; 3452 3453 default: 3454 if (Opcode < ISD::BUILTIN_OP_END) 3455 break; 3456 LLVM_FALLTHROUGH; 3457 case ISD::INTRINSIC_WO_CHAIN: 3458 case ISD::INTRINSIC_W_CHAIN: 3459 case ISD::INTRINSIC_VOID: 3460 // Allow the target to implement this method for its nodes. 3461 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); 3462 break; 3463 } 3464 3465 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 3466 return Known; 3467 } 3468 3469 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0, 3470 SDValue N1) const { 3471 // X + 0 never overflow 3472 if (isNullConstant(N1)) 3473 return OFK_Never; 3474 3475 KnownBits N1Known = computeKnownBits(N1); 3476 if (N1Known.Zero.getBoolValue()) { 3477 KnownBits N0Known = computeKnownBits(N0); 3478 3479 bool overflow; 3480 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow); 3481 if (!overflow) 3482 return OFK_Never; 3483 } 3484 3485 // mulhi + 1 never overflow 3486 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 && 3487 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue()) 3488 return OFK_Never; 3489 3490 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) { 3491 KnownBits N0Known = computeKnownBits(N0); 3492 3493 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue()) 3494 return OFK_Never; 3495 } 3496 3497 return OFK_Sometime; 3498 } 3499 3500 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const { 3501 EVT OpVT = Val.getValueType(); 3502 unsigned BitWidth = OpVT.getScalarSizeInBits(); 3503 3504 // Is the constant a known power of 2? 3505 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val)) 3506 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3507 3508 // A left-shift of a constant one will have exactly one bit set because 3509 // shifting the bit off the end is undefined. 3510 if (Val.getOpcode() == ISD::SHL) { 3511 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3512 if (C && C->getAPIntValue() == 1) 3513 return true; 3514 } 3515 3516 // Similarly, a logical right-shift of a constant sign-bit will have exactly 3517 // one bit set. 
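// For example: (srl SignMask, X) on i32 moves the single bit 0x80000000
// down to bit (31 - X), so exactly one bit is set for any in-range shift.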
3518 if (Val.getOpcode() == ISD::SRL) { 3519 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3520 if (C && C->getAPIntValue().isSignMask()) 3521 return true; 3522 } 3523 3524 // Are all operands of a build vector constant powers of two? 3525 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3526 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3527 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3528 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3529 return false; 3530 })) 3531 return true; 3532 3533 // More could be done here, though the above checks are enough 3534 // to handle some common cases. 3535 3536 // Fall back to computeKnownBits to catch other known cases. 3537 KnownBits Known = computeKnownBits(Val); 3538 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3539 } 3540 3541 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3542 EVT VT = Op.getValueType(); 3543 3544 // TODO: Assume we don't know anything for now. 3545 if (VT.isScalableVector()) 3546 return 1; 3547 3548 APInt DemandedElts = VT.isVector() 3549 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 3550 : APInt(1, 1); 3551 return ComputeNumSignBits(Op, DemandedElts, Depth); 3552 } 3553 3554 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3555 unsigned Depth) const { 3556 EVT VT = Op.getValueType(); 3557 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3558 unsigned VTBits = VT.getScalarSizeInBits(); 3559 unsigned NumElts = DemandedElts.getBitWidth(); 3560 unsigned Tmp, Tmp2; 3561 unsigned FirstAnswer = 1; 3562 3563 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3564 const APInt &Val = C->getAPIntValue(); 3565 return Val.getNumSignBits(); 3566 } 3567 3568 if (Depth >= MaxRecursionDepth) 3569 return 1; // Limit search depth. 3570 3571 if (!DemandedElts || VT.isScalableVector()) 3572 return 1; // No demanded elts, better to assume we don't know anything. 3573 3574 unsigned Opcode = Op.getOpcode(); 3575 switch (Opcode) { 3576 default: break; 3577 case ISD::AssertSext: 3578 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3579 return VTBits-Tmp+1; 3580 case ISD::AssertZext: 3581 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3582 return VTBits-Tmp; 3583 3584 case ISD::BUILD_VECTOR: 3585 Tmp = VTBits; 3586 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3587 if (!DemandedElts[i]) 3588 continue; 3589 3590 SDValue SrcOp = Op.getOperand(i); 3591 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1); 3592 3593 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3594 if (SrcOp.getValueSizeInBits() != VTBits) { 3595 assert(SrcOp.getValueSizeInBits() > VTBits && 3596 "Expected BUILD_VECTOR implicit truncation"); 3597 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3598 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3599 } 3600 Tmp = std::min(Tmp, Tmp2); 3601 } 3602 return Tmp; 3603 3604 case ISD::VECTOR_SHUFFLE: { 3605 // Collect the minimum number of sign bits that are shared by every vector 3606 // element referenced by the shuffle. 
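// For example: a v4i32 shuffle with mask <0, 1, 4, 5> pulls elements 0-1
// from the first operand and elements 0-1 from the second, so only those
// lanes constrain the common sign-bit count.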
3607 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3608 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3609 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3610 for (unsigned i = 0; i != NumElts; ++i) { 3611 int M = SVN->getMaskElt(i); 3612 if (!DemandedElts[i]) 3613 continue; 3614 // For UNDEF elements, we don't know anything about the common state of 3615 // the shuffle result. 3616 if (M < 0) 3617 return 1; 3618 if ((unsigned)M < NumElts) 3619 DemandedLHS.setBit((unsigned)M % NumElts); 3620 else 3621 DemandedRHS.setBit((unsigned)M % NumElts); 3622 } 3623 Tmp = std::numeric_limits<unsigned>::max(); 3624 if (!!DemandedLHS) 3625 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3626 if (!!DemandedRHS) { 3627 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3628 Tmp = std::min(Tmp, Tmp2); 3629 } 3630 // If we don't know anything, early out and try computeKnownBits fall-back. 3631 if (Tmp == 1) 3632 break; 3633 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3634 return Tmp; 3635 } 3636 3637 case ISD::BITCAST: { 3638 SDValue N0 = Op.getOperand(0); 3639 EVT SrcVT = N0.getValueType(); 3640 unsigned SrcBits = SrcVT.getScalarSizeInBits(); 3641 3642 // Ignore bitcasts from unsupported types.. 3643 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) 3644 break; 3645 3646 // Fast handling of 'identity' bitcasts. 3647 if (VTBits == SrcBits) 3648 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3649 3650 bool IsLE = getDataLayout().isLittleEndian(); 3651 3652 // Bitcast 'large element' scalar/vector to 'small element' vector. 3653 if ((SrcBits % VTBits) == 0) { 3654 assert(VT.isVector() && "Expected bitcast to vector"); 3655 3656 unsigned Scale = SrcBits / VTBits; 3657 APInt SrcDemandedElts(NumElts / Scale, 0); 3658 for (unsigned i = 0; i != NumElts; ++i) 3659 if (DemandedElts[i]) 3660 SrcDemandedElts.setBit(i / Scale); 3661 3662 // Fast case - sign splat can be simply split across the small elements. 3663 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1); 3664 if (Tmp == SrcBits) 3665 return VTBits; 3666 3667 // Slow case - determine how far the sign extends into each sub-element. 3668 Tmp2 = VTBits; 3669 for (unsigned i = 0; i != NumElts; ++i) 3670 if (DemandedElts[i]) { 3671 unsigned SubOffset = i % Scale; 3672 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); 3673 SubOffset = SubOffset * VTBits; 3674 if (Tmp <= SubOffset) 3675 return 1; 3676 Tmp2 = std::min(Tmp2, Tmp - SubOffset); 3677 } 3678 return Tmp2; 3679 } 3680 break; 3681 } 3682 3683 case ISD::SIGN_EXTEND: 3684 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3685 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3686 case ISD::SIGN_EXTEND_INREG: 3687 // Max of the input and what this extends. 3688 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3689 Tmp = VTBits-Tmp+1; 3690 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3691 return std::max(Tmp, Tmp2); 3692 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3693 SDValue Src = Op.getOperand(0); 3694 EVT SrcVT = Src.getValueType(); 3695 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements()); 3696 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3697 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3698 } 3699 case ISD::SRA: 3700 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3701 // SRA X, C -> adds C sign bits. 
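// Worked example: if X on i32 has at least 5 sign bits, then (sra X, 4)
// has at least min(5 + 4, 32) == 9 sign bits.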
3702 if (const APInt *ShAmt = 3703 getValidMinimumShiftAmountConstant(Op, DemandedElts)) 3704 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits); 3705 return Tmp; 3706 case ISD::SHL: 3707 if (const APInt *ShAmt = 3708 getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 3709 // shl destroys sign bits, ensure it doesn't shift out all sign bits. 3710 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3711 if (ShAmt->ult(Tmp)) 3712 return Tmp - ShAmt->getZExtValue(); 3713 } 3714 break; 3715 case ISD::AND: 3716 case ISD::OR: 3717 case ISD::XOR: // NOT is handled here. 3718 // Logical binary ops preserve the number of sign bits at the worst. 3719 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3720 if (Tmp != 1) { 3721 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3722 FirstAnswer = std::min(Tmp, Tmp2); 3723 // We computed what we know about the sign bits as our first 3724 // answer. Now proceed to the generic code that uses 3725 // computeKnownBits, and pick whichever answer is better. 3726 } 3727 break; 3728 3729 case ISD::SELECT: 3730 case ISD::VSELECT: 3731 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3732 if (Tmp == 1) return 1; // Early out. 3733 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3734 return std::min(Tmp, Tmp2); 3735 case ISD::SELECT_CC: 3736 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3737 if (Tmp == 1) return 1; // Early out. 3738 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3739 return std::min(Tmp, Tmp2); 3740 3741 case ISD::SMIN: 3742 case ISD::SMAX: { 3743 // If we have a clamp pattern, we know that the number of sign bits will be 3744 // the minimum of the clamp min/max range. 3745 bool IsMax = (Opcode == ISD::SMAX); 3746 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3747 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3748 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3749 CstHigh = 3750 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3751 if (CstLow && CstHigh) { 3752 if (!IsMax) 3753 std::swap(CstLow, CstHigh); 3754 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { 3755 Tmp = CstLow->getAPIntValue().getNumSignBits(); 3756 Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); 3757 return std::min(Tmp, Tmp2); 3758 } 3759 } 3760 3761 // Fallback - just get the minimum number of sign bits of the operands. 3762 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3763 if (Tmp == 1) 3764 return 1; // Early out. 3765 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3766 return std::min(Tmp, Tmp2); 3767 } 3768 case ISD::UMIN: 3769 case ISD::UMAX: 3770 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3771 if (Tmp == 1) 3772 return 1; // Early out. 3773 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3774 return std::min(Tmp, Tmp2); 3775 case ISD::SADDO: 3776 case ISD::UADDO: 3777 case ISD::SSUBO: 3778 case ISD::USUBO: 3779 case ISD::SMULO: 3780 case ISD::UMULO: 3781 if (Op.getResNo() != 1) 3782 break; 3783 // The boolean result conforms to getBooleanContents. Fall through. 3784 // If setcc returns 0/-1, all bits are sign bits. 3785 // We know that we have an integer-based boolean since these operations 3786 // are only available for integer. 
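// For example: under ZeroOrNegativeOneBooleanContent the flag is either 0
// or -1, so all VTBits bits are copies of the sign bit.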
3787 if (TLI->getBooleanContents(VT.isVector(), false) == 3788 TargetLowering::ZeroOrNegativeOneBooleanContent) 3789 return VTBits; 3790 break; 3791 case ISD::SETCC: 3792 case ISD::STRICT_FSETCC: 3793 case ISD::STRICT_FSETCCS: { 3794 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 3795 // If setcc returns 0/-1, all bits are sign bits. 3796 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 3797 TargetLowering::ZeroOrNegativeOneBooleanContent) 3798 return VTBits; 3799 break; 3800 } 3801 case ISD::ROTL: 3802 case ISD::ROTR: 3803 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3804 3805 // If we're rotating an 0/-1 value, then it stays an 0/-1 value. 3806 if (Tmp == VTBits) 3807 return VTBits; 3808 3809 if (ConstantSDNode *C = 3810 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) { 3811 unsigned RotAmt = C->getAPIntValue().urem(VTBits); 3812 3813 // Handle rotate right by N like a rotate left by 32-N. 3814 if (Opcode == ISD::ROTR) 3815 RotAmt = (VTBits - RotAmt) % VTBits; 3816 3817 // If we aren't rotating out all of the known-in sign bits, return the 3818 // number that are left. This handles rotl(sext(x), 1) for example. 3819 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt); 3820 } 3821 break; 3822 case ISD::ADD: 3823 case ISD::ADDC: 3824 // Add can have at most one carry bit. Thus we know that the output 3825 // is, at worst, one more bit than the inputs. 3826 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3827 if (Tmp == 1) return 1; // Early out. 3828 3829 // Special case decrementing a value (ADD X, -1): 3830 if (ConstantSDNode *CRHS = 3831 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) 3832 if (CRHS->isAllOnesValue()) { 3833 KnownBits Known = 3834 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3835 3836 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3837 // sign bits set. 3838 if ((Known.Zero | 1).isAllOnesValue()) 3839 return VTBits; 3840 3841 // If we are subtracting one from a positive number, there is no carry 3842 // out of the result. 3843 if (Known.isNonNegative()) 3844 return Tmp; 3845 } 3846 3847 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3848 if (Tmp2 == 1) return 1; // Early out. 3849 return std::min(Tmp, Tmp2) - 1; 3850 case ISD::SUB: 3851 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); 3852 if (Tmp2 == 1) return 1; // Early out. 3853 3854 // Handle NEG. 3855 if (ConstantSDNode *CLHS = 3856 isConstOrConstSplat(Op.getOperand(0), DemandedElts)) 3857 if (CLHS->isNullValue()) { 3858 KnownBits Known = 3859 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3860 // If the input is known to be 0 or 1, the output is 0/-1, which is all 3861 // sign bits set. 3862 if ((Known.Zero | 1).isAllOnesValue()) 3863 return VTBits; 3864 3865 // If the input is known to be positive (the sign bit is known clear), 3866 // the output of the NEG has the same number of sign bits as the input. 3867 if (Known.isNonNegative()) 3868 return Tmp2; 3869 3870 // Otherwise, we treat this like a SUB. 3871 } 3872 3873 // Sub can have at most one carry bit. Thus we know that the output 3874 // is, at worst, one more bit than the inputs. 3875 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); 3876 if (Tmp == 1) return 1; // Early out. 3877 return std::min(Tmp, Tmp2) - 1; 3878 case ISD::MUL: { 3879 // The output of the Mul can be at most twice the valid bits in the inputs. 
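// Worked example: on i32, 20 sign bits on one operand and 18 on the other
// give OutValidBits == (32 - 20 + 1) + (32 - 18 + 1) == 28, so the product
// is known to have at least 32 - 28 + 1 == 5 sign bits.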
3880 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3881 if (SignBitsOp0 == 1)
3882 break;
3883 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3884 if (SignBitsOp1 == 1)
3885 break;
3886 unsigned OutValidBits =
3887 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
3888 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
3889 }
3890 case ISD::TRUNCATE: {
3891 // Check if the sign bits of source go down as far as the truncated value.
3892 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3893 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3894 if (NumSrcSignBits > (NumSrcBits - VTBits))
3895 return NumSrcSignBits - (NumSrcBits - VTBits);
3896 break;
3897 }
3898 case ISD::EXTRACT_ELEMENT: {
3899 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3900 const int BitWidth = Op.getValueSizeInBits();
3901 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3902
3903 // Get the reverse index (starting from 1); the Op1 value indexes elements
3904 // from the little end, while the sign starts at the big end.
3905 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3906
3907 // If the sign portion ends in our element, the subtraction gives the correct
3908 // result. Otherwise it gives either a negative or a > bitwidth result.
3909 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3910 }
3911 case ISD::INSERT_VECTOR_ELT: {
3912 // If we know the element index, split the demand between the
3913 // source vector and the inserted element, otherwise assume we need
3914 // the original demanded vector elements and the value.
3915 SDValue InVec = Op.getOperand(0);
3916 SDValue InVal = Op.getOperand(1);
3917 SDValue EltNo = Op.getOperand(2);
3918 bool DemandedVal = true;
3919 APInt DemandedVecElts = DemandedElts;
3920 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3921 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3922 unsigned EltIdx = CEltNo->getZExtValue();
3923 DemandedVal = !!DemandedElts[EltIdx];
3924 DemandedVecElts.clearBit(EltIdx);
3925 }
3926 Tmp = std::numeric_limits<unsigned>::max();
3927 if (DemandedVal) {
3928 // TODO - handle implicit truncation of inserted elements.
3929 if (InVal.getScalarValueSizeInBits() != VTBits)
3930 break;
3931 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3932 Tmp = std::min(Tmp, Tmp2);
3933 }
3934 if (!!DemandedVecElts) {
3935 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
3936 Tmp = std::min(Tmp, Tmp2);
3937 }
3938 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3939 return Tmp;
3940 }
3941 case ISD::EXTRACT_VECTOR_ELT: {
3942 SDValue InVec = Op.getOperand(0);
3943 SDValue EltNo = Op.getOperand(1);
3944 EVT VecVT = InVec.getValueType();
3945 const unsigned BitWidth = Op.getValueSizeInBits();
3946 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3947 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3948
3949 // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3950 // anything about sign bits. But if the sizes match we can derive knowledge
3951 // about sign bits from the vector operand.
3952 if (BitWidth != EltBitWidth)
3953 break;
3954
3955 // If we know the element index, just demand that vector element, else for
3956 // an unknown element index, ignore DemandedElts and demand them all.
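// For example: extracting constant index 3 from a v8i16 demands only
// element 3 of the source; a variable index conservatively demands all 8.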
3957 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3958 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3959 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3960 DemandedSrcElts =
3961 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3962
3963 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3964 }
3965 case ISD::EXTRACT_SUBVECTOR: {
3966 // Offset the demanded elts by the subvector index.
3967 SDValue Src = Op.getOperand(0);
3968 // Bail until we can represent demanded elements for scalable vectors.
3969 if (Src.getValueType().isScalableVector())
3970 break;
3971 uint64_t Idx = Op.getConstantOperandVal(1);
3972 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3973 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3974 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3975 }
3976 case ISD::CONCAT_VECTORS: {
3977 // Determine the minimum number of sign bits across all demanded
3978 // elts of the input vectors. Early out if the result is already 1.
3979 Tmp = std::numeric_limits<unsigned>::max();
3980 EVT SubVectorVT = Op.getOperand(0).getValueType();
3981 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3982 unsigned NumSubVectors = Op.getNumOperands();
3983 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3984 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3985 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3986 if (!DemandedSub)
3987 continue;
3988 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3989 Tmp = std::min(Tmp, Tmp2);
3990 }
3991 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3992 return Tmp;
3993 }
3994 case ISD::INSERT_SUBVECTOR: {
3995 // Demand any elements from the subvector and the remainder from the src
3996 // it's inserted into.
3997 SDValue Src = Op.getOperand(0);
3998 SDValue Sub = Op.getOperand(1);
3999 uint64_t Idx = Op.getConstantOperandVal(2);
4000 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
4001 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
4002 APInt DemandedSrcElts = DemandedElts;
4003 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
4004
4005 Tmp = std::numeric_limits<unsigned>::max();
4006 if (!!DemandedSubElts) {
4007 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
4008 if (Tmp == 1)
4009 return 1; // early-out
4010 }
4011 if (!!DemandedSrcElts) {
4012 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4013 Tmp = std::min(Tmp, Tmp2);
4014 }
4015 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4016 return Tmp;
4017 }
4018 }
4019
4020 // If we are looking at the loaded value of the SDNode.
4021 if (Op.getResNo() == 0) {
4022 // Handle LOADX separately here. The EXTLOAD case will fall through.
4023 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
4024 unsigned ExtType = LD->getExtensionType();
4025 switch (ExtType) {
4026 default: break;
4027 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
4028 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4029 return VTBits - Tmp + 1;
4030 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
4031 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4032 return VTBits - Tmp;
4033 case ISD::NON_EXTLOAD:
4034 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
4035 // We only need to handle vectors - computeKnownBits should handle
4036 // scalar cases.
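// Worked example: a v4i32 constant pool splat of 0xFFFFFFF0 yields
// Value.getNumSignBits() == 28 for each demanded element, so Tmp == 28.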
4037 Type *CstTy = Cst->getType(); 4038 if (CstTy->isVectorTy() && 4039 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) { 4040 Tmp = VTBits; 4041 for (unsigned i = 0; i != NumElts; ++i) { 4042 if (!DemandedElts[i]) 4043 continue; 4044 if (Constant *Elt = Cst->getAggregateElement(i)) { 4045 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 4046 const APInt &Value = CInt->getValue(); 4047 Tmp = std::min(Tmp, Value.getNumSignBits()); 4048 continue; 4049 } 4050 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 4051 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 4052 Tmp = std::min(Tmp, Value.getNumSignBits()); 4053 continue; 4054 } 4055 } 4056 // Unknown type. Conservatively assume no bits match sign bit. 4057 return 1; 4058 } 4059 return Tmp; 4060 } 4061 } 4062 break; 4063 } 4064 } 4065 } 4066 4067 // Allow the target to implement this method for its nodes. 4068 if (Opcode >= ISD::BUILTIN_OP_END || 4069 Opcode == ISD::INTRINSIC_WO_CHAIN || 4070 Opcode == ISD::INTRINSIC_W_CHAIN || 4071 Opcode == ISD::INTRINSIC_VOID) { 4072 unsigned NumBits = 4073 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 4074 if (NumBits > 1) 4075 FirstAnswer = std::max(FirstAnswer, NumBits); 4076 } 4077 4078 // Finally, if we can prove that the top bits of the result are 0's or 1's, 4079 // use this information. 4080 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); 4081 4082 APInt Mask; 4083 if (Known.isNonNegative()) { // sign bit is 0 4084 Mask = Known.Zero; 4085 } else if (Known.isNegative()) { // sign bit is 1; 4086 Mask = Known.One; 4087 } else { 4088 // Nothing known. 4089 return FirstAnswer; 4090 } 4091 4092 // Okay, we know that the sign bit in Mask is set. Use CLO to determine 4093 // the number of identical bits in the top of the input value. 4094 Mask <<= Mask.getBitWidth()-VTBits; 4095 return std::max(FirstAnswer, Mask.countLeadingOnes()); 4096 } 4097 4098 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 4099 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 4100 !isa<ConstantSDNode>(Op.getOperand(1))) 4101 return false; 4102 4103 if (Op.getOpcode() == ISD::OR && 4104 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) 4105 return false; 4106 4107 return true; 4108 } 4109 4110 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { 4111 // If we're told that NaNs won't happen, assume they won't. 4112 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) 4113 return true; 4114 4115 if (Depth >= MaxRecursionDepth) 4116 return false; // Limit search depth. 4117 4118 // TODO: Handle vectors. 4119 // If the value is a constant, we can obviously see if it is a NaN or not. 
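// For example: the constant 1.0 is trivially never NaN, and a quiet NaN
// constant still passes the SNaN-only query since it is not signaling.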
4120 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4121 return !C->getValueAPF().isNaN() ||
4122 (SNaN && !C->getValueAPF().isSignaling());
4123 }
4124
4125 unsigned Opcode = Op.getOpcode();
4126 switch (Opcode) {
4127 case ISD::FADD:
4128 case ISD::FSUB:
4129 case ISD::FMUL:
4130 case ISD::FDIV:
4131 case ISD::FREM:
4132 case ISD::FSIN:
4133 case ISD::FCOS: {
4134 if (SNaN)
4135 return true;
4136 // TODO: Need isKnownNeverInfinity
4137 return false;
4138 }
4139 case ISD::FCANONICALIZE:
4140 case ISD::FEXP:
4141 case ISD::FEXP2:
4142 case ISD::FTRUNC:
4143 case ISD::FFLOOR:
4144 case ISD::FCEIL:
4145 case ISD::FROUND:
4146 case ISD::FROUNDEVEN:
4147 case ISD::FRINT:
4148 case ISD::FNEARBYINT: {
4149 if (SNaN)
4150 return true;
4151 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4152 }
4153 case ISD::FABS:
4154 case ISD::FNEG:
4155 case ISD::FCOPYSIGN: {
4156 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4157 }
4158 case ISD::SELECT:
4159 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4160 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4161 case ISD::FP_EXTEND:
4162 case ISD::FP_ROUND: {
4163 if (SNaN)
4164 return true;
4165 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4166 }
4167 case ISD::SINT_TO_FP:
4168 case ISD::UINT_TO_FP:
4169 return true;
4170 case ISD::FMA:
4171 case ISD::FMAD: {
4172 if (SNaN)
4173 return true;
4174 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4175 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4176 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4177 }
4178 case ISD::FSQRT: // Needs the operand to be known positive.
4179 case ISD::FLOG:
4180 case ISD::FLOG2:
4181 case ISD::FLOG10:
4182 case ISD::FPOWI:
4183 case ISD::FPOW: {
4184 if (SNaN)
4185 return true;
4186 // TODO: Refine on operand
4187 return false;
4188 }
4189 case ISD::FMINNUM:
4190 case ISD::FMAXNUM: {
4191 // Only one needs to be known not-NaN, since it will be returned if the
4192 // other ends up being one.
4193 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4194 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4195 }
4196 case ISD::FMINNUM_IEEE:
4197 case ISD::FMAXNUM_IEEE: {
4198 if (SNaN)
4199 return true;
4200 // This can return a NaN if either operand is an sNaN, or if both operands
4201 // are NaN.
4202 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4203 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4204 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4205 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4206 }
4207 case ISD::FMINIMUM:
4208 case ISD::FMAXIMUM: {
4209 // TODO: Does this quiet or return the original NaN as-is?
4210 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4211 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4212 }
4213 case ISD::EXTRACT_VECTOR_ELT: {
4214 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4215 }
4216 default:
4217 if (Opcode >= ISD::BUILTIN_OP_END ||
4218 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4219 Opcode == ISD::INTRINSIC_W_CHAIN ||
4220 Opcode == ISD::INTRINSIC_VOID) {
4221 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4222 }
4223
4224 return false;
4225 }
4226 }
4227
4228 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4229 assert(Op.getValueType().isFloatingPoint() &&
4230 "Floating point type expected");
4231
4232 // If the value is a constant, we can obviously see if it is a zero or not.
4233 // TODO: Add BuildVector support.
4234 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4235 return !C->isZero();
4236 return false;
4237 }
4238
4239 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4240 assert(!Op.getValueType().isFloatingPoint() &&
4241 "Floating point types unsupported - use isKnownNeverZeroFloat");
4242
4243 // If the value is a constant, we can obviously see if it is a zero or not.
4244 if (ISD::matchUnaryPredicate(
4245 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4246 return true;
4247
4248 // TODO: Recognize more cases here.
4249 switch (Op.getOpcode()) {
4250 default: break;
4251 case ISD::OR:
4252 if (isKnownNeverZero(Op.getOperand(1)) ||
4253 isKnownNeverZero(Op.getOperand(0)))
4254 return true;
4255 break;
4256 }
4257
4258 return false;
4259 }
4260
4261 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4262 // Check the obvious case.
4263 if (A == B) return true;
4264
4265 // Also check for negative and positive zero.
4266 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4267 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4268 if (CA->isZero() && CB->isZero()) return true;
4269
4270 // Otherwise they may not be equal.
4271 return false;
4272 }
4273
4274 // FIXME: unify with llvm::haveNoCommonBitsSet.
4275 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4276 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4277 assert(A.getValueType() == B.getValueType() &&
4278 "Values must have the same type");
4279 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4280 }
4281
4282 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4283 ArrayRef<SDValue> Ops,
4284 SelectionDAG &DAG) {
4285 int NumOps = Ops.size();
4286 assert(NumOps != 0 && "Can't build an empty vector!");
4287 assert(!VT.isScalableVector() &&
4288 "BUILD_VECTOR cannot be used with scalable types");
4289 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4290 "Incorrect element count in BUILD_VECTOR!");
4291
4292 // BUILD_VECTOR of UNDEFs is UNDEF.
4293 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4294 return DAG.getUNDEF(VT);
4295
4296 // BUILD_VECTOR of sequential extracts from the same vector + type is an identity.
4297 SDValue IdentitySrc;
4298 bool IsIdentity = true;
4299 for (int i = 0; i != NumOps; ++i) {
4300 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4301 Ops[i].getOperand(0).getValueType() != VT ||
4302 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4303 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4304 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4305 IsIdentity = false;
4306 break;
4307 }
4308 IdentitySrc = Ops[i].getOperand(0);
4309 }
4310 if (IsIdentity)
4311 return IdentitySrc;
4312
4313 return SDValue();
4314 }
4315
4316 /// Try to simplify vector concatenation to an input value, undef, or build
4317 /// vector.
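/// For example, concat (extract X, 0), (extract X, NumSubElts) folds back
/// to X, and a concat of BUILD_VECTORs folds into one wider BUILD_VECTOR.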
4318 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, 4319 ArrayRef<SDValue> Ops, 4320 SelectionDAG &DAG) { 4321 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!"); 4322 assert(llvm::all_of(Ops, 4323 [Ops](SDValue Op) { 4324 return Ops[0].getValueType() == Op.getValueType(); 4325 }) && 4326 "Concatenation of vectors with inconsistent value types!"); 4327 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) == 4328 VT.getVectorElementCount() && 4329 "Incorrect element count in vector concatenation!"); 4330 4331 if (Ops.size() == 1) 4332 return Ops[0]; 4333 4334 // Concat of UNDEFs is UNDEF. 4335 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 4336 return DAG.getUNDEF(VT); 4337 4338 // Scan the operands and look for extract operations from a single source 4339 // that correspond to insertion at the same location via this concatenation: 4340 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ... 4341 SDValue IdentitySrc; 4342 bool IsIdentity = true; 4343 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 4344 SDValue Op = Ops[i]; 4345 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements(); 4346 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR || 4347 Op.getOperand(0).getValueType() != VT || 4348 (IdentitySrc && Op.getOperand(0) != IdentitySrc) || 4349 Op.getConstantOperandVal(1) != IdentityIndex) { 4350 IsIdentity = false; 4351 break; 4352 } 4353 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) && 4354 "Unexpected identity source vector for concat of extracts"); 4355 IdentitySrc = Op.getOperand(0); 4356 } 4357 if (IsIdentity) { 4358 assert(IdentitySrc && "Failed to set source vector of extracts"); 4359 return IdentitySrc; 4360 } 4361 4362 // The code below this point is only designed to work for fixed width 4363 // vectors, so we bail out for now. 4364 if (VT.isScalableVector()) 4365 return SDValue(); 4366 4367 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be 4368 // simplified to one big BUILD_VECTOR. 4369 // FIXME: Add support for SCALAR_TO_VECTOR as well. 4370 EVT SVT = VT.getScalarType(); 4371 SmallVector<SDValue, 16> Elts; 4372 for (SDValue Op : Ops) { 4373 EVT OpVT = Op.getValueType(); 4374 if (Op.isUndef()) 4375 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); 4376 else if (Op.getOpcode() == ISD::BUILD_VECTOR) 4377 Elts.append(Op->op_begin(), Op->op_end()); 4378 else 4379 return SDValue(); 4380 } 4381 4382 // BUILD_VECTOR requires all inputs to be of the same type, find the 4383 // maximum type and extend them all. 4384 for (SDValue Op : Elts) 4385 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 4386 4387 if (SVT.bitsGT(VT.getScalarType())) { 4388 for (SDValue &Op : Elts) { 4389 if (Op.isUndef()) 4390 Op = DAG.getUNDEF(SVT); 4391 else 4392 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) 4393 ? DAG.getZExtOrTrunc(Op, DL, SVT) 4394 : DAG.getSExtOrTrunc(Op, DL, SVT); 4395 } 4396 } 4397 4398 SDValue V = DAG.getBuildVector(VT, DL, Elts); 4399 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG); 4400 return V; 4401 } 4402 4403 /// Gets or creates the specified node. 
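/// Nodes are uniqued through the CSE map, so requesting the same
/// (Opcode, VT) pair twice returns the previously created node.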
4404 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { 4405 FoldingSetNodeID ID; 4406 AddNodeIDNode(ID, Opcode, getVTList(VT), None); 4407 void *IP = nullptr; 4408 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4409 return SDValue(E, 0); 4410 4411 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4412 getVTList(VT)); 4413 CSEMap.InsertNode(N, IP); 4414 4415 InsertNode(N); 4416 SDValue V = SDValue(N, 0); 4417 NewSDValueDbgMsg(V, "Creating new node: ", this); 4418 return V; 4419 } 4420 4421 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4422 SDValue Operand) { 4423 SDNodeFlags Flags; 4424 if (Inserter) 4425 Flags = Inserter->getFlags(); 4426 return getNode(Opcode, DL, VT, Operand, Flags); 4427 } 4428 4429 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4430 SDValue Operand, const SDNodeFlags Flags) { 4431 // Constant fold unary operations with an integer constant operand. Even 4432 // opaque constant will be folded, because the folding of unary operations 4433 // doesn't create new constants with different values. Nevertheless, the 4434 // opaque flag is preserved during folding to prevent future folding with 4435 // other constants. 4436 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 4437 const APInt &Val = C->getAPIntValue(); 4438 switch (Opcode) { 4439 default: break; 4440 case ISD::SIGN_EXTEND: 4441 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 4442 C->isTargetOpcode(), C->isOpaque()); 4443 case ISD::TRUNCATE: 4444 if (C->isOpaque()) 4445 break; 4446 LLVM_FALLTHROUGH; 4447 case ISD::ANY_EXTEND: 4448 case ISD::ZERO_EXTEND: 4449 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 4450 C->isTargetOpcode(), C->isOpaque()); 4451 case ISD::UINT_TO_FP: 4452 case ISD::SINT_TO_FP: { 4453 APFloat apf(EVTToAPFloatSemantics(VT), 4454 APInt::getNullValue(VT.getSizeInBits())); 4455 (void)apf.convertFromAPInt(Val, 4456 Opcode==ISD::SINT_TO_FP, 4457 APFloat::rmNearestTiesToEven); 4458 return getConstantFP(apf, DL, VT); 4459 } 4460 case ISD::BITCAST: 4461 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 4462 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 4463 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 4464 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 4465 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 4466 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 4467 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 4468 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 4469 break; 4470 case ISD::ABS: 4471 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 4472 C->isOpaque()); 4473 case ISD::BITREVERSE: 4474 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 4475 C->isOpaque()); 4476 case ISD::BSWAP: 4477 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 4478 C->isOpaque()); 4479 case ISD::CTPOP: 4480 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 4481 C->isOpaque()); 4482 case ISD::CTLZ: 4483 case ISD::CTLZ_ZERO_UNDEF: 4484 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 4485 C->isOpaque()); 4486 case ISD::CTTZ: 4487 case ISD::CTTZ_ZERO_UNDEF: 4488 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 4489 C->isOpaque()); 4490 case ISD::FP16_TO_FP: { 4491 bool Ignored; 4492 APFloat FPV(APFloat::IEEEhalf(), 4493 (Val.getBitWidth() == 16) ? 
Val : Val.trunc(16)); 4494 4495 // This can return overflow, underflow, or inexact; we don't care. 4496 // FIXME need to be more flexible about rounding mode. 4497 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4498 APFloat::rmNearestTiesToEven, &Ignored); 4499 return getConstantFP(FPV, DL, VT); 4500 } 4501 } 4502 } 4503 4504 // Constant fold unary operations with a floating point constant operand. 4505 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4506 APFloat V = C->getValueAPF(); // make copy 4507 switch (Opcode) { 4508 case ISD::FNEG: 4509 V.changeSign(); 4510 return getConstantFP(V, DL, VT); 4511 case ISD::FABS: 4512 V.clearSign(); 4513 return getConstantFP(V, DL, VT); 4514 case ISD::FCEIL: { 4515 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4516 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4517 return getConstantFP(V, DL, VT); 4518 break; 4519 } 4520 case ISD::FTRUNC: { 4521 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4522 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4523 return getConstantFP(V, DL, VT); 4524 break; 4525 } 4526 case ISD::FFLOOR: { 4527 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4528 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4529 return getConstantFP(V, DL, VT); 4530 break; 4531 } 4532 case ISD::FP_EXTEND: { 4533 bool ignored; 4534 // This can return overflow, underflow, or inexact; we don't care. 4535 // FIXME need to be more flexible about rounding mode. 4536 (void)V.convert(EVTToAPFloatSemantics(VT), 4537 APFloat::rmNearestTiesToEven, &ignored); 4538 return getConstantFP(V, DL, VT); 4539 } 4540 case ISD::FP_TO_SINT: 4541 case ISD::FP_TO_UINT: { 4542 bool ignored; 4543 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4544 // FIXME need to be more flexible about rounding mode. 4545 APFloat::opStatus s = 4546 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4547 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4548 break; 4549 return getConstant(IntVal, DL, VT); 4550 } 4551 case ISD::BITCAST: 4552 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4553 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4554 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4555 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4556 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4557 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4558 break; 4559 case ISD::FP_TO_FP16: { 4560 bool Ignored; 4561 // This can return overflow, underflow, or inexact; we don't care. 4562 // FIXME need to be more flexible about rounding mode. 4563 (void)V.convert(APFloat::IEEEhalf(), 4564 APFloat::rmNearestTiesToEven, &Ignored); 4565 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4566 } 4567 } 4568 } 4569 4570 // Constant fold unary operations with a vector integer or float operand. 4571 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4572 if (BV->isConstant()) { 4573 switch (Opcode) { 4574 default: 4575 // FIXME: Entirely reasonable to perform folding of other unary 4576 // operations here as the need arises. 
4577 break;
4578 case ISD::FNEG:
4579 case ISD::FABS:
4580 case ISD::FCEIL:
4581 case ISD::FTRUNC:
4582 case ISD::FFLOOR:
4583 case ISD::FP_EXTEND:
4584 case ISD::FP_TO_SINT:
4585 case ISD::FP_TO_UINT:
4586 case ISD::TRUNCATE:
4587 case ISD::ANY_EXTEND:
4588 case ISD::ZERO_EXTEND:
4589 case ISD::SIGN_EXTEND:
4590 case ISD::UINT_TO_FP:
4591 case ISD::SINT_TO_FP:
4592 case ISD::ABS:
4593 case ISD::BITREVERSE:
4594 case ISD::BSWAP:
4595 case ISD::CTLZ:
4596 case ISD::CTLZ_ZERO_UNDEF:
4597 case ISD::CTTZ:
4598 case ISD::CTTZ_ZERO_UNDEF:
4599 case ISD::CTPOP: {
4600 SDValue Ops = { Operand };
4601 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4602 return Fold;
4603 }
4604 }
4605 }
4606 }
4607
4608 unsigned OpOpcode = Operand.getNode()->getOpcode();
4609 switch (Opcode) {
4610 case ISD::FREEZE:
4611 assert(VT == Operand.getValueType() && "Unexpected VT!");
4612 break;
4613 case ISD::TokenFactor:
4614 case ISD::MERGE_VALUES:
4615 case ISD::CONCAT_VECTORS:
4616 return Operand; // Factor, merge or concat of one node? No need.
4617 case ISD::BUILD_VECTOR: {
4618 // Attempt to simplify BUILD_VECTOR.
4619 SDValue Ops[] = {Operand};
4620 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4621 return V;
4622 break;
4623 }
4624 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4625 case ISD::FP_EXTEND:
4626 assert(VT.isFloatingPoint() &&
4627 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4628 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4629 assert((!VT.isVector() ||
4630 VT.getVectorElementCount() ==
4631 Operand.getValueType().getVectorElementCount()) &&
4632 "Vector element count mismatch!");
4633 assert(Operand.getValueType().bitsLT(VT) &&
4634 "Invalid fpext node, dst < src!");
4635 if (Operand.isUndef())
4636 return getUNDEF(VT);
4637 break;
4638 case ISD::FP_TO_SINT:
4639 case ISD::FP_TO_UINT:
4640 if (Operand.isUndef())
4641 return getUNDEF(VT);
4642 break;
4643 case ISD::SINT_TO_FP:
4644 case ISD::UINT_TO_FP:
4645 // [us]itofp(undef) = 0, because the result value is bounded.
4646 if (Operand.isUndef())
4647 return getConstantFP(0.0, DL, VT);
4648 break;
4649 case ISD::SIGN_EXTEND:
4650 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4651 "Invalid SIGN_EXTEND!");
4652 assert(VT.isVector() == Operand.getValueType().isVector() &&
4653 "SIGN_EXTEND result type should be vector iff the operand "
4654 "type is vector!");
4655 if (Operand.getValueType() == VT) return Operand; // noop extension
4656 assert((!VT.isVector() ||
4657 VT.getVectorElementCount() ==
4658 Operand.getValueType().getVectorElementCount()) &&
4659 "Vector element count mismatch!");
4660 assert(Operand.getValueType().bitsLT(VT) &&
4661 "Invalid sext node, dst < src!");
4662 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4663 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4664 else if (OpOpcode == ISD::UNDEF)
4665 // sext(undef) = 0, because the top bits will all be the same.
4666 return getConstant(0, DL, VT);
4667 break;
4668 case ISD::ZERO_EXTEND:
4669 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4670 "Invalid ZERO_EXTEND!");
4671 assert(VT.isVector() == Operand.getValueType().isVector() &&
4672 "ZERO_EXTEND result type should be vector iff the operand "
4673 "type is vector!");
4674 if (Operand.getValueType() == VT) return Operand; // noop extension
4675 assert((!VT.isVector() ||
4676 VT.getVectorElementCount() ==
4677 Operand.getValueType().getVectorElementCount()) &&
4678 "Vector element count mismatch!");
4679 assert(Operand.getValueType().bitsLT(VT) &&
4680 "Invalid zext node, dst < src!");
4681 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4682 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4683 else if (OpOpcode == ISD::UNDEF)
4684 // zext(undef) = 0, because the top bits will be zero.
4685 return getConstant(0, DL, VT);
4686 break;
4687 case ISD::ANY_EXTEND:
4688 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4689 "Invalid ANY_EXTEND!");
4690 assert(VT.isVector() == Operand.getValueType().isVector() &&
4691 "ANY_EXTEND result type should be vector iff the operand "
4692 "type is vector!");
4693 if (Operand.getValueType() == VT) return Operand; // noop extension
4694 assert((!VT.isVector() ||
4695 VT.getVectorElementCount() ==
4696 Operand.getValueType().getVectorElementCount()) &&
4697 "Vector element count mismatch!");
4698 assert(Operand.getValueType().bitsLT(VT) &&
4699 "Invalid anyext node, dst < src!");
4700
4701 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4702 OpOpcode == ISD::ANY_EXTEND)
4703 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4704 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4705 else if (OpOpcode == ISD::UNDEF)
4706 return getUNDEF(VT);
4707
4708 // (ext (trunc x)) -> x
4709 if (OpOpcode == ISD::TRUNCATE) {
4710 SDValue OpOp = Operand.getOperand(0);
4711 if (OpOp.getValueType() == VT) {
4712 transferDbgValues(Operand, OpOp);
4713 return OpOp;
4714 }
4715 }
4716 break;
4717 case ISD::TRUNCATE:
4718 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4719 "Invalid TRUNCATE!");
4720 assert(VT.isVector() == Operand.getValueType().isVector() &&
4721 "TRUNCATE result type should be vector iff the operand "
4722 "type is vector!");
4723 if (Operand.getValueType() == VT) return Operand; // noop truncate
4724 assert((!VT.isVector() ||
4725 VT.getVectorElementCount() ==
4726 Operand.getValueType().getVectorElementCount()) &&
4727 "Vector element count mismatch!");
4728 assert(Operand.getValueType().bitsGT(VT) &&
4729 "Invalid truncate node, src < dst!");
4730 if (OpOpcode == ISD::TRUNCATE)
4731 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4732 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4733 OpOpcode == ISD::ANY_EXTEND) {
4734 // If the source is smaller than the dest, we still need an extend.
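// For example: (trunc (zext i16 X to i64) to i32) becomes (zext X to i32),
// while (trunc (zext i32 X to i64) to i32) folds directly to X.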
      if (Operand.getOperand(0).getValueType().getScalarType()
              .bitsLT(VT.getScalarType()))
        return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
      if (Operand.getOperand(0).getValueType().bitsGT(VT))
        return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
      return Operand.getOperand(0);
    }
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    assert(VT.isVector() && "This DAG node is restricted to vector types.");
    assert(Operand.getValueType().bitsLE(VT) &&
           "The input must be the same size or smaller than the result.");
    assert(VT.getVectorNumElements() <
               Operand.getValueType().getVectorNumElements() &&
           "The destination vector type must have fewer lanes than the input.");
    break;
  case ISD::ABS:
    assert(VT.isInteger() && VT == Operand.getValueType() &&
           "Invalid ABS!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BSWAP:
    assert(VT.isInteger() && VT == Operand.getValueType() &&
           "Invalid BSWAP!");
    assert((VT.getScalarSizeInBits() % 16 == 0) &&
           "BSWAP types must be a multiple of 16 bits!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BITREVERSE:
    assert(VT.isInteger() && VT == Operand.getValueType() &&
           "Invalid BITREVERSE!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BITCAST:
    // Basic sanity checking.
    assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
           "Cannot BITCAST between types of different sizes!");
    if (VT == Operand.getValueType()) return Operand; // noop conversion.
    if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
      return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SCALAR_TO_VECTOR:
    assert(VT.isVector() && !Operand.getValueType().isVector() &&
           (VT.getVectorElementType() == Operand.getValueType() ||
            (VT.getVectorElementType().isInteger() &&
             Operand.getValueType().isInteger() &&
             VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
           "Illegal SCALAR_TO_VECTOR node!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
        isa<ConstantSDNode>(Operand.getOperand(1)) &&
        Operand.getConstantOperandVal(1) == 0 &&
        Operand.getOperand(0).getValueType() == VT)
      return Operand.getOperand(0);
    break;
  case ISD::FNEG:
    // Negation of an unknown bag of bits is still completely undefined.
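    // (Unlike [us]itofp above, which folds undef to 0.0 because its result
    // is bounded, fneg's result range is unbounded.)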
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    if (OpOpcode == ISD::FNEG) // --X -> X
      return Operand.getOperand(0);
    break;
  case ISD::FABS:
    if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
      return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
    break;
  case ISD::VSCALE:
    assert(VT == Operand.getValueType() && "Unexpected VT!");
    break;
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
    if (Operand.getValueType().getScalarType() == MVT::i1)
      return getNode(ISD::VECREDUCE_OR, DL, VT, Operand);
    break;
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_UMIN:
    if (Operand.getValueType().getScalarType() == MVT::i1)
      return getNode(ISD::VECREDUCE_AND, DL, VT, Operand);
    break;
  }

  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {Operand};
  if (VT != MVT::Glue) { // Don't CSE flag producing nodes
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      E->intersectFlagsWith(Flags);
      return SDValue(E, 0);
    }

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    N->setFlags(Flags);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
                                       const APInt &C2) {
  switch (Opcode) {
  case ISD::ADD:  return C1 + C2;
  case ISD::SUB:  return C1 - C2;
  case ISD::MUL:  return C1 * C2;
  case ISD::AND:  return C1 & C2;
  case ISD::OR:   return C1 | C2;
  case ISD::XOR:  return C1 ^ C2;
  case ISD::SHL:  return C1 << C2;
  case ISD::SRL:  return C1.lshr(C2);
  case ISD::SRA:  return C1.ashr(C2);
  case ISD::ROTL: return C1.rotl(C2);
  case ISD::ROTR: return C1.rotr(C2);
  case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
  case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
  case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
  case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
  case ISD::SADDSAT: return C1.sadd_sat(C2);
  case ISD::UADDSAT: return C1.uadd_sat(C2);
  case ISD::SSUBSAT: return C1.ssub_sat(C2);
  case ISD::USUBSAT: return C1.usub_sat(C2);
  case ISD::UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case ISD::UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case ISD::SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case ISD::SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  }
  return llvm::None;
}

SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
                                       const GlobalAddressSDNode *GA,
                                       const SDNode *N2) {
  if (GA->getOpcode() != ISD::GlobalAddress)
    return SDValue();
  if (!TLI->isOffsetFoldingLegal(GA))
    return SDValue();
  auto *C2 = dyn_cast<ConstantSDNode>(N2);
  if (!C2)
    return SDValue();
  int64_t Offset = C2->getSExtValue();
  switch (Opcode) {
  case ISD::ADD: break;
  case ISD::SUB: Offset = -uint64_t(Offset); break;
  default: return SDValue();
  }
  return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
                          GA->getOffset() + uint64_t(Offset));
}

bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
  switch (Opcode) {
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM: {
    // If a divisor is zero/undef or any element of a divisor vector is
    // zero/undef, the whole op is undef.
    assert(Ops.size() == 2 && "Div/rem should have 2 operands");
    SDValue Divisor = Ops[1];
    if (Divisor.isUndef() || isNullConstant(Divisor))
      return true;

    return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
           llvm::any_of(Divisor->op_values(),
                        [](SDValue V) { return V.isUndef() ||
                                        isNullConstant(V); });
    // TODO: Handle signed overflow.
  }
  // TODO: Handle oversized shifts.
  default:
    return false;
  }
}

SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
                                             EVT VT, ArrayRef<SDValue> Ops) {
  // If the opcode is a target-specific ISD node, there's nothing we can
  // do here and the operand rules may not line up with the below, so
  // bail early.
  if (Opcode >= ISD::BUILTIN_OP_END)
    return SDValue();

  // For now, the array Ops should only contain two values.
  // This enforcement will be removed once this function is merged with
  // FoldConstantVectorArithmetic.
  if (Ops.size() != 2)
    return SDValue();

  if (isUndef(Opcode, Ops))
    return getUNDEF(VT);

  SDNode *N1 = Ops[0].getNode();
  SDNode *N2 = Ops[1].getNode();

  // Handle the case of two scalars.
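  // e.g. (add (i32 3), (i32 5)) folds to (i32 8) via FoldValue above, unless
  // either constant is marked opaque.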
  if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
    if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
      if (C1->isOpaque() || C2->isOpaque())
        return SDValue();

      Optional<APInt> FoldAttempt =
          FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
      if (!FoldAttempt)
        return SDValue();

      SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT);
      assert((!Folded || !VT.isVector()) &&
             "Can't fold vector ops with scalar operands");
      return Folded;
    }
  }

  // fold (add Sym, c) -> Sym+c
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
    return FoldSymbolOffset(Opcode, VT, GA, N2);
  if (TLI->isCommutativeBinOp(Opcode))
    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
      return FoldSymbolOffset(Opcode, VT, GA, N1);

  // TODO: All the folds below are performed lane-by-lane and assume a fixed
  // vector width, however we should be able to do constant folds involving
  // splat vector nodes too.
  if (VT.isScalableVector())
    return SDValue();

  // For fixed width vectors, extract each constant element and fold them
  // individually. Either input may be an undef value.
  auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
  if (!BV1 && !N1->isUndef())
    return SDValue();
  auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
  if (!BV2 && !N2->isUndef())
    return SDValue();
  // If both operands are undef, that's handled the same way as scalars.
  if (!BV1 && !BV2)
    return SDValue();

  assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
         "Vector binop with different number of elements in operands?");

  EVT SVT = VT.getScalarType();
  EVT LegalSVT = SVT;
  if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
    LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
    if (LegalSVT.bitsLT(SVT))
      return SDValue();
  }
  SmallVector<SDValue, 4> Outputs;
  unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
  for (unsigned I = 0; I != NumOps; ++I) {
    SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
    SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
    if (SVT.isInteger()) {
      if (V1->getValueType(0).bitsGT(SVT))
        V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
      if (V2->getValueType(0).bitsGT(SVT))
        V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
    }

    if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
      return SDValue();

    // Fold one vector element.
    SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
    if (LegalSVT != SVT)
      ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);

    // Scalar folding only succeeded if the result is a constant or UNDEF.
    if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
        ScalarResult.getOpcode() != ISD::ConstantFP)
      return SDValue();
    Outputs.push_back(ScalarResult);
  }

  assert(VT.getVectorNumElements() == Outputs.size() &&
         "Vector size mismatch!");

  // We may have a vector type but a scalar result. Create a splat.
  Outputs.resize(VT.getVectorNumElements(), Outputs.back());

  // Build a big vector out of the scalar elements we generated.
  return getBuildVector(VT, SDLoc(), Outputs);
}

// TODO: Merge with FoldConstantArithmetic
SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
                                                   const SDLoc &DL, EVT VT,
                                                   ArrayRef<SDValue> Ops,
                                                   const SDNodeFlags Flags) {
  // If the opcode is a target-specific ISD node, there's nothing we can
  // do here and the operand rules may not line up with the below, so
  // bail early.
  if (Opcode >= ISD::BUILTIN_OP_END)
    return SDValue();

  if (isUndef(Opcode, Ops))
    return getUNDEF(VT);

  // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
  if (!VT.isVector())
    return SDValue();

  // TODO: All the folds below are performed lane-by-lane and assume a fixed
  // vector width, however we should be able to do constant folds involving
  // splat vector nodes too.
  if (VT.isScalableVector())
    return SDValue();

  // From this point onwards all vectors are assumed to be fixed width.
  unsigned NumElts = VT.getVectorNumElements();

  auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
    return !Op.getValueType().isVector() ||
           Op.getValueType().getVectorNumElements() == NumElts;
  };

  auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
    BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
    return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
           (BV && BV->isConstant());
  };

  // All operands must be vector types with the same number of elements as
  // the result type and must be either UNDEF or a build vector of constant
  // or UNDEF scalars.
  if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
      !llvm::all_of(Ops, IsScalarOrSameVectorSize))
    return SDValue();

  // If we are comparing vectors, then the result needs to be an i1 boolean
  // that is then sign-extended back to the legal result type.
  EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());

  // Find legal integer scalar type for constant promotion and
  // ensure that its scalar size is at least as large as source.
  EVT LegalSVT = VT.getScalarType();
  if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
    LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
    if (LegalSVT.bitsLT(VT.getScalarType()))
      return SDValue();
  }

  // Constant fold each scalar lane separately.
  SmallVector<SDValue, 4> ScalarResults;
  for (unsigned i = 0; i != NumElts; i++) {
    SmallVector<SDValue, 4> ScalarOps;
    for (SDValue Op : Ops) {
      EVT InSVT = Op.getValueType().getScalarType();
      BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
      if (!InBV) {
        // We've checked that this is UNDEF or a constant of some kind.
        if (Op.isUndef())
          ScalarOps.push_back(getUNDEF(InSVT));
        else
          ScalarOps.push_back(Op);
        continue;
      }

      SDValue ScalarOp = InBV->getOperand(i);
      EVT ScalarVT = ScalarOp.getValueType();

      // Build vector (integer) scalar operands may need implicit
      // truncation - do this before constant folding.
      if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
        ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);

      ScalarOps.push_back(ScalarOp);
    }

    // Constant fold the scalar operands.
    SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);

    // Legalize the (integer) scalar constant if necessary.
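    // Sign-extending here keeps an all-ones i1 "true" all-ones in the wider
    // legal type, matching the boolean convention chosen for SETCC above.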
    if (LegalSVT != SVT)
      ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);

    // Scalar folding only succeeded if the result is a constant or UNDEF.
    if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
        ScalarResult.getOpcode() != ISD::ConstantFP)
      return SDValue();
    ScalarResults.push_back(ScalarResult);
  }

  SDValue V = getBuildVector(VT, DL, ScalarResults);
  NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
  return V;
}

SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
                                         EVT VT, SDValue N1, SDValue N2) {
  // TODO: We don't do any constant folding for strict FP opcodes here, but we
  //       should. That will require dealing with a potentially non-default
  //       rounding mode, checking the "opStatus" return value from the APFloat
  //       math calculations, and possibly other variations.
  auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
  auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
  if (N1CFP && N2CFP) {
    APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF();
    switch (Opcode) {
    case ISD::FADD:
      C1.add(C2, APFloat::rmNearestTiesToEven);
      return getConstantFP(C1, DL, VT);
    case ISD::FSUB:
      C1.subtract(C2, APFloat::rmNearestTiesToEven);
      return getConstantFP(C1, DL, VT);
    case ISD::FMUL:
      C1.multiply(C2, APFloat::rmNearestTiesToEven);
      return getConstantFP(C1, DL, VT);
    case ISD::FDIV:
      C1.divide(C2, APFloat::rmNearestTiesToEven);
      return getConstantFP(C1, DL, VT);
    case ISD::FREM:
      C1.mod(C2);
      return getConstantFP(C1, DL, VT);
    case ISD::FCOPYSIGN:
      C1.copySign(C2);
      return getConstantFP(C1, DL, VT);
    default: break;
    }
  }
  if (N1CFP && Opcode == ISD::FP_ROUND) {
    APFloat C1 = N1CFP->getValueAPF(); // make copy
    bool Unused;
    // This can return overflow, underflow, or inexact; we don't care.
    // FIXME: need to be more flexible about rounding mode.
    (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
                      &Unused);
    return getConstantFP(C1, DL, VT);
  }

  switch (Opcode) {
  case ISD::FSUB:
    // -0.0 - undef --> undef (consistent with "fneg undef")
    if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef())
      return getUNDEF(VT);
    LLVM_FALLTHROUGH;

  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    // If both operands are undef, the result is undef. If one operand is
    // undef, the result is NaN. This should match the behavior of the IR
    // optimizer.
    if (N1.isUndef() && N2.isUndef())
      return getUNDEF(VT);
    if (N1.isUndef() || N2.isUndef())
      return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
  }
  return SDValue();
}

SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) {
  assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");

  // There's no need to assert on a byte-aligned pointer. All pointers are at
  // least byte aligned.
  if (A == Align(1))
    return Val;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val});
  ID.AddInteger(A.value());

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(),
                                         Val.getValueType(), A);
  createOperands(N, {Val});

  CSEMap.InsertNode(N, IP);
  InsertNode(N);

  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2) {
  SDNodeFlags Flags;
  if (Inserter)
    Flags = Inserter->getFlags();
  return getNode(Opcode, DL, VT, N1, N2, Flags);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, const SDNodeFlags Flags) {
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);

  // Canonicalize constant to RHS if commutative.
  if (TLI->isCommutativeBinOp(Opcode)) {
    if (N1C && !N2C) {
      std::swap(N1C, N2C);
      std::swap(N1, N2);
    } else if (N1CFP && !N2CFP) {
      std::swap(N1CFP, N2CFP);
      std::swap(N1, N2);
    }
  }

  switch (Opcode) {
  default: break;
  case ISD::TokenFactor:
    assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
           N2.getValueType() == MVT::Other && "Invalid token factor!");
    // Fold trivial token factors.
    if (N1.getOpcode() == ISD::EntryToken) return N2;
    if (N2.getOpcode() == ISD::EntryToken) return N1;
    if (N1 == N2) return N1;
    break;
  case ISD::BUILD_VECTOR: {
    // Attempt to simplify BUILD_VECTOR.
    SDValue Ops[] = {N1, N2};
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::CONCAT_VECTORS: {
    SDValue Ops[] = {N1, N2};
    if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::AND:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
    // worth handling here.
    if (N2C && N2C->isNullValue())
      return N2;
    if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
      return N1;
    break;
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
    // it's worth handling here.
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::MUL:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
      APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue();
      APInt N2CImm = N2C->getAPIntValue();
      return getVScale(DL, VT, MulImm * N2CImm);
    }
    break;
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::SDIV:
  case ISD::SREM:
  case ISD::SADDSAT:
  case ISD::SSUBSAT:
  case ISD::UADDSAT:
  case ISD::USUBSAT:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::SMIN:
  case ISD::UMAX:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
      return getNode(ISD::OR, DL, VT, N1, N2);
    break;
  case ISD::SMAX:
  case ISD::UMIN:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
      return getNode(ISD::AND, DL, VT, N1, N2);
    break;
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
      return V;
    break;
  case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
    assert(N1.getValueType() == VT &&
           N1.getValueType().isFloatingPoint() &&
           N2.getValueType().isFloatingPoint() &&
           "Invalid FCOPYSIGN!");
    break;
  case ISD::SHL:
    if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
      APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue();
      APInt ShiftImm = N2C->getAPIntValue();
      return getVScale(DL, VT, MulImm << ShiftImm);
    }
    LLVM_FALLTHROUGH;
  case ISD::SRA:
  case ISD::SRL:
    if (SDValue V = simplifyShift(N1, N2))
      return V;
    LLVM_FALLTHROUGH;
  case ISD::ROTL:
  case ISD::ROTR:
    assert(VT == N1.getValueType() &&
           "Shift operators return type must be the same as their first arg");
    assert(VT.isInteger() && N2.getValueType().isInteger() &&
           "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must be the same type as their first arg");
    // Verify that the shift amount VT is big enough to hold valid shift
    // amounts. This catches things like trying to shift an i1024 value by an
    // i8, which is easy to fall into in generic code that uses
    // TLI.getShiftAmount().
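    // e.g. shifting an i1024 requires a shift-amount type of at least
    // Log2_32_Ceil(1024) = 10 bits, so an i8 amount would trip this assert.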
    assert(N2.getValueType().getScalarSizeInBits() >=
               Log2_32_Ceil(VT.getScalarSizeInBits()) &&
           "Invalid use of small shift amount with oversized value!");

    // Always fold shifts of i1 values so the code generator doesn't need to
    // handle them. Since we know the size of the shift has to be less than the
    // size of the value, the shift/rotate count is guaranteed to be zero.
    if (VT == MVT::i1)
      return N1;
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::FP_ROUND:
    assert(VT.isFloatingPoint() &&
           N1.getValueType().isFloatingPoint() &&
           VT.bitsLE(N1.getValueType()) &&
           N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
           "Invalid FP_ROUND!");
    if (N1.getValueType() == VT) return N1; // noop conversion.
    break;
  case ISD::AssertSext:
  case ISD::AssertZext: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(!EVT.isVector() &&
           "AssertSExt/AssertZExt type should be the vector element type "
           "rather than the vector type!");
    assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
    if (VT.getScalarType() == EVT) return N1; // noop assertion.
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(EVT.isVector() == VT.isVector() &&
           "SIGN_EXTEND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorElementCount() == VT.getVectorElementCount()) &&
           "Vector element counts must match in SIGN_EXTEND_INREG");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (EVT == VT) return N1; // Not actually extending

    auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
      unsigned FromBits = EVT.getScalarSizeInBits();
      Val <<= Val.getBitWidth() - FromBits;
      Val.ashrInPlace(Val.getBitWidth() - FromBits);
      return getConstant(Val, DL, ConstantVT);
    };

    if (N1C) {
      const APInt &Val = N1C->getAPIntValue();
      return SignExtendInReg(Val, VT);
    }
    if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
      SmallVector<SDValue, 8> Ops;
      llvm::EVT OpVT = N1.getOperand(0).getValueType();
      for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
        SDValue Op = N1.getOperand(i);
        if (Op.isUndef()) {
          Ops.push_back(getUNDEF(OpVT));
          continue;
        }
        ConstantSDNode *C = cast<ConstantSDNode>(Op);
        APInt Val = C->getAPIntValue();
        Ops.push_back(SignExtendInReg(Val, OpVT));
      }
      return getBuildVector(VT, DL, Ops);
    }
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
           "The result of EXTRACT_VECTOR_ELT must be at least as wide as the "
           "element type of the vector.");

    // Extract from an undefined value or using an undefined index is undefined.
    if (N1.isUndef() || N2.isUndef())
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
    // vectors. For scalable vectors we will provide appropriate support for
    // dealing with arbitrary indices.
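    // e.g. (extract_vector_elt (v4i32 V), 7) folds to undef here.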
    if (N2C && N1.getValueType().isFixedLengthVector() &&
        N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
    // expanding copies of large vectors from registers. This only works for
    // fixed length vectors, since we need to know the exact number of
    // elements.
    if (N2C && N1.getOperand(0).getValueType().isFixedLengthVector() &&
        N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0) {
      unsigned Factor =
          N1.getOperand(0).getValueType().getVectorNumElements();
      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                     N1.getOperand(N2C->getZExtValue() / Factor),
                     getVectorIdxConstant(N2C->getZExtValue() % Factor, DL));
    }

    // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
    // lowering is expanding large vector constants.
    if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
                N1.getOpcode() == ISD::SPLAT_VECTOR)) {
      assert((N1.getOpcode() != ISD::BUILD_VECTOR ||
              N1.getValueType().isFixedLengthVector()) &&
             "BUILD_VECTOR used for scalable vectors");
      unsigned Index =
          N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
      SDValue Elt = N1.getOperand(Index);

      if (VT != Elt.getValueType())
        // If the vector element type is not legal, the BUILD_VECTOR operands
        // are promoted and implicitly truncated, and the result implicitly
        // extended. Make that explicit here.
        Elt = getAnyExtOrTrunc(Elt, DL, VT);

      return Elt;
    }

    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
    // operations are lowered to scalars.
    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      // If the indices are the same, return the inserted element, else if
      // the indices are known different, extract the element from the
      // original vector.
      SDValue N1Op2 = N1.getOperand(2);
      ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);

      if (N1Op2C && N2C) {
        if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
          if (VT == N1.getOperand(1).getValueType())
            return N1.getOperand(1);
          else
            return getSExtOrTrunc(N1.getOperand(1), DL, VT);
        }

        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
      }
    }

    // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
    // when vector types are scalarized and v1iX is legal.
    // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx).
    // Here we are completely ignoring the extract element index (N2),
    // which is fine for fixed width vectors, since any index other than 0
    // is undefined anyway. However, this cannot be ignored for scalable
    // vectors - in theory we could support this, but we don't want to do this
    // without a profitability check.
    if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        N1.getValueType().isFixedLengthVector() &&
        N1.getValueType().getVectorNumElements() == 1) {
      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
                     N1.getOperand(1));
    }
    break;
  case ISD::EXTRACT_ELEMENT:
    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
    assert(!N1.getValueType().isVector() && !VT.isVector() &&
           (N1.getValueType().isInteger() == VT.isInteger()) &&
           N1.getValueType() != VT &&
           "Wrong types for EXTRACT_ELEMENT!");

    // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
    // 64-bit integers into 32-bit parts. Instead of building the extract of
    // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
    if (N1.getOpcode() == ISD::BUILD_PAIR)
      return N1.getOperand(N2C->getZExtValue());

    // EXTRACT_ELEMENT of a constant int is also very common.
    if (N1C) {
      unsigned ElementSize = VT.getSizeInBits();
      unsigned Shift = ElementSize * N2C->getZExtValue();
      APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
      return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
    }
    break;
  case ISD::EXTRACT_SUBVECTOR:
    EVT N1VT = N1.getValueType();
    assert(VT.isVector() && N1VT.isVector() &&
           "Extract subvector VTs must be vectors!");
    assert(VT.getVectorElementType() == N1VT.getVectorElementType() &&
           "Extract subvector VTs must have the same element type!");
    assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
           "Cannot extract a scalable vector from a fixed length vector!");
    assert((VT.isScalableVector() != N1VT.isScalableVector() ||
            VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) &&
           "Extract subvector must be from larger vector to smaller vector!");
    assert(N2C && "Extract subvector index must be a constant");
    assert((VT.isScalableVector() != N1VT.isScalableVector() ||
            (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
                N1VT.getVectorMinNumElements()) &&
           "Extract subvector overflow!");
    assert(N2C->getAPIntValue().getBitWidth() ==
               TLI->getVectorIdxTy(getDataLayout())
                   .getSizeInBits()
                   .getFixedSize() &&
           "Constant index for EXTRACT_SUBVECTOR has an invalid size");

    // Trivial extraction.
    if (VT == N1VT)
      return N1;

    // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
    if (N1.isUndef())
      return getUNDEF(VT);

    // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
    // the concat have the same type as the extract.
    if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
        VT == N1.getOperand(0).getValueType()) {
      unsigned Factor = VT.getVectorMinNumElements();
      return N1.getOperand(N2C->getZExtValue() / Factor);
    }

    // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
    // during shuffle legalization.
    if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
        VT == N1.getOperand(1).getValueType())
      return N1.getOperand(1);
    break;
  }

  // Perform trivial constant folding.
  if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}))
    return SV;

  if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
    return V;

  // Canonicalize an UNDEF to the RHS, even over a constant.
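  // e.g. (add undef, x) becomes (add x, undef), so for commutative opcodes
  // only the RHS-undef folds below are needed.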
  if (N1.isUndef()) {
    if (TLI->isCommutativeBinOp(Opcode)) {
      std::swap(N1, N2);
    } else {
      switch (Opcode) {
      case ISD::SIGN_EXTEND_INREG:
      case ISD::SUB:
        return getUNDEF(VT); // fold op(undef, arg2) -> undef
      case ISD::UDIV:
      case ISD::SDIV:
      case ISD::UREM:
      case ISD::SREM:
      case ISD::SSUBSAT:
      case ISD::USUBSAT:
        return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
      }
    }
  }

  // Fold a bunch of operators when the RHS is undef.
  if (N2.isUndef()) {
    switch (Opcode) {
    case ISD::XOR:
      if (N1.isUndef())
        // Handle undef ^ undef -> 0 special case. This is a common
        // idiom (misuse).
        return getConstant(0, DL, VT);
      LLVM_FALLTHROUGH;
    case ISD::ADD:
    case ISD::SUB:
    case ISD::UDIV:
    case ISD::SDIV:
    case ISD::UREM:
    case ISD::SREM:
      return getUNDEF(VT); // fold op(arg1, undef) -> undef
    case ISD::MUL:
    case ISD::AND:
    case ISD::SSUBSAT:
    case ISD::USUBSAT:
      return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
    case ISD::OR:
    case ISD::SADDSAT:
    case ISD::UADDSAT:
      return getAllOnesConstant(DL, VT);
    }
  }

  // Memoize this node if possible.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {N1, N2};
  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      E->intersectFlagsWith(Flags);
      return SDValue(E, 0);
    }

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    N->setFlags(Flags);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDNodeFlags Flags;
  if (Inserter)
    Flags = Inserter->getFlags();
  return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              const SDNodeFlags Flags) {
  // Perform various simplifications.
  switch (Opcode) {
  case ISD::FMA: {
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == VT && N2.getValueType() == VT &&
           N3.getValueType() == VT && "FMA types must match!");
    ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
    ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
    ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
    if (N1CFP && N2CFP && N3CFP) {
      APFloat V1 = N1CFP->getValueAPF();
      const APFloat &V2 = N2CFP->getValueAPF();
      const APFloat &V3 = N3CFP->getValueAPF();
      V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
      return getConstantFP(V1, DL, VT);
    }
    break;
  }
  case ISD::BUILD_VECTOR: {
    // Attempt to simplify BUILD_VECTOR.
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::CONCAT_VECTORS: {
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::SETCC: {
    assert(VT.isInteger() && "SETCC result type must be an integer!");
    assert(N1.getValueType() == N2.getValueType() &&
           "SETCC operands must have the same type!");
    assert(VT.isVector() == N1.getValueType().isVector() &&
           "SETCC type should be vector iff the operand type is vector!");
    assert((!VT.isVector() || VT.getVectorElementCount() ==
                                  N1.getValueType().getVectorElementCount()) &&
           "SETCC vector element counts must match!");
    // Use FoldSetCC to simplify SETCC's.
    if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
      return V;
    // Vector constant folding.
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
      NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
      return V;
    }
    break;
  }
  case ISD::SELECT:
  case ISD::VSELECT:
    if (SDValue V = simplifySelect(N1, N2, N3))
      return V;
    break;
  case ISD::VECTOR_SHUFFLE:
    llvm_unreachable("should use getVectorShuffle constructor!");
  case ISD::INSERT_VECTOR_ELT: {
    ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
    // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
    // for scalable vectors where we will generate appropriate code to
    // deal with out-of-bounds cases correctly.
    if (N3C && N1.getValueType().isFixedLengthVector() &&
        N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
      return getUNDEF(VT);

    // Undefined index can be assumed out-of-bounds, so that's UNDEF too.
    if (N3.isUndef())
      return getUNDEF(VT);

    // If the inserted element is an UNDEF, just use the input vector.
    if (N2.isUndef())
      return N1;

    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Inserting undef into undef is still undef.
    if (N1.isUndef() && N2.isUndef())
      return getUNDEF(VT);

    EVT N2VT = N2.getValueType();
    assert(VT == N1.getValueType() &&
           "Dest and insert subvector source types must match!");
    assert(VT.isVector() && N2VT.isVector() &&
           "Insert subvector VTs must be vectors!");
    assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) &&
           "Cannot insert a scalable vector into a fixed length vector!");
    assert((VT.isScalableVector() != N2VT.isScalableVector() ||
            VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) &&
           "Insert subvector must be from smaller vector to larger vector!");
    assert(isa<ConstantSDNode>(N3) &&
           "Insert subvector index must be constant");
    assert((VT.isScalableVector() != N2VT.isScalableVector() ||
            (N2VT.getVectorMinNumElements() +
             cast<ConstantSDNode>(N3)->getZExtValue()) <=
                VT.getVectorMinNumElements()) &&
           "Insert subvector overflow!");

    // Trivial insertion.
    if (VT == N2VT)
      return N2;

    // If this is an insert of an extracted vector into an undef vector, we
    // can just use the input to the extract.
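    // i.e. (insert_subvector undef, (extract_subvector V, Idx), Idx) --> V
    // when V's type matches the result type.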
    if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
      return N2.getOperand(0);
    break;
  }
  case ISD::BITCAST:
    // Fold bit_convert nodes from a type to themselves.
    if (N1.getValueType() == VT)
      return N1;
    break;
  }

  // Memoize node if it doesn't produce a flag.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {N1, N2, N3};
  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      E->intersectFlagsWith(Flags);
      return SDValue(E, 0);
    }

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    N->setFlags(Flags);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VT, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VT, Ops);
}

/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
/// the incoming stack arguments to be loaded from the stack.
SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
  SmallVector<SDValue, 8> ArgChains;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument.
  for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
       UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0)
          ArgChains.push_back(SDValue(L, 1));

  // Build a tokenfactor for all the chains.
  return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

/// getMemsetValue - Vectorized representation of the memset value
/// operand.
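/// For example, an i8 fill byte of 0xAB stored as i32 becomes 0xABABABAB,
/// built with APInt::getSplat for constant fill values, or with a multiply
/// by the splatted constant 0x01010101 otherwise.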
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
                              const SDLoc &dl) {
  assert(!Value.isUndef());

  unsigned NumBits = VT.getScalarSizeInBits();
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
    assert(C->getAPIntValue().getBitWidth() == 8);
    APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
    if (VT.isInteger()) {
      bool IsOpaque = VT.getSizeInBits() > 64 ||
          !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
      return DAG.getConstant(Val, dl, VT, false, IsOpaque);
    }
    return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
                             VT);
  }

  assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
  EVT IntVT = VT.getScalarType();
  if (!IntVT.isInteger())
    IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
  if (NumBits > 8) {
    // Use a multiplication with 0x010101... to extend the input to the
    // required length.
    APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
    Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
                        DAG.getConstant(Magic, dl, IntVT));
  }

  if (VT != Value.getValueType() && !VT.isInteger())
    Value = DAG.getBitcast(VT.getScalarType(), Value);
  if (VT != Value.getValueType())
    Value = DAG.getSplatBuildVector(VT, dl, Value);

  return Value;
}

/// getMemsetStringVal - Similar to getMemsetValue. Except this is only
/// used when a memcpy is turned into a memset when the source is a constant
/// string ptr.
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
                                  const TargetLowering &TLI,
                                  const ConstantDataArraySlice &Slice) {
  // Handle vector with all elements zero.
  if (Slice.Array == nullptr) {
    if (VT.isInteger())
      return DAG.getConstant(0, dl, VT);
    else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
      return DAG.getConstantFP(0.0, dl, VT);
    else if (VT.isVector()) {
      unsigned NumElts = VT.getVectorNumElements();
      MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
      return DAG.getNode(ISD::BITCAST, dl, VT,
                         DAG.getConstant(0, dl,
                                         EVT::getVectorVT(*DAG.getContext(),
                                                          EltVT, NumElts)));
    } else
      llvm_unreachable("Expected type!");
  }

  assert(!VT.isVector() && "Can't handle vector type here!");
  unsigned NumVTBits = VT.getSizeInBits();
  unsigned NumVTBytes = NumVTBits / 8;
  unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));

  APInt Val(NumVTBits, 0);
  if (DAG.getDataLayout().isLittleEndian()) {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
  } else {
    for (unsigned i = 0; i != NumBytes; ++i)
      Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
  }

  // If the "cost" of materializing the integer immediate is less than the cost
  // of a load, then it is cost effective to turn the load into the immediate.
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
    return DAG.getConstant(Val, dl, VT);
  return SDValue(nullptr, 0);
}

SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
                                           const SDLoc &DL,
                                           const SDNodeFlags Flags) {
  EVT VT = Base.getValueType();
  SDValue Index;

  if (Offset.isScalable())
    Index = getVScale(DL, Base.getValueType(),
                      APInt(Base.getValueSizeInBits().getFixedSize(),
                            Offset.getKnownMinSize()));
  else
    Index = getConstant(Offset.getFixedSize(), DL, VT);

  return getMemBasePlusOffset(Base, Index, DL, Flags);
}

SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
                                           const SDLoc &DL,
                                           const SDNodeFlags Flags) {
  assert(Offset.getValueType().isInteger());
  EVT BasePtrVT = Ptr.getValueType();
  return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags);
}

/// Returns true if memcpy source is constant data.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
  uint64_t SrcDelta = 0;
  GlobalAddressSDNode *G = nullptr;
  if (Src.getOpcode() == ISD::GlobalAddress)
    G = cast<GlobalAddressSDNode>(Src);
  else if (Src.getOpcode() == ISD::ADD &&
           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
           Src.getOperand(1).getOpcode() == ISD::Constant) {
    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
  }
  if (!G)
    return false;

  return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
                                  SrcDelta + G->getOffset());
}

static bool shouldLowerMemFuncForSize(const MachineFunction &MF,
                                      SelectionDAG &DAG) {
  // On Darwin, -Os means optimize for size without hurting performance, so
  // only really optimize for size when -Oz (MinSize) is used.
  if (MF.getTarget().getTargetTriple().isOSDarwin())
    return MF.getFunction().hasMinSize();
  return DAG.shouldOptForSize();
}

static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
                          SmallVector<SDValue, 32> &OutChains, unsigned From,
                          unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
                          SmallVector<SDValue, 16> &OutStoreChains) {
  assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
  assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
  SmallVector<SDValue, 16> GluedLoadChains;
  for (unsigned i = From; i < To; ++i) {
    OutChains.push_back(OutLoadChains[i]);
    GluedLoadChains.push_back(OutLoadChains[i]);
  }

  // Chain for all loads.
  SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  GluedLoadChains);

  for (unsigned i = From; i < To; ++i) {
    StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
    SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
                                         ST->getBasePtr(), ST->getMemoryVT(),
                                         ST->getMemOperand());
    OutChains.push_back(NewStore);
  }
}

static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue Chain, SDValue Dst, SDValue Src,
                                       uint64_t Size, Align Alignment,
                                       bool isVol, bool AlwaysInline,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Turn a memcpy of undef to nop.
  // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memcpy to a series of load and store ops if the size operand falls
  // below a certain threshold.
  // TODO: In the AlwaysInline case, if the size is big then generate a loop
  // rather than maybe a humongous number of loads and stores.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
  if (!SrcAlign || Alignment > *SrcAlign)
    SrcAlign = Alignment;
  assert(SrcAlign && "SrcAlign must be set");
  ConstantDataArraySlice Slice;
  // If marked as volatile, perform a copy even when marked as constant.
  bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
  bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
  const MemOp Op = isZeroConstant
                       ? MemOp::Set(Size, DstAlignCanChange, Alignment,
                                    /*IsZeroMemset*/ true, isVol)
                       : MemOp::Copy(Size, DstAlignCanChange, Alignment,
                                     *SrcAlign, isVol, CopyFromConstant);
  if (!TLI.findOptimalMemOpLowering(
          MemOps, Limit, Op, DstPtrInfo.getAddrSpace(),
          SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    Align NewAlign = DL.getABITypeAlign(Ty);

    // Don't promote to an alignment that would require dynamic stack
    // realignment.
    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    if (!TRI->needsStackRealignment(MF))
      while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
        NewAlign = NewAlign / 2;

    if (NewAlign > Alignment) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Alignment = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  SmallVector<SDValue, 16> OutLoadChains;
  SmallVector<SDValue, 16> OutStoreChains;
  SmallVector<SDValue, 32> OutChains;
  unsigned NumMemOps = MemOps.size();
  uint64_t SrcOff = 0, DstOff = 0;
  for (unsigned i = 0; i != NumMemOps; ++i) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      SrcOff -= VTSize - Size;
      DstOff -= VTSize - Size;
    }

    if (CopyFromConstant &&
        (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
      // It's unlikely a store of a vector immediate can be done in a single
      // instruction. It would require a load from a constant pool first.
      // We only handle zero vectors here.
      // FIXME: Handle other cases where store of vector immediate is done in
      // a single instruction.
      ConstantDataArraySlice SubSlice;
      if (SrcOff < Slice.Length) {
        SubSlice = Slice;
        SubSlice.move(SrcOff);
      } else {
        // This is an out-of-bounds access and hence UB. Pretend we read zero.
        SubSlice.Array = nullptr;
        SubSlice.Offset = 0;
        SubSlice.Length = VTSize;
      }
      Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
      if (Value.getNode()) {
        Store = DAG.getStore(
            Chain, dl, Value,
            DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
            DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
        OutChains.push_back(Store);
      }
    }

    if (!Store.getNode()) {
      // The type might not be legal for the target. This should only happen
      // if the type is smaller than a legal type, as on PPC, so the right
      // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
      // to Load/Store if NVT==VT.
      // FIXME: does the case above also need this?
      EVT NVT = TLI.getTypeToTransformTo(C, VT);
      assert(NVT.bitsGE(VT));

      bool isDereferenceable =
        SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
      MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
      if (isDereferenceable)
        SrcMMOFlags |= MachineMemOperand::MODereferenceable;

      Value = DAG.getExtLoad(
          ISD::EXTLOAD, dl, NVT, Chain,
          DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
          SrcPtrInfo.getWithOffset(SrcOff), VT,
          commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags);
      OutLoadChains.push_back(Value.getValue(1));

      Store = DAG.getTruncStore(
          Chain, dl, Value,
          DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
          DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
      OutStoreChains.push_back(Store);
    }
    SrcOff += VTSize;
    DstOff += VTSize;
    Size -= VTSize;
  }

  unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
                            TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
  unsigned NumLdStInMemcpy = OutStoreChains.size();

  if (NumLdStInMemcpy) {
    // It may be that memcpy might be converted to memset if it's memcpy
    // of constants. In such a case, we won't have loads and stores, but
    // just stores. In the absence of loads, there is nothing to gang up.
    if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
      // If the target does not care, just leave it as is.
      for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
        OutChains.push_back(OutLoadChains[i]);
        OutChains.push_back(OutStoreChains[i]);
      }
    } else {
      // Ld/St less than/equal limit set by target.
      if (NumLdStInMemcpy <= GluedLdStLimit) {
        chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
                                     NumLdStInMemcpy, OutLoadChains,
                                     OutStoreChains);
      } else {
        unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
        unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
        unsigned GlueIter = 0;

        for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
          unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
          unsigned IndexTo = NumLdStInMemcpy - GlueIter;

          chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
                                       OutLoadChains, OutStoreChains);
          GlueIter += GluedLdStLimit;
        }

        // Residual ld/st.
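        // The full groups above were peeled off the back of the arrays, so
        // the first NumLdStInMemcpy % GluedLdStLimit operations remain and
        // form one final, smaller glue group.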
6244 if (RemainingLdStInMemcpy) {
6245 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6246 RemainingLdStInMemcpy, OutLoadChains,
6247 OutStoreChains);
6248 }
6249 }
6250 }
6251 }
6252 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6253 }
6254
6255 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6256 SDValue Chain, SDValue Dst, SDValue Src,
6257 uint64_t Size, Align Alignment,
6258 bool isVol, bool AlwaysInline,
6259 MachinePointerInfo DstPtrInfo,
6260 MachinePointerInfo SrcPtrInfo) {
6261 // Turn a memmove of undef to a nop.
6262 // FIXME: We need to honor volatile even if Src is undef.
6263 if (Src.isUndef())
6264 return Chain;
6265
6266 // Expand memmove to a series of load and store ops if the size operand falls
6267 // below a certain threshold.
6268 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6269 const DataLayout &DL = DAG.getDataLayout();
6270 LLVMContext &C = *DAG.getContext();
6271 std::vector<EVT> MemOps;
6272 bool DstAlignCanChange = false;
6273 MachineFunction &MF = DAG.getMachineFunction();
6274 MachineFrameInfo &MFI = MF.getFrameInfo();
6275 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6276 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6277 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6278 DstAlignCanChange = true;
6279 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6280 if (!SrcAlign || Alignment > *SrcAlign)
6281 SrcAlign = Alignment;
6282 assert(SrcAlign && "SrcAlign must be set");
6283 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
6284 if (!TLI.findOptimalMemOpLowering(
6285 MemOps, Limit,
6286 MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign,
6287 /*IsVolatile*/ true),
6288 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
6289 MF.getFunction().getAttributes()))
6290 return SDValue();
6291
6292 if (DstAlignCanChange) {
6293 Type *Ty = MemOps[0].getTypeForEVT(C);
6294 Align NewAlign = DL.getABITypeAlign(Ty);
6295 if (NewAlign > Alignment) {
6296 // Give the stack frame object a larger alignment if needed.
6297 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6298 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6299 Alignment = NewAlign;
6300 }
6301 }
6302
6303 MachineMemOperand::Flags MMOFlags =
6304 isVol ?
MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6305 uint64_t SrcOff = 0, DstOff = 0;
6306 SmallVector<SDValue, 8> LoadValues;
6307 SmallVector<SDValue, 8> LoadChains;
6308 SmallVector<SDValue, 8> OutChains;
6309 unsigned NumMemOps = MemOps.size();
6310 for (unsigned i = 0; i < NumMemOps; i++) {
6311 EVT VT = MemOps[i];
6312 unsigned VTSize = VT.getSizeInBits() / 8;
6313 SDValue Value;
6314
6315 bool isDereferenceable =
6316 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6317 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6318 if (isDereferenceable)
6319 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6320
6321 Value =
6322 DAG.getLoad(VT, dl, Chain,
6323 DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6324 SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags);
6325 LoadValues.push_back(Value);
6326 LoadChains.push_back(Value.getValue(1));
6327 SrcOff += VTSize;
6328 }
6329 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
6330 OutChains.clear();
6331 for (unsigned i = 0; i < NumMemOps; i++) {
6332 EVT VT = MemOps[i];
6333 unsigned VTSize = VT.getSizeInBits() / 8;
6334 SDValue Store;
6335
6336 Store =
6337 DAG.getStore(Chain, dl, LoadValues[i],
6338 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6339 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
6340 OutChains.push_back(Store);
6341 DstOff += VTSize;
6342 }
6343
6344 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6345 }
6346
6347 /// Lower the call to the 'memset' intrinsic function into a series of store
6348 /// operations.
6349 ///
6350 /// \param DAG Selection DAG where lowered code is placed.
6351 /// \param dl Link to corresponding IR location.
6352 /// \param Chain Control flow dependency.
6353 /// \param Dst Pointer to destination memory location.
6354 /// \param Src Value of byte to write into the memory.
6355 /// \param Size Number of bytes to write.
6356 /// \param Alignment Alignment of the destination in bytes.
6357 /// \param isVol True if destination is volatile.
6358 /// \param DstPtrInfo IR information on the memory pointer.
6359 /// \returns New head in the control flow, if lowering was successful, empty
6360 /// SDValue otherwise.
6361 ///
6362 /// The function tries to replace the 'llvm.memset' intrinsic with several
6363 /// store operations and value calculation code. This is usually profitable
6364 /// for small memory sizes.
6365 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
6366 SDValue Chain, SDValue Dst, SDValue Src,
6367 uint64_t Size, Align Alignment, bool isVol,
6368 MachinePointerInfo DstPtrInfo) {
6369 // Turn a memset of undef to a nop.
6370 // FIXME: We need to honor volatile even if Src is undef.
6371 if (Src.isUndef())
6372 return Chain;
6373
6374 // Expand memset to a series of store ops if the size operand
6375 // falls below a certain threshold.
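// Illustrative example: on a target with legal i64 stores, memset(p, c, 16)
// typically expands to two i64 stores of the byte value splatted across the
// word, i.e. the pattern 0x0101010101010101 * c.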
6376 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6377 std::vector<EVT> MemOps;
6378 bool DstAlignCanChange = false;
6379 MachineFunction &MF = DAG.getMachineFunction();
6380 MachineFrameInfo &MFI = MF.getFrameInfo();
6381 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6382 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6383 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6384 DstAlignCanChange = true;
6385 bool IsZeroVal =
6386 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
6387 if (!TLI.findOptimalMemOpLowering(
6388 MemOps, TLI.getMaxStoresPerMemset(OptSize),
6389 MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
6390 DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes()))
6391 return SDValue();
6392
6393 if (DstAlignCanChange) {
6394 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
6395 Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty);
6396 if (NewAlign > Alignment) {
6397 // Give the stack frame object a larger alignment if needed.
6398 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6399 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6400 Alignment = NewAlign;
6401 }
6402 }
6403
6404 SmallVector<SDValue, 8> OutChains;
6405 uint64_t DstOff = 0;
6406 unsigned NumMemOps = MemOps.size();
6407
6408 // Find the largest store and generate the bit pattern for it.
6409 EVT LargestVT = MemOps[0];
6410 for (unsigned i = 1; i < NumMemOps; i++)
6411 if (MemOps[i].bitsGT(LargestVT))
6412 LargestVT = MemOps[i];
6413 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
6414
6415 for (unsigned i = 0; i < NumMemOps; i++) {
6416 EVT VT = MemOps[i];
6417 unsigned VTSize = VT.getSizeInBits() / 8;
6418 if (VTSize > Size) {
6419 // Issuing an unaligned store that overlaps with the previous store.
6420 // Adjust the offset accordingly.
6421 assert(i == NumMemOps-1 && i != 0);
6422 DstOff -= VTSize - Size;
6423 }
6424
6425 // If this store is smaller than the largest store, see whether we can get
6426 // the smaller value for free with a truncate.
6427 SDValue Value = MemSetValue;
6428 if (VT.bitsLT(LargestVT)) {
6429 if (!LargestVT.isVector() && !VT.isVector() &&
6430 TLI.isTruncateFree(LargestVT, VT))
6431 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
6432 else
6433 Value = getMemsetValue(Src, VT, DAG, dl);
6434 }
6435 assert(Value.getValueType() == VT && "Value with wrong type.");
6436 SDValue Store = DAG.getStore(
6437 Chain, dl, Value,
6438 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6439 DstPtrInfo.getWithOffset(DstOff), Alignment,
6440 isVol ?
MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 6441 OutChains.push_back(Store); 6442 DstOff += VT.getSizeInBits() / 8; 6443 Size -= VTSize; 6444 } 6445 6446 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6447 } 6448 6449 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 6450 unsigned AS) { 6451 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 6452 // pointer operands can be losslessly bitcasted to pointers of address space 0 6453 if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) { 6454 report_fatal_error("cannot lower memory intrinsic in address space " + 6455 Twine(AS)); 6456 } 6457 } 6458 6459 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 6460 SDValue Src, SDValue Size, Align Alignment, 6461 bool isVol, bool AlwaysInline, bool isTailCall, 6462 MachinePointerInfo DstPtrInfo, 6463 MachinePointerInfo SrcPtrInfo) { 6464 // Check to see if we should lower the memcpy to loads and stores first. 6465 // For cases within the target-specified limits, this is the best choice. 6466 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6467 if (ConstantSize) { 6468 // Memcpy with size zero? Just return the original chain. 6469 if (ConstantSize->isNullValue()) 6470 return Chain; 6471 6472 SDValue Result = getMemcpyLoadsAndStores( 6473 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, 6474 isVol, false, DstPtrInfo, SrcPtrInfo); 6475 if (Result.getNode()) 6476 return Result; 6477 } 6478 6479 // Then check to see if we should lower the memcpy with target-specific 6480 // code. If the target chooses to do this, this is the next best. 6481 if (TSI) { 6482 SDValue Result = TSI->EmitTargetCodeForMemcpy( 6483 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline, 6484 DstPtrInfo, SrcPtrInfo); 6485 if (Result.getNode()) 6486 return Result; 6487 } 6488 6489 // If we really need inline code and the target declined to provide it, 6490 // use a (potentially long) sequence of loads and stores. 6491 if (AlwaysInline) { 6492 assert(ConstantSize && "AlwaysInline requires a constant size!"); 6493 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6494 ConstantSize->getZExtValue(), Alignment, 6495 isVol, true, DstPtrInfo, SrcPtrInfo); 6496 } 6497 6498 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6499 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6500 6501 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 6502 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 6503 // respect volatile, so they may do things like read or write memory 6504 // beyond the given memory regions. But fixing this isn't easy, and most 6505 // people don't care. 6506 6507 // Emit a library call. 
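// The call constructed below is equivalent to the C declaration
// void *memcpy(void *dst, const void *src, size_t n);
// the result is discarded, so only the chain output is used.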
6508 TargetLowering::ArgListTy Args; 6509 TargetLowering::ArgListEntry Entry; 6510 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6511 Entry.Node = Dst; Args.push_back(Entry); 6512 Entry.Node = Src; Args.push_back(Entry); 6513 6514 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6515 Entry.Node = Size; Args.push_back(Entry); 6516 // FIXME: pass in SDLoc 6517 TargetLowering::CallLoweringInfo CLI(*this); 6518 CLI.setDebugLoc(dl) 6519 .setChain(Chain) 6520 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 6521 Dst.getValueType().getTypeForEVT(*getContext()), 6522 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 6523 TLI->getPointerTy(getDataLayout())), 6524 std::move(Args)) 6525 .setDiscardResult() 6526 .setTailCall(isTailCall); 6527 6528 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6529 return CallResult.second; 6530 } 6531 6532 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, 6533 SDValue Dst, unsigned DstAlign, 6534 SDValue Src, unsigned SrcAlign, 6535 SDValue Size, Type *SizeTy, 6536 unsigned ElemSz, bool isTailCall, 6537 MachinePointerInfo DstPtrInfo, 6538 MachinePointerInfo SrcPtrInfo) { 6539 // Emit a library call. 6540 TargetLowering::ArgListTy Args; 6541 TargetLowering::ArgListEntry Entry; 6542 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6543 Entry.Node = Dst; 6544 Args.push_back(Entry); 6545 6546 Entry.Node = Src; 6547 Args.push_back(Entry); 6548 6549 Entry.Ty = SizeTy; 6550 Entry.Node = Size; 6551 Args.push_back(Entry); 6552 6553 RTLIB::Libcall LibraryCall = 6554 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6555 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6556 report_fatal_error("Unsupported element size"); 6557 6558 TargetLowering::CallLoweringInfo CLI(*this); 6559 CLI.setDebugLoc(dl) 6560 .setChain(Chain) 6561 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6562 Type::getVoidTy(*getContext()), 6563 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6564 TLI->getPointerTy(getDataLayout())), 6565 std::move(Args)) 6566 .setDiscardResult() 6567 .setTailCall(isTailCall); 6568 6569 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6570 return CallResult.second; 6571 } 6572 6573 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 6574 SDValue Src, SDValue Size, Align Alignment, 6575 bool isVol, bool isTailCall, 6576 MachinePointerInfo DstPtrInfo, 6577 MachinePointerInfo SrcPtrInfo) { 6578 // Check to see if we should lower the memmove to loads and stores first. 6579 // For cases within the target-specified limits, this is the best choice. 6580 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6581 if (ConstantSize) { 6582 // Memmove with size zero? Just return the original chain. 6583 if (ConstantSize->isNullValue()) 6584 return Chain; 6585 6586 SDValue Result = getMemmoveLoadsAndStores( 6587 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, 6588 isVol, false, DstPtrInfo, SrcPtrInfo); 6589 if (Result.getNode()) 6590 return Result; 6591 } 6592 6593 // Then check to see if we should lower the memmove with target-specific 6594 // code. If the target chooses to do this, this is the next best. 
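// As with memcpy above, the strategy is tiered: inline expansion for small
// constant sizes, then the target-specific hook, then a plain libcall.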
6595 if (TSI) { 6596 SDValue Result = 6597 TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, 6598 Alignment, isVol, DstPtrInfo, SrcPtrInfo); 6599 if (Result.getNode()) 6600 return Result; 6601 } 6602 6603 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6604 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6605 6606 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 6607 // not be safe. See memcpy above for more details. 6608 6609 // Emit a library call. 6610 TargetLowering::ArgListTy Args; 6611 TargetLowering::ArgListEntry Entry; 6612 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6613 Entry.Node = Dst; Args.push_back(Entry); 6614 Entry.Node = Src; Args.push_back(Entry); 6615 6616 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6617 Entry.Node = Size; Args.push_back(Entry); 6618 // FIXME: pass in SDLoc 6619 TargetLowering::CallLoweringInfo CLI(*this); 6620 CLI.setDebugLoc(dl) 6621 .setChain(Chain) 6622 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 6623 Dst.getValueType().getTypeForEVT(*getContext()), 6624 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 6625 TLI->getPointerTy(getDataLayout())), 6626 std::move(Args)) 6627 .setDiscardResult() 6628 .setTailCall(isTailCall); 6629 6630 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6631 return CallResult.second; 6632 } 6633 6634 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl, 6635 SDValue Dst, unsigned DstAlign, 6636 SDValue Src, unsigned SrcAlign, 6637 SDValue Size, Type *SizeTy, 6638 unsigned ElemSz, bool isTailCall, 6639 MachinePointerInfo DstPtrInfo, 6640 MachinePointerInfo SrcPtrInfo) { 6641 // Emit a library call. 6642 TargetLowering::ArgListTy Args; 6643 TargetLowering::ArgListEntry Entry; 6644 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6645 Entry.Node = Dst; 6646 Args.push_back(Entry); 6647 6648 Entry.Node = Src; 6649 Args.push_back(Entry); 6650 6651 Entry.Ty = SizeTy; 6652 Entry.Node = Size; 6653 Args.push_back(Entry); 6654 6655 RTLIB::Libcall LibraryCall = 6656 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6657 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6658 report_fatal_error("Unsupported element size"); 6659 6660 TargetLowering::CallLoweringInfo CLI(*this); 6661 CLI.setDebugLoc(dl) 6662 .setChain(Chain) 6663 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6664 Type::getVoidTy(*getContext()), 6665 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6666 TLI->getPointerTy(getDataLayout())), 6667 std::move(Args)) 6668 .setDiscardResult() 6669 .setTailCall(isTailCall); 6670 6671 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6672 return CallResult.second; 6673 } 6674 6675 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 6676 SDValue Src, SDValue Size, Align Alignment, 6677 bool isVol, bool isTailCall, 6678 MachinePointerInfo DstPtrInfo) { 6679 // Check to see if we should lower the memset to stores first. 6680 // For cases within the target-specified limits, this is the best choice. 6681 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6682 if (ConstantSize) { 6683 // Memset with size zero? Just return the original chain. 
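// (A zero-length memset touches no memory, so no code needs to be emitted.)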
6684 if (ConstantSize->isNullValue()) 6685 return Chain; 6686 6687 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src, 6688 ConstantSize->getZExtValue(), Alignment, 6689 isVol, DstPtrInfo); 6690 6691 if (Result.getNode()) 6692 return Result; 6693 } 6694 6695 // Then check to see if we should lower the memset with target-specific 6696 // code. If the target chooses to do this, this is the next best. 6697 if (TSI) { 6698 SDValue Result = TSI->EmitTargetCodeForMemset( 6699 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo); 6700 if (Result.getNode()) 6701 return Result; 6702 } 6703 6704 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6705 6706 // Emit a library call. 6707 TargetLowering::ArgListTy Args; 6708 TargetLowering::ArgListEntry Entry; 6709 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext()); 6710 Args.push_back(Entry); 6711 Entry.Node = Src; 6712 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 6713 Args.push_back(Entry); 6714 Entry.Node = Size; 6715 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6716 Args.push_back(Entry); 6717 6718 // FIXME: pass in SDLoc 6719 TargetLowering::CallLoweringInfo CLI(*this); 6720 CLI.setDebugLoc(dl) 6721 .setChain(Chain) 6722 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 6723 Dst.getValueType().getTypeForEVT(*getContext()), 6724 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 6725 TLI->getPointerTy(getDataLayout())), 6726 std::move(Args)) 6727 .setDiscardResult() 6728 .setTailCall(isTailCall); 6729 6730 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6731 return CallResult.second; 6732 } 6733 6734 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl, 6735 SDValue Dst, unsigned DstAlign, 6736 SDValue Value, SDValue Size, Type *SizeTy, 6737 unsigned ElemSz, bool isTailCall, 6738 MachinePointerInfo DstPtrInfo) { 6739 // Emit a library call. 
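// The libcall selected below is one of the
// __llvm_memset_element_unordered_atomic_* functions, keyed on element size.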
6740 TargetLowering::ArgListTy Args; 6741 TargetLowering::ArgListEntry Entry; 6742 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6743 Entry.Node = Dst; 6744 Args.push_back(Entry); 6745 6746 Entry.Ty = Type::getInt8Ty(*getContext()); 6747 Entry.Node = Value; 6748 Args.push_back(Entry); 6749 6750 Entry.Ty = SizeTy; 6751 Entry.Node = Size; 6752 Args.push_back(Entry); 6753 6754 RTLIB::Libcall LibraryCall = 6755 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6756 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6757 report_fatal_error("Unsupported element size"); 6758 6759 TargetLowering::CallLoweringInfo CLI(*this); 6760 CLI.setDebugLoc(dl) 6761 .setChain(Chain) 6762 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6763 Type::getVoidTy(*getContext()), 6764 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6765 TLI->getPointerTy(getDataLayout())), 6766 std::move(Args)) 6767 .setDiscardResult() 6768 .setTailCall(isTailCall); 6769 6770 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6771 return CallResult.second; 6772 } 6773 6774 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6775 SDVTList VTList, ArrayRef<SDValue> Ops, 6776 MachineMemOperand *MMO) { 6777 FoldingSetNodeID ID; 6778 ID.AddInteger(MemVT.getRawBits()); 6779 AddNodeIDNode(ID, Opcode, VTList, Ops); 6780 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6781 void* IP = nullptr; 6782 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6783 cast<AtomicSDNode>(E)->refineAlignment(MMO); 6784 return SDValue(E, 0); 6785 } 6786 6787 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6788 VTList, MemVT, MMO); 6789 createOperands(N, Ops); 6790 6791 CSEMap.InsertNode(N, IP); 6792 InsertNode(N); 6793 return SDValue(N, 0); 6794 } 6795 6796 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 6797 EVT MemVT, SDVTList VTs, SDValue Chain, 6798 SDValue Ptr, SDValue Cmp, SDValue Swp, 6799 MachineMemOperand *MMO) { 6800 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 6801 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 6802 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 6803 6804 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 6805 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6806 } 6807 6808 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6809 SDValue Chain, SDValue Ptr, SDValue Val, 6810 MachineMemOperand *MMO) { 6811 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 6812 Opcode == ISD::ATOMIC_LOAD_SUB || 6813 Opcode == ISD::ATOMIC_LOAD_AND || 6814 Opcode == ISD::ATOMIC_LOAD_CLR || 6815 Opcode == ISD::ATOMIC_LOAD_OR || 6816 Opcode == ISD::ATOMIC_LOAD_XOR || 6817 Opcode == ISD::ATOMIC_LOAD_NAND || 6818 Opcode == ISD::ATOMIC_LOAD_MIN || 6819 Opcode == ISD::ATOMIC_LOAD_MAX || 6820 Opcode == ISD::ATOMIC_LOAD_UMIN || 6821 Opcode == ISD::ATOMIC_LOAD_UMAX || 6822 Opcode == ISD::ATOMIC_LOAD_FADD || 6823 Opcode == ISD::ATOMIC_LOAD_FSUB || 6824 Opcode == ISD::ATOMIC_SWAP || 6825 Opcode == ISD::ATOMIC_STORE) && 6826 "Invalid Atomic Op"); 6827 6828 EVT VT = Val.getValueType(); 6829 6830 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? 
getVTList(MVT::Other) : 6831 getVTList(VT, MVT::Other); 6832 SDValue Ops[] = {Chain, Ptr, Val}; 6833 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6834 } 6835 6836 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6837 EVT VT, SDValue Chain, SDValue Ptr, 6838 MachineMemOperand *MMO) { 6839 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 6840 6841 SDVTList VTs = getVTList(VT, MVT::Other); 6842 SDValue Ops[] = {Chain, Ptr}; 6843 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6844 } 6845 6846 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 6847 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 6848 if (Ops.size() == 1) 6849 return Ops[0]; 6850 6851 SmallVector<EVT, 4> VTs; 6852 VTs.reserve(Ops.size()); 6853 for (unsigned i = 0; i < Ops.size(); ++i) 6854 VTs.push_back(Ops[i].getValueType()); 6855 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 6856 } 6857 6858 SDValue SelectionDAG::getMemIntrinsicNode( 6859 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 6860 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, 6861 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) { 6862 if (!Size && MemVT.isScalableVector()) 6863 Size = MemoryLocation::UnknownSize; 6864 else if (!Size) 6865 Size = MemVT.getStoreSize(); 6866 6867 MachineFunction &MF = getMachineFunction(); 6868 MachineMemOperand *MMO = 6869 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo); 6870 6871 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 6872 } 6873 6874 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 6875 SDVTList VTList, 6876 ArrayRef<SDValue> Ops, EVT MemVT, 6877 MachineMemOperand *MMO) { 6878 assert((Opcode == ISD::INTRINSIC_VOID || 6879 Opcode == ISD::INTRINSIC_W_CHAIN || 6880 Opcode == ISD::PREFETCH || 6881 ((int)Opcode <= std::numeric_limits<int>::max() && 6882 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 6883 "Opcode is not a memory-accessing opcode!"); 6884 6885 // Memoize the node unless it returns a flag. 6886 MemIntrinsicSDNode *N; 6887 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6888 FoldingSetNodeID ID; 6889 AddNodeIDNode(ID, Opcode, VTList, Ops); 6890 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 6891 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 6892 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6893 void *IP = nullptr; 6894 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6895 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 6896 return SDValue(E, 0); 6897 } 6898 6899 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6900 VTList, MemVT, MMO); 6901 createOperands(N, Ops); 6902 6903 CSEMap.InsertNode(N, IP); 6904 } else { 6905 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6906 VTList, MemVT, MMO); 6907 createOperands(N, Ops); 6908 } 6909 InsertNode(N); 6910 SDValue V(N, 0); 6911 NewSDValueDbgMsg(V, "Creating new node: ", this); 6912 return V; 6913 } 6914 6915 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, 6916 SDValue Chain, int FrameIndex, 6917 int64_t Size, int64_t Offset) { 6918 const unsigned Opcode = IsStart ? 
ISD::LIFETIME_START : ISD::LIFETIME_END; 6919 const auto VTs = getVTList(MVT::Other); 6920 SDValue Ops[2] = { 6921 Chain, 6922 getFrameIndex(FrameIndex, 6923 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()), 6924 true)}; 6925 6926 FoldingSetNodeID ID; 6927 AddNodeIDNode(ID, Opcode, VTs, Ops); 6928 ID.AddInteger(FrameIndex); 6929 ID.AddInteger(Size); 6930 ID.AddInteger(Offset); 6931 void *IP = nullptr; 6932 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6933 return SDValue(E, 0); 6934 6935 LifetimeSDNode *N = newSDNode<LifetimeSDNode>( 6936 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset); 6937 createOperands(N, Ops); 6938 CSEMap.InsertNode(N, IP); 6939 InsertNode(N); 6940 SDValue V(N, 0); 6941 NewSDValueDbgMsg(V, "Creating new node: ", this); 6942 return V; 6943 } 6944 6945 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6946 /// MachinePointerInfo record from it. This is particularly useful because the 6947 /// code generator has many cases where it doesn't bother passing in a 6948 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6949 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6950 SelectionDAG &DAG, SDValue Ptr, 6951 int64_t Offset = 0) { 6952 // If this is FI+Offset, we can model it. 6953 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 6954 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 6955 FI->getIndex(), Offset); 6956 6957 // If this is (FI+Offset1)+Offset2, we can model it. 6958 if (Ptr.getOpcode() != ISD::ADD || 6959 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 6960 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 6961 return Info; 6962 6963 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 6964 return MachinePointerInfo::getFixedStack( 6965 DAG.getMachineFunction(), FI, 6966 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 6967 } 6968 6969 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6970 /// MachinePointerInfo record from it. This is particularly useful because the 6971 /// code generator has many cases where it doesn't bother passing in a 6972 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6973 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6974 SelectionDAG &DAG, SDValue Ptr, 6975 SDValue OffsetOp) { 6976 // If the 'Offset' value isn't a constant, we can't handle this. 6977 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 6978 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); 6979 if (OffsetOp.isUndef()) 6980 return InferPointerInfo(Info, DAG, Ptr); 6981 return Info; 6982 } 6983 6984 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6985 EVT VT, const SDLoc &dl, SDValue Chain, 6986 SDValue Ptr, SDValue Offset, 6987 MachinePointerInfo PtrInfo, EVT MemVT, 6988 Align Alignment, 6989 MachineMemOperand::Flags MMOFlags, 6990 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6991 assert(Chain.getValueType() == MVT::Other && 6992 "Invalid chain type"); 6993 6994 MMOFlags |= MachineMemOperand::MOLoad; 6995 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 6996 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 6997 // clients. 
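// For example, a load from FrameIndex 3 at constant offset 8 with a null
// MachinePointerInfo is given fixed-stack pointer info for slot 3, offset 8.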
6998 if (PtrInfo.V.isNull()) 6999 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); 7000 7001 uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize()); 7002 MachineFunction &MF = getMachineFunction(); 7003 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, 7004 Alignment, AAInfo, Ranges); 7005 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 7006 } 7007 7008 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 7009 EVT VT, const SDLoc &dl, SDValue Chain, 7010 SDValue Ptr, SDValue Offset, EVT MemVT, 7011 MachineMemOperand *MMO) { 7012 if (VT == MemVT) { 7013 ExtType = ISD::NON_EXTLOAD; 7014 } else if (ExtType == ISD::NON_EXTLOAD) { 7015 assert(VT == MemVT && "Non-extending load from different memory type!"); 7016 } else { 7017 // Extending load. 7018 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 7019 "Should only be an extending load, not truncating!"); 7020 assert(VT.isInteger() == MemVT.isInteger() && 7021 "Cannot convert from FP to Int or Int -> FP!"); 7022 assert(VT.isVector() == MemVT.isVector() && 7023 "Cannot use an ext load to convert to or from a vector!"); 7024 assert((!VT.isVector() || 7025 VT.getVectorElementCount() == MemVT.getVectorElementCount()) && 7026 "Cannot use an ext load to change the number of vector elements!"); 7027 } 7028 7029 bool Indexed = AM != ISD::UNINDEXED; 7030 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 7031 7032 SDVTList VTs = Indexed ? 7033 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); 7034 SDValue Ops[] = { Chain, Ptr, Offset }; 7035 FoldingSetNodeID ID; 7036 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); 7037 ID.AddInteger(MemVT.getRawBits()); 7038 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( 7039 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); 7040 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7041 void *IP = nullptr; 7042 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7043 cast<LoadSDNode>(E)->refineAlignment(MMO); 7044 return SDValue(E, 0); 7045 } 7046 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7047 ExtType, MemVT, MMO); 7048 createOperands(N, Ops); 7049 7050 CSEMap.InsertNode(N, IP); 7051 InsertNode(N); 7052 SDValue V(N, 0); 7053 NewSDValueDbgMsg(V, "Creating new node: ", this); 7054 return V; 7055 } 7056 7057 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 7058 SDValue Ptr, MachinePointerInfo PtrInfo, 7059 MaybeAlign Alignment, 7060 MachineMemOperand::Flags MMOFlags, 7061 const AAMDNodes &AAInfo, const MDNode *Ranges) { 7062 SDValue Undef = getUNDEF(Ptr.getValueType()); 7063 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 7064 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); 7065 } 7066 7067 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 7068 SDValue Ptr, MachineMemOperand *MMO) { 7069 SDValue Undef = getUNDEF(Ptr.getValueType()); 7070 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 7071 VT, MMO); 7072 } 7073 7074 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 7075 EVT VT, SDValue Chain, SDValue Ptr, 7076 MachinePointerInfo PtrInfo, EVT MemVT, 7077 MaybeAlign Alignment, 7078 MachineMemOperand::Flags MMOFlags, 7079 const AAMDNodes &AAInfo) { 7080 SDValue Undef = getUNDEF(Ptr.getValueType()); 7081 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, 7082 MemVT, Alignment, MMOFlags, 
AAInfo); 7083 } 7084 7085 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 7086 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, 7087 MachineMemOperand *MMO) { 7088 SDValue Undef = getUNDEF(Ptr.getValueType()); 7089 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, 7090 MemVT, MMO); 7091 } 7092 7093 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, 7094 SDValue Base, SDValue Offset, 7095 ISD::MemIndexedMode AM) { 7096 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); 7097 assert(LD->getOffset().isUndef() && "Load is already a indexed load!"); 7098 // Don't propagate the invariant or dereferenceable flags. 7099 auto MMOFlags = 7100 LD->getMemOperand()->getFlags() & 7101 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 7102 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 7103 LD->getChain(), Base, Offset, LD->getPointerInfo(), 7104 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo()); 7105 } 7106 7107 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7108 SDValue Ptr, MachinePointerInfo PtrInfo, 7109 Align Alignment, 7110 MachineMemOperand::Flags MMOFlags, 7111 const AAMDNodes &AAInfo) { 7112 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 7113 7114 MMOFlags |= MachineMemOperand::MOStore; 7115 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 7116 7117 if (PtrInfo.V.isNull()) 7118 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 7119 7120 MachineFunction &MF = getMachineFunction(); 7121 uint64_t Size = 7122 MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize()); 7123 MachineMemOperand *MMO = 7124 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo); 7125 return getStore(Chain, dl, Val, Ptr, MMO); 7126 } 7127 7128 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7129 SDValue Ptr, MachineMemOperand *MMO) { 7130 assert(Chain.getValueType() == MVT::Other && 7131 "Invalid chain type"); 7132 EVT VT = Val.getValueType(); 7133 SDVTList VTs = getVTList(MVT::Other); 7134 SDValue Undef = getUNDEF(Ptr.getValueType()); 7135 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 7136 FoldingSetNodeID ID; 7137 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7138 ID.AddInteger(VT.getRawBits()); 7139 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 7140 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 7141 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7142 void *IP = nullptr; 7143 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7144 cast<StoreSDNode>(E)->refineAlignment(MMO); 7145 return SDValue(E, 0); 7146 } 7147 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7148 ISD::UNINDEXED, false, VT, MMO); 7149 createOperands(N, Ops); 7150 7151 CSEMap.InsertNode(N, IP); 7152 InsertNode(N); 7153 SDValue V(N, 0); 7154 NewSDValueDbgMsg(V, "Creating new node: ", this); 7155 return V; 7156 } 7157 7158 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7159 SDValue Ptr, MachinePointerInfo PtrInfo, 7160 EVT SVT, Align Alignment, 7161 MachineMemOperand::Flags MMOFlags, 7162 const AAMDNodes &AAInfo) { 7163 assert(Chain.getValueType() == MVT::Other && 7164 "Invalid chain type"); 7165 7166 MMOFlags |= MachineMemOperand::MOStore; 7167 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 7168 7169 if (PtrInfo.V.isNull()) 7170 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 7171 7172 MachineFunction &MF = getMachineFunction(); 7173 MachineMemOperand 
*MMO = MF.getMachineMemOperand( 7174 PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()), 7175 Alignment, AAInfo); 7176 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 7177 } 7178 7179 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 7180 SDValue Ptr, EVT SVT, 7181 MachineMemOperand *MMO) { 7182 EVT VT = Val.getValueType(); 7183 7184 assert(Chain.getValueType() == MVT::Other && 7185 "Invalid chain type"); 7186 if (VT == SVT) 7187 return getStore(Chain, dl, Val, Ptr, MMO); 7188 7189 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 7190 "Should only be a truncating store, not extending!"); 7191 assert(VT.isInteger() == SVT.isInteger() && 7192 "Can't do FP-INT conversion!"); 7193 assert(VT.isVector() == SVT.isVector() && 7194 "Cannot use trunc store to convert to or from a vector!"); 7195 assert((!VT.isVector() || 7196 VT.getVectorElementCount() == SVT.getVectorElementCount()) && 7197 "Cannot use trunc store to change the number of vector elements!"); 7198 7199 SDVTList VTs = getVTList(MVT::Other); 7200 SDValue Undef = getUNDEF(Ptr.getValueType()); 7201 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 7202 FoldingSetNodeID ID; 7203 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7204 ID.AddInteger(SVT.getRawBits()); 7205 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 7206 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 7207 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7208 void *IP = nullptr; 7209 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7210 cast<StoreSDNode>(E)->refineAlignment(MMO); 7211 return SDValue(E, 0); 7212 } 7213 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7214 ISD::UNINDEXED, true, SVT, MMO); 7215 createOperands(N, Ops); 7216 7217 CSEMap.InsertNode(N, IP); 7218 InsertNode(N); 7219 SDValue V(N, 0); 7220 NewSDValueDbgMsg(V, "Creating new node: ", this); 7221 return V; 7222 } 7223 7224 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 7225 SDValue Base, SDValue Offset, 7226 ISD::MemIndexedMode AM) { 7227 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 7228 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 7229 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 7230 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 7231 FoldingSetNodeID ID; 7232 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 7233 ID.AddInteger(ST->getMemoryVT().getRawBits()); 7234 ID.AddInteger(ST->getRawSubclassData()); 7235 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 7236 void *IP = nullptr; 7237 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 7238 return SDValue(E, 0); 7239 7240 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7241 ST->isTruncatingStore(), ST->getMemoryVT(), 7242 ST->getMemOperand()); 7243 createOperands(N, Ops); 7244 7245 CSEMap.InsertNode(N, IP); 7246 InsertNode(N); 7247 SDValue V(N, 0); 7248 NewSDValueDbgMsg(V, "Creating new node: ", this); 7249 return V; 7250 } 7251 7252 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 7253 SDValue Base, SDValue Offset, SDValue Mask, 7254 SDValue PassThru, EVT MemVT, 7255 MachineMemOperand *MMO, 7256 ISD::MemIndexedMode AM, 7257 ISD::LoadExtType ExtTy, bool isExpanding) { 7258 bool Indexed = AM != ISD::UNINDEXED; 7259 assert((Indexed || Offset.isUndef()) && 7260 "Unindexed masked load with an offset!"); 7261 SDVTList VTs = Indexed ? 
getVTList(VT, Base.getValueType(), MVT::Other) 7262 : getVTList(VT, MVT::Other); 7263 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru}; 7264 FoldingSetNodeID ID; 7265 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 7266 ID.AddInteger(MemVT.getRawBits()); 7267 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 7268 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO)); 7269 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7270 void *IP = nullptr; 7271 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7272 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 7273 return SDValue(E, 0); 7274 } 7275 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7276 AM, ExtTy, isExpanding, MemVT, MMO); 7277 createOperands(N, Ops); 7278 7279 CSEMap.InsertNode(N, IP); 7280 InsertNode(N); 7281 SDValue V(N, 0); 7282 NewSDValueDbgMsg(V, "Creating new node: ", this); 7283 return V; 7284 } 7285 7286 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, 7287 SDValue Base, SDValue Offset, 7288 ISD::MemIndexedMode AM) { 7289 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad); 7290 assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!"); 7291 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base, 7292 Offset, LD->getMask(), LD->getPassThru(), 7293 LD->getMemoryVT(), LD->getMemOperand(), AM, 7294 LD->getExtensionType(), LD->isExpandingLoad()); 7295 } 7296 7297 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 7298 SDValue Val, SDValue Base, SDValue Offset, 7299 SDValue Mask, EVT MemVT, 7300 MachineMemOperand *MMO, 7301 ISD::MemIndexedMode AM, bool IsTruncating, 7302 bool IsCompressing) { 7303 assert(Chain.getValueType() == MVT::Other && 7304 "Invalid chain type"); 7305 bool Indexed = AM != ISD::UNINDEXED; 7306 assert((Indexed || Offset.isUndef()) && 7307 "Unindexed masked store with an offset!"); 7308 SDVTList VTs = Indexed ? 
getVTList(Base.getValueType(), MVT::Other) 7309 : getVTList(MVT::Other); 7310 SDValue Ops[] = {Chain, Val, Base, Offset, Mask}; 7311 FoldingSetNodeID ID; 7312 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 7313 ID.AddInteger(MemVT.getRawBits()); 7314 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 7315 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO)); 7316 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7317 void *IP = nullptr; 7318 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7319 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 7320 return SDValue(E, 0); 7321 } 7322 auto *N = 7323 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7324 IsTruncating, IsCompressing, MemVT, MMO); 7325 createOperands(N, Ops); 7326 7327 CSEMap.InsertNode(N, IP); 7328 InsertNode(N); 7329 SDValue V(N, 0); 7330 NewSDValueDbgMsg(V, "Creating new node: ", this); 7331 return V; 7332 } 7333 7334 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, 7335 SDValue Base, SDValue Offset, 7336 ISD::MemIndexedMode AM) { 7337 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore); 7338 assert(ST->getOffset().isUndef() && 7339 "Masked store is already a indexed store!"); 7340 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset, 7341 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(), 7342 AM, ST->isTruncatingStore(), ST->isCompressingStore()); 7343 } 7344 7345 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 7346 ArrayRef<SDValue> Ops, 7347 MachineMemOperand *MMO, 7348 ISD::MemIndexType IndexType) { 7349 assert(Ops.size() == 6 && "Incompatible number of operands"); 7350 7351 FoldingSetNodeID ID; 7352 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 7353 ID.AddInteger(VT.getRawBits()); 7354 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 7355 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7356 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7357 void *IP = nullptr; 7358 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7359 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 7360 return SDValue(E, 0); 7361 } 7362 7363 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7364 VTs, VT, MMO, IndexType); 7365 createOperands(N, Ops); 7366 7367 assert(N->getPassThru().getValueType() == N->getValueType(0) && 7368 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 7369 assert(N->getMask().getValueType().getVectorNumElements() == 7370 N->getValueType(0).getVectorNumElements() && 7371 "Vector width mismatch between mask and data"); 7372 assert(N->getIndex().getValueType().getVectorNumElements() >= 7373 N->getValueType(0).getVectorNumElements() && 7374 "Vector width mismatch between index and data"); 7375 assert(isa<ConstantSDNode>(N->getScale()) && 7376 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7377 "Scale should be a constant power of 2"); 7378 7379 CSEMap.InsertNode(N, IP); 7380 InsertNode(N); 7381 SDValue V(N, 0); 7382 NewSDValueDbgMsg(V, "Creating new node: ", this); 7383 return V; 7384 } 7385 7386 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 7387 ArrayRef<SDValue> Ops, 7388 MachineMemOperand *MMO, 7389 ISD::MemIndexType IndexType) { 7390 assert(Ops.size() == 6 && "Incompatible number of operands"); 7391 7392 FoldingSetNodeID ID; 7393 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 7394 ID.AddInteger(VT.getRawBits()); 7395 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 
7396 dl.getIROrder(), VTs, VT, MMO, IndexType));
7397 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7398 void *IP = nullptr;
7399 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7400 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
7401 return SDValue(E, 0);
7402 }
7403 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
7404 VTs, VT, MMO, IndexType);
7405 createOperands(N, Ops);
7406
7407 assert(N->getMask().getValueType().getVectorNumElements() ==
7408 N->getValue().getValueType().getVectorNumElements() &&
7409 "Vector width mismatch between mask and data");
7410 assert(N->getIndex().getValueType().getVectorNumElements() >=
7411 N->getValue().getValueType().getVectorNumElements() &&
7412 "Vector width mismatch between index and data");
7413 assert(isa<ConstantSDNode>(N->getScale()) &&
7414 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
7415 "Scale should be a constant power of 2");
7416
7417 CSEMap.InsertNode(N, IP);
7418 InsertNode(N);
7419 SDValue V(N, 0);
7420 NewSDValueDbgMsg(V, "Creating new node: ", this);
7421 return V;
7422 }
7423
7424 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
7425 // select undef, T, F --> T (if T is a constant), otherwise F
7426 // select ?, undef, F --> F
7427 // select ?, T, undef --> T
7428 if (Cond.isUndef())
7429 return isConstantValueOfAnyType(T) ? T : F;
7430 if (T.isUndef())
7431 return F;
7432 if (F.isUndef())
7433 return T;
7434
7435 // select true, T, F --> T
7436 // select false, T, F --> F
7437 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
7438 return CondC->isNullValue() ? F : T;
7439
7440 // TODO: This should simplify VSELECT with constant condition using something
7441 // like this (but check boolean contents to be complete?):
7442 // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
7443 // return T;
7444 // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
7445 // return F;
7446
7447 // select ?, T, T --> T
7448 if (T == F)
7449 return T;
7450
7451 return SDValue();
7452 }
7453
7454 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
7455 // shift undef, Y --> 0 (can always assume that the undef value is 0)
7456 if (X.isUndef())
7457 return getConstant(0, SDLoc(X.getNode()), X.getValueType());
7458 // shift X, undef --> undef (because it may shift by the bitwidth)
7459 if (Y.isUndef())
7460 return getUNDEF(X.getValueType());
7461
7462 // shift 0, Y --> 0
7463 // shift X, 0 --> X
7464 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
7465 return X;
7466
7467 // shift X, C >= bitwidth(X) --> undef
7468 // All vector elements must be too big (or undef) to avoid partial undefs.
7469 auto isShiftTooBig = [X](ConstantSDNode *Val) {
7470 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
7471 };
7472 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
7473 return getUNDEF(X.getValueType());
7474
7475 return SDValue();
7476 }
7477
7478 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
7479 SDNodeFlags Flags) {
7480 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
7481 // (an undef operand can be chosen to be NaN/Inf), then the result of this
7482 // operation is poison. That result can be relaxed to undef.
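// Illustrative example: with the 'nnan' flag, (fadd nnan X, NaN) and
// (fadd nnan X, undef) both fold to undef below, because a NaN operand (or
// an undef that may be chosen to be NaN) makes the result poison.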
7483 ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true); 7484 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true); 7485 bool HasNan = (XC && XC->getValueAPF().isNaN()) || 7486 (YC && YC->getValueAPF().isNaN()); 7487 bool HasInf = (XC && XC->getValueAPF().isInfinity()) || 7488 (YC && YC->getValueAPF().isInfinity()); 7489 7490 if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef())) 7491 return getUNDEF(X.getValueType()); 7492 7493 if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef())) 7494 return getUNDEF(X.getValueType()); 7495 7496 if (!YC) 7497 return SDValue(); 7498 7499 // X + -0.0 --> X 7500 if (Opcode == ISD::FADD) 7501 if (YC->getValueAPF().isNegZero()) 7502 return X; 7503 7504 // X - +0.0 --> X 7505 if (Opcode == ISD::FSUB) 7506 if (YC->getValueAPF().isPosZero()) 7507 return X; 7508 7509 // X * 1.0 --> X 7510 // X / 1.0 --> X 7511 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV) 7512 if (YC->getValueAPF().isExactlyValue(1.0)) 7513 return X; 7514 7515 // X * 0.0 --> 0.0 7516 if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros()) 7517 if (YC->getValueAPF().isZero()) 7518 return getConstantFP(0.0, SDLoc(Y), Y.getValueType()); 7519 7520 return SDValue(); 7521 } 7522 7523 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 7524 SDValue Ptr, SDValue SV, unsigned Align) { 7525 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 7526 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 7527 } 7528 7529 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7530 ArrayRef<SDUse> Ops) { 7531 switch (Ops.size()) { 7532 case 0: return getNode(Opcode, DL, VT); 7533 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 7534 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 7535 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 7536 default: break; 7537 } 7538 7539 // Copy from an SDUse array into an SDValue array for use with 7540 // the regular getNode logic. 7541 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 7542 return getNode(Opcode, DL, VT, NewOps); 7543 } 7544 7545 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7546 ArrayRef<SDValue> Ops) { 7547 SDNodeFlags Flags; 7548 if (Inserter) 7549 Flags = Inserter->getFlags(); 7550 return getNode(Opcode, DL, VT, Ops, Flags); 7551 } 7552 7553 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7554 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7555 unsigned NumOps = Ops.size(); 7556 switch (NumOps) { 7557 case 0: return getNode(Opcode, DL, VT); 7558 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 7559 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 7560 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags); 7561 default: break; 7562 } 7563 7564 switch (Opcode) { 7565 default: break; 7566 case ISD::BUILD_VECTOR: 7567 // Attempt to simplify BUILD_VECTOR. 
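// (e.g. a BUILD_VECTOR whose operands are all undef can fold to UNDEF.)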
7568 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 7569 return V; 7570 break; 7571 case ISD::CONCAT_VECTORS: 7572 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 7573 return V; 7574 break; 7575 case ISD::SELECT_CC: 7576 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 7577 assert(Ops[0].getValueType() == Ops[1].getValueType() && 7578 "LHS and RHS of condition must have same type!"); 7579 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7580 "True and False arms of SelectCC must have same type!"); 7581 assert(Ops[2].getValueType() == VT && 7582 "select_cc node must be of same type as true and false value!"); 7583 break; 7584 case ISD::BR_CC: 7585 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 7586 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7587 "LHS/RHS of comparison should match types!"); 7588 break; 7589 } 7590 7591 // Memoize nodes. 7592 SDNode *N; 7593 SDVTList VTs = getVTList(VT); 7594 7595 if (VT != MVT::Glue) { 7596 FoldingSetNodeID ID; 7597 AddNodeIDNode(ID, Opcode, VTs, Ops); 7598 void *IP = nullptr; 7599 7600 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7601 return SDValue(E, 0); 7602 7603 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7604 createOperands(N, Ops); 7605 7606 CSEMap.InsertNode(N, IP); 7607 } else { 7608 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7609 createOperands(N, Ops); 7610 } 7611 7612 N->setFlags(Flags); 7613 InsertNode(N); 7614 SDValue V(N, 0); 7615 NewSDValueDbgMsg(V, "Creating new node: ", this); 7616 return V; 7617 } 7618 7619 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7620 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 7621 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 7622 } 7623 7624 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7625 ArrayRef<SDValue> Ops) { 7626 SDNodeFlags Flags; 7627 if (Inserter) 7628 Flags = Inserter->getFlags(); 7629 return getNode(Opcode, DL, VTList, Ops, Flags); 7630 } 7631 7632 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7633 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7634 if (VTList.NumVTs == 1) 7635 return getNode(Opcode, DL, VTList.VTs[0], Ops); 7636 7637 switch (Opcode) { 7638 case ISD::STRICT_FP_EXTEND: 7639 assert(VTList.NumVTs == 2 && Ops.size() == 2 && 7640 "Invalid STRICT_FP_EXTEND!"); 7641 assert(VTList.VTs[0].isFloatingPoint() && 7642 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!"); 7643 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7644 "STRICT_FP_EXTEND result type should be vector iff the operand " 7645 "type is vector!"); 7646 assert((!VTList.VTs[0].isVector() || 7647 VTList.VTs[0].getVectorNumElements() == 7648 Ops[1].getValueType().getVectorNumElements()) && 7649 "Vector element count mismatch!"); 7650 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) && 7651 "Invalid fpext node, dst <= src!"); 7652 break; 7653 case ISD::STRICT_FP_ROUND: 7654 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!"); 7655 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7656 "STRICT_FP_ROUND result type should be vector iff the operand " 7657 "type is vector!"); 7658 assert((!VTList.VTs[0].isVector() || 7659 VTList.VTs[0].getVectorNumElements() == 7660 Ops[1].getValueType().getVectorNumElements()) && 7661 "Vector element count mismatch!"); 7662 assert(VTList.VTs[0].isFloatingPoint() && 7663 Ops[1].getValueType().isFloatingPoint() && 7664 
VTList.VTs[0].bitsLT(Ops[1].getValueType()) && 7665 isa<ConstantSDNode>(Ops[2]) && 7666 (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 || 7667 cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) && 7668 "Invalid STRICT_FP_ROUND!"); 7669 break; 7670 #if 0 7671 // FIXME: figure out how to safely handle things like 7672 // int foo(int x) { return 1 << (x & 255); } 7673 // int bar() { return foo(256); } 7674 case ISD::SRA_PARTS: 7675 case ISD::SRL_PARTS: 7676 case ISD::SHL_PARTS: 7677 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 7678 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 7679 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7680 else if (N3.getOpcode() == ISD::AND) 7681 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 7682 // If the and is only masking out bits that cannot effect the shift, 7683 // eliminate the and. 7684 unsigned NumBits = VT.getScalarSizeInBits()*2; 7685 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 7686 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7687 } 7688 break; 7689 #endif 7690 } 7691 7692 // Memoize the node unless it returns a flag. 7693 SDNode *N; 7694 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 7695 FoldingSetNodeID ID; 7696 AddNodeIDNode(ID, Opcode, VTList, Ops); 7697 void *IP = nullptr; 7698 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7699 return SDValue(E, 0); 7700 7701 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7702 createOperands(N, Ops); 7703 CSEMap.InsertNode(N, IP); 7704 } else { 7705 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7706 createOperands(N, Ops); 7707 } 7708 7709 N->setFlags(Flags); 7710 InsertNode(N); 7711 SDValue V(N, 0); 7712 NewSDValueDbgMsg(V, "Creating new node: ", this); 7713 return V; 7714 } 7715 7716 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7717 SDVTList VTList) { 7718 return getNode(Opcode, DL, VTList, None); 7719 } 7720 7721 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7722 SDValue N1) { 7723 SDValue Ops[] = { N1 }; 7724 return getNode(Opcode, DL, VTList, Ops); 7725 } 7726 7727 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7728 SDValue N1, SDValue N2) { 7729 SDValue Ops[] = { N1, N2 }; 7730 return getNode(Opcode, DL, VTList, Ops); 7731 } 7732 7733 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7734 SDValue N1, SDValue N2, SDValue N3) { 7735 SDValue Ops[] = { N1, N2, N3 }; 7736 return getNode(Opcode, DL, VTList, Ops); 7737 } 7738 7739 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7740 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 7741 SDValue Ops[] = { N1, N2, N3, N4 }; 7742 return getNode(Opcode, DL, VTList, Ops); 7743 } 7744 7745 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7746 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 7747 SDValue N5) { 7748 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 7749 return getNode(Opcode, DL, VTList, Ops); 7750 } 7751 7752 SDVTList SelectionDAG::getVTList(EVT VT) { 7753 return makeVTList(SDNode::getValueTypeList(VT), 1); 7754 } 7755 7756 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 7757 FoldingSetNodeID ID; 7758 ID.AddInteger(2U); 7759 ID.AddInteger(VT1.getRawBits()); 7760 ID.AddInteger(VT2.getRawBits()); 7761 7762 void *IP = nullptr; 7763 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7764 if (!Result) { 7765 EVT *Array = 
Allocator.Allocate<EVT>(2); 7766 Array[0] = VT1; 7767 Array[1] = VT2; 7768 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 7769 VTListMap.InsertNode(Result, IP); 7770 } 7771 return Result->getSDVTList(); 7772 } 7773 7774 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 7775 FoldingSetNodeID ID; 7776 ID.AddInteger(3U); 7777 ID.AddInteger(VT1.getRawBits()); 7778 ID.AddInteger(VT2.getRawBits()); 7779 ID.AddInteger(VT3.getRawBits()); 7780 7781 void *IP = nullptr; 7782 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7783 if (!Result) { 7784 EVT *Array = Allocator.Allocate<EVT>(3); 7785 Array[0] = VT1; 7786 Array[1] = VT2; 7787 Array[2] = VT3; 7788 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 7789 VTListMap.InsertNode(Result, IP); 7790 } 7791 return Result->getSDVTList(); 7792 } 7793 7794 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 7795 FoldingSetNodeID ID; 7796 ID.AddInteger(4U); 7797 ID.AddInteger(VT1.getRawBits()); 7798 ID.AddInteger(VT2.getRawBits()); 7799 ID.AddInteger(VT3.getRawBits()); 7800 ID.AddInteger(VT4.getRawBits()); 7801 7802 void *IP = nullptr; 7803 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7804 if (!Result) { 7805 EVT *Array = Allocator.Allocate<EVT>(4); 7806 Array[0] = VT1; 7807 Array[1] = VT2; 7808 Array[2] = VT3; 7809 Array[3] = VT4; 7810 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 7811 VTListMap.InsertNode(Result, IP); 7812 } 7813 return Result->getSDVTList(); 7814 } 7815 7816 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 7817 unsigned NumVTs = VTs.size(); 7818 FoldingSetNodeID ID; 7819 ID.AddInteger(NumVTs); 7820 for (unsigned index = 0; index < NumVTs; index++) { 7821 ID.AddInteger(VTs[index].getRawBits()); 7822 } 7823 7824 void *IP = nullptr; 7825 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7826 if (!Result) { 7827 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 7828 llvm::copy(VTs, Array); 7829 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 7830 VTListMap.InsertNode(Result, IP); 7831 } 7832 return Result->getSDVTList(); 7833 } 7834 7835 7836 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 7837 /// specified operands. If the resultant node already exists in the DAG, 7838 /// this does not modify the specified node, instead it returns the node that 7839 /// already exists. If the resultant node does not exist in the DAG, the 7840 /// input node is returned. As a degenerate case, if you specify the same 7841 /// input operands as the node already has, the input node is returned. 7842 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 7843 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 7844 7845 // Check to see if there is no change. 7846 if (Op == N->getOperand(0)) return N; 7847 7848 // See if the modified node already exists. 7849 void *InsertPos = nullptr; 7850 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 7851 return Existing; 7852 7853 // Nope it doesn't. Remove the node from its current place in the maps. 7854 if (InsertPos) 7855 if (!RemoveNodeFromCSEMaps(N)) 7856 InsertPos = nullptr; 7857 7858 // Now we update the operands. 7859 N->OperandList[0].set(Op); 7860 7861 updateDivergence(N); 7862 // If this gets put into a CSE map, add it. 
7863 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7864 return N; 7865 } 7866 7867 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 7868 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 7869 7870 // Check to see if there is no change. 7871 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 7872 return N; // No operands changed, just return the input node. 7873 7874 // See if the modified node already exists. 7875 void *InsertPos = nullptr; 7876 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 7877 return Existing; 7878 7879 // Nope it doesn't. Remove the node from its current place in the maps. 7880 if (InsertPos) 7881 if (!RemoveNodeFromCSEMaps(N)) 7882 InsertPos = nullptr; 7883 7884 // Now we update the operands. 7885 if (N->OperandList[0] != Op1) 7886 N->OperandList[0].set(Op1); 7887 if (N->OperandList[1] != Op2) 7888 N->OperandList[1].set(Op2); 7889 7890 updateDivergence(N); 7891 // If this gets put into a CSE map, add it. 7892 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7893 return N; 7894 } 7895 7896 SDNode *SelectionDAG:: 7897 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 7898 SDValue Ops[] = { Op1, Op2, Op3 }; 7899 return UpdateNodeOperands(N, Ops); 7900 } 7901 7902 SDNode *SelectionDAG:: 7903 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7904 SDValue Op3, SDValue Op4) { 7905 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 7906 return UpdateNodeOperands(N, Ops); 7907 } 7908 7909 SDNode *SelectionDAG:: 7910 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7911 SDValue Op3, SDValue Op4, SDValue Op5) { 7912 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 7913 return UpdateNodeOperands(N, Ops); 7914 } 7915 7916 SDNode *SelectionDAG:: 7917 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 7918 unsigned NumOps = Ops.size(); 7919 assert(N->getNumOperands() == NumOps && 7920 "Update with wrong number of operands"); 7921 7922 // If no operands changed just return the input node. 7923 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 7924 return N; 7925 7926 // See if the modified node already exists. 7927 void *InsertPos = nullptr; 7928 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 7929 return Existing; 7930 7931 // Nope it doesn't. Remove the node from its current place in the maps. 7932 if (InsertPos) 7933 if (!RemoveNodeFromCSEMaps(N)) 7934 InsertPos = nullptr; 7935 7936 // Now we update the operands. 7937 for (unsigned i = 0; i != NumOps; ++i) 7938 if (N->OperandList[i] != Ops[i]) 7939 N->OperandList[i].set(Ops[i]); 7940 7941 updateDivergence(N); 7942 // If this gets put into a CSE map, add it. 7943 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7944 return N; 7945 } 7946 7947 /// DropOperands - Release the operands and set this node to have 7948 /// zero operands. 7949 void SDNode::DropOperands() { 7950 // Unlike the code in MorphNodeTo that does this, we don't need to 7951 // watch for dead nodes here. 7952 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 7953 SDUse &Use = *I++; 7954 Use.set(SDValue()); 7955 } 7956 } 7957 7958 void SelectionDAG::setNodeMemRefs(MachineSDNode *N, 7959 ArrayRef<MachineMemOperand *> NewMemRefs) { 7960 if (NewMemRefs.empty()) { 7961 N->clearMemRefs(); 7962 return; 7963 } 7964 7965 // Check if we can avoid allocating by storing a single reference directly. 
7966   if (NewMemRefs.size() == 1) {
7967     N->MemRefs = NewMemRefs[0];
7968     N->NumMemRefs = 1;
7969     return;
7970   }
7971 
7972   MachineMemOperand **MemRefsBuffer =
7973       Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
7974   llvm::copy(NewMemRefs, MemRefsBuffer);
7975   N->MemRefs = MemRefsBuffer;
7976   N->NumMemRefs = static_cast<int>(NewMemRefs.size());
7977 }
7978 
7979 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
7980 /// machine opcode.
7981 ///
7982 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7983                                    EVT VT) {
7984   SDVTList VTs = getVTList(VT);
7985   return SelectNodeTo(N, MachineOpc, VTs, None);
7986 }
7987 
7988 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7989                                    EVT VT, SDValue Op1) {
7990   SDVTList VTs = getVTList(VT);
7991   SDValue Ops[] = { Op1 };
7992   return SelectNodeTo(N, MachineOpc, VTs, Ops);
7993 }
7994 
7995 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7996                                    EVT VT, SDValue Op1,
7997                                    SDValue Op2) {
7998   SDVTList VTs = getVTList(VT);
7999   SDValue Ops[] = { Op1, Op2 };
8000   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8001 }
8002 
8003 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8004                                    EVT VT, SDValue Op1,
8005                                    SDValue Op2, SDValue Op3) {
8006   SDVTList VTs = getVTList(VT);
8007   SDValue Ops[] = { Op1, Op2, Op3 };
8008   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8009 }
8010 
8011 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8012                                    EVT VT, ArrayRef<SDValue> Ops) {
8013   SDVTList VTs = getVTList(VT);
8014   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8015 }
8016 
8017 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8018                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
8019   SDVTList VTs = getVTList(VT1, VT2);
8020   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8021 }
8022 
8023 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8024                                    EVT VT1, EVT VT2) {
8025   SDVTList VTs = getVTList(VT1, VT2);
8026   return SelectNodeTo(N, MachineOpc, VTs, None);
8027 }
8028 
8029 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8030                                    EVT VT1, EVT VT2, EVT VT3,
8031                                    ArrayRef<SDValue> Ops) {
8032   SDVTList VTs = getVTList(VT1, VT2, VT3);
8033   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8034 }
8035 
8036 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8037                                    EVT VT1, EVT VT2,
8038                                    SDValue Op1, SDValue Op2) {
8039   SDVTList VTs = getVTList(VT1, VT2);
8040   SDValue Ops[] = { Op1, Op2 };
8041   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8042 }
8043 
8044 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8045                                    SDVTList VTs, ArrayRef<SDValue> Ops) {
8046   SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
8047   // Reset the NodeID to -1.
8048   New->setNodeId(-1);
8049   if (New != N) {
8050     ReplaceAllUsesWith(N, New);
8051     RemoveDeadNode(N);
8052   }
8053   return New;
8054 }
8055 
8056 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
8057 /// the line number information on the merged node since it is not possible to
8058 /// preserve the information that the operation is associated with multiple lines.
8059 /// This will make the debugger work better at -O0, where there is a higher
8060 /// probability of having other instructions associated with that line.
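///
/// A minimal sketch of the intended behavior, with hypothetical line
/// numbers: if N carries a DebugLoc for source line 10 and a CSE hit merges
/// it with an SDLoc pointing at line 12, then at -O0 the merged node keeps
/// no DebugLoc at all rather than arbitrarily claiming either line:
/// \code
///   SDNode *Merged = UpdateSDLocOnMergeSDNode(N, OLoc);
///   // At CodeGenOpt::None: Merged->getDebugLoc() is now empty.
/// \endcode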
8061 /// 8062 /// For IROrder, we keep the smaller of the two 8063 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { 8064 DebugLoc NLoc = N->getDebugLoc(); 8065 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) { 8066 N->setDebugLoc(DebugLoc()); 8067 } 8068 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); 8069 N->setIROrder(Order); 8070 return N; 8071 } 8072 8073 /// MorphNodeTo - This *mutates* the specified node to have the specified 8074 /// return type, opcode, and operands. 8075 /// 8076 /// Note that MorphNodeTo returns the resultant node. If there is already a 8077 /// node of the specified opcode and operands, it returns that node instead of 8078 /// the current one. Note that the SDLoc need not be the same. 8079 /// 8080 /// Using MorphNodeTo is faster than creating a new node and swapping it in 8081 /// with ReplaceAllUsesWith both because it often avoids allocating a new 8082 /// node, and because it doesn't require CSE recalculation for any of 8083 /// the node's users. 8084 /// 8085 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG. 8086 /// As a consequence it isn't appropriate to use from within the DAG combiner or 8087 /// the legalizer which maintain worklists that would need to be updated when 8088 /// deleting things. 8089 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, 8090 SDVTList VTs, ArrayRef<SDValue> Ops) { 8091 // If an identical node already exists, use it. 8092 void *IP = nullptr; 8093 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { 8094 FoldingSetNodeID ID; 8095 AddNodeIDNode(ID, Opc, VTs, Ops); 8096 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) 8097 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); 8098 } 8099 8100 if (!RemoveNodeFromCSEMaps(N)) 8101 IP = nullptr; 8102 8103 // Start the morphing. 8104 N->NodeType = Opc; 8105 N->ValueList = VTs.VTs; 8106 N->NumValues = VTs.NumVTs; 8107 8108 // Clear the operands list, updating used nodes to remove this from their 8109 // use list. Keep track of any operands that become dead as a result. 8110 SmallPtrSet<SDNode*, 16> DeadNodeSet; 8111 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { 8112 SDUse &Use = *I++; 8113 SDNode *Used = Use.getNode(); 8114 Use.set(SDValue()); 8115 if (Used->use_empty()) 8116 DeadNodeSet.insert(Used); 8117 } 8118 8119 // For MachineNode, initialize the memory references information. 8120 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) 8121 MN->clearMemRefs(); 8122 8123 // Swap for an appropriately sized array from the recycler. 8124 removeOperands(N); 8125 createOperands(N, Ops); 8126 8127 // Delete any nodes that are still dead after adding the uses for the 8128 // new operands. 8129 if (!DeadNodeSet.empty()) { 8130 SmallVector<SDNode *, 16> DeadNodes; 8131 for (SDNode *N : DeadNodeSet) 8132 if (N->use_empty()) 8133 DeadNodes.push_back(N); 8134 RemoveDeadNodes(DeadNodes); 8135 } 8136 8137 if (IP) 8138 CSEMap.InsertNode(N, IP); // Memoize the new node. 
8139 return N; 8140 } 8141 8142 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) { 8143 unsigned OrigOpc = Node->getOpcode(); 8144 unsigned NewOpc; 8145 switch (OrigOpc) { 8146 default: 8147 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!"); 8148 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 8149 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break; 8150 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 8151 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break; 8152 #include "llvm/IR/ConstrainedOps.def" 8153 } 8154 8155 assert(Node->getNumValues() == 2 && "Unexpected number of results!"); 8156 8157 // We're taking this node out of the chain, so we need to re-link things. 8158 SDValue InputChain = Node->getOperand(0); 8159 SDValue OutputChain = SDValue(Node, 1); 8160 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 8161 8162 SmallVector<SDValue, 3> Ops; 8163 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) 8164 Ops.push_back(Node->getOperand(i)); 8165 8166 SDVTList VTs = getVTList(Node->getValueType(0)); 8167 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops); 8168 8169 // MorphNodeTo can operate in two ways: if an existing node with the 8170 // specified operands exists, it can just return it. Otherwise, it 8171 // updates the node in place to have the requested operands. 8172 if (Res == Node) { 8173 // If we updated the node in place, reset the node ID. To the isel, 8174 // this should be just like a newly allocated machine node. 8175 Res->setNodeId(-1); 8176 } else { 8177 ReplaceAllUsesWith(Node, Res); 8178 RemoveDeadNode(Node); 8179 } 8180 8181 return Res; 8182 } 8183 8184 /// getMachineNode - These are used for target selectors to create a new node 8185 /// with specified return type(s), MachineInstr opcode, and operands. 8186 /// 8187 /// Note that getMachineNode returns the resultant node. If there is already a 8188 /// node of the specified opcode and operands, it returns that node instead of 8189 /// the current one. 
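///
/// A minimal usage sketch from a hypothetical target's DAG-to-DAG selector;
/// the opcode MyTarget::LDri and the operand values are assumptions for
/// illustration, not defined in this file:
/// \code
///   // Build a load-like machine node producing an i32 value and a chain.
///   SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::Other);
///   SDValue Ops[] = {Base, Offset, Chain};
///   MachineSDNode *MN = CurDAG->getMachineNode(MyTarget::LDri, DL, VTs, Ops);
/// \endcode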
8190 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8191 EVT VT) { 8192 SDVTList VTs = getVTList(VT); 8193 return getMachineNode(Opcode, dl, VTs, None); 8194 } 8195 8196 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8197 EVT VT, SDValue Op1) { 8198 SDVTList VTs = getVTList(VT); 8199 SDValue Ops[] = { Op1 }; 8200 return getMachineNode(Opcode, dl, VTs, Ops); 8201 } 8202 8203 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8204 EVT VT, SDValue Op1, SDValue Op2) { 8205 SDVTList VTs = getVTList(VT); 8206 SDValue Ops[] = { Op1, Op2 }; 8207 return getMachineNode(Opcode, dl, VTs, Ops); 8208 } 8209 8210 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8211 EVT VT, SDValue Op1, SDValue Op2, 8212 SDValue Op3) { 8213 SDVTList VTs = getVTList(VT); 8214 SDValue Ops[] = { Op1, Op2, Op3 }; 8215 return getMachineNode(Opcode, dl, VTs, Ops); 8216 } 8217 8218 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8219 EVT VT, ArrayRef<SDValue> Ops) { 8220 SDVTList VTs = getVTList(VT); 8221 return getMachineNode(Opcode, dl, VTs, Ops); 8222 } 8223 8224 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8225 EVT VT1, EVT VT2, SDValue Op1, 8226 SDValue Op2) { 8227 SDVTList VTs = getVTList(VT1, VT2); 8228 SDValue Ops[] = { Op1, Op2 }; 8229 return getMachineNode(Opcode, dl, VTs, Ops); 8230 } 8231 8232 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8233 EVT VT1, EVT VT2, SDValue Op1, 8234 SDValue Op2, SDValue Op3) { 8235 SDVTList VTs = getVTList(VT1, VT2); 8236 SDValue Ops[] = { Op1, Op2, Op3 }; 8237 return getMachineNode(Opcode, dl, VTs, Ops); 8238 } 8239 8240 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8241 EVT VT1, EVT VT2, 8242 ArrayRef<SDValue> Ops) { 8243 SDVTList VTs = getVTList(VT1, VT2); 8244 return getMachineNode(Opcode, dl, VTs, Ops); 8245 } 8246 8247 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8248 EVT VT1, EVT VT2, EVT VT3, 8249 SDValue Op1, SDValue Op2) { 8250 SDVTList VTs = getVTList(VT1, VT2, VT3); 8251 SDValue Ops[] = { Op1, Op2 }; 8252 return getMachineNode(Opcode, dl, VTs, Ops); 8253 } 8254 8255 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8256 EVT VT1, EVT VT2, EVT VT3, 8257 SDValue Op1, SDValue Op2, 8258 SDValue Op3) { 8259 SDVTList VTs = getVTList(VT1, VT2, VT3); 8260 SDValue Ops[] = { Op1, Op2, Op3 }; 8261 return getMachineNode(Opcode, dl, VTs, Ops); 8262 } 8263 8264 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8265 EVT VT1, EVT VT2, EVT VT3, 8266 ArrayRef<SDValue> Ops) { 8267 SDVTList VTs = getVTList(VT1, VT2, VT3); 8268 return getMachineNode(Opcode, dl, VTs, Ops); 8269 } 8270 8271 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 8272 ArrayRef<EVT> ResultTys, 8273 ArrayRef<SDValue> Ops) { 8274 SDVTList VTs = getVTList(ResultTys); 8275 return getMachineNode(Opcode, dl, VTs, Ops); 8276 } 8277 8278 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 8279 SDVTList VTs, 8280 ArrayRef<SDValue> Ops) { 8281 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 8282 MachineSDNode *N; 8283 void *IP = nullptr; 8284 8285 if (DoCSE) { 8286 FoldingSetNodeID ID; 8287 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 8288 IP = nullptr; 8289 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 8290 return 
cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 8291 } 8292 } 8293 8294 // Allocate a new MachineSDNode. 8295 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 8296 createOperands(N, Ops); 8297 8298 if (DoCSE) 8299 CSEMap.InsertNode(N, IP); 8300 8301 InsertNode(N); 8302 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this); 8303 return N; 8304 } 8305 8306 /// getTargetExtractSubreg - A convenience function for creating 8307 /// TargetOpcode::EXTRACT_SUBREG nodes. 8308 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8309 SDValue Operand) { 8310 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8311 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 8312 VT, Operand, SRIdxVal); 8313 return SDValue(Subreg, 0); 8314 } 8315 8316 /// getTargetInsertSubreg - A convenience function for creating 8317 /// TargetOpcode::INSERT_SUBREG nodes. 8318 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8319 SDValue Operand, SDValue Subreg) { 8320 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8321 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 8322 VT, Operand, Subreg, SRIdxVal); 8323 return SDValue(Result, 0); 8324 } 8325 8326 /// getNodeIfExists - Get the specified node if it's already available, or 8327 /// else return NULL. 8328 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 8329 ArrayRef<SDValue> Ops) { 8330 SDNodeFlags Flags; 8331 if (Inserter) 8332 Flags = Inserter->getFlags(); 8333 return getNodeIfExists(Opcode, VTList, Ops, Flags); 8334 } 8335 8336 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 8337 ArrayRef<SDValue> Ops, 8338 const SDNodeFlags Flags) { 8339 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 8340 FoldingSetNodeID ID; 8341 AddNodeIDNode(ID, Opcode, VTList, Ops); 8342 void *IP = nullptr; 8343 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 8344 E->intersectFlagsWith(Flags); 8345 return E; 8346 } 8347 } 8348 return nullptr; 8349 } 8350 8351 /// getDbgValue - Creates a SDDbgValue node. 
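///
/// A minimal sketch of creating and registering a debug value for result 0
/// of a node N; Var, Expr, DL, and Order would come from the corresponding
/// dbg.value intrinsic:
/// \code
///   SDDbgValue *DV = DAG.getDbgValue(Var, Expr, N, /*R=*/0,
///                                    /*IsIndirect=*/false, DL, Order);
///   DAG.AddDbgValue(DV, N, /*isParameter=*/false);
/// \endcode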
8352 /// 8353 /// SDNode 8354 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 8355 SDNode *N, unsigned R, bool IsIndirect, 8356 const DebugLoc &DL, unsigned O) { 8357 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8358 "Expected inlined-at fields to agree"); 8359 return new (DbgInfo->getAlloc()) 8360 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 8361 } 8362 8363 /// Constant 8364 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 8365 DIExpression *Expr, 8366 const Value *C, 8367 const DebugLoc &DL, unsigned O) { 8368 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8369 "Expected inlined-at fields to agree"); 8370 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 8371 } 8372 8373 /// FrameIndex 8374 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 8375 DIExpression *Expr, unsigned FI, 8376 bool IsIndirect, 8377 const DebugLoc &DL, 8378 unsigned O) { 8379 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8380 "Expected inlined-at fields to agree"); 8381 return new (DbgInfo->getAlloc()) 8382 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX); 8383 } 8384 8385 /// VReg 8386 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, 8387 DIExpression *Expr, 8388 unsigned VReg, bool IsIndirect, 8389 const DebugLoc &DL, unsigned O) { 8390 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8391 "Expected inlined-at fields to agree"); 8392 return new (DbgInfo->getAlloc()) 8393 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG); 8394 } 8395 8396 void SelectionDAG::transferDbgValues(SDValue From, SDValue To, 8397 unsigned OffsetInBits, unsigned SizeInBits, 8398 bool InvalidateDbg) { 8399 SDNode *FromNode = From.getNode(); 8400 SDNode *ToNode = To.getNode(); 8401 assert(FromNode && ToNode && "Can't modify dbg values"); 8402 8403 // PR35338 8404 // TODO: assert(From != To && "Redundant dbg value transfer"); 8405 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); 8406 if (From == To || FromNode == ToNode) 8407 return; 8408 8409 if (!FromNode->getHasDebugValue()) 8410 return; 8411 8412 SmallVector<SDDbgValue *, 2> ClonedDVs; 8413 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { 8414 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated()) 8415 continue; 8416 8417 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); 8418 8419 // Just transfer the dbg value attached to From. 8420 if (Dbg->getResNo() != From.getResNo()) 8421 continue; 8422 8423 DIVariable *Var = Dbg->getVariable(); 8424 auto *Expr = Dbg->getExpression(); 8425 // If a fragment is requested, update the expression. 8426 if (SizeInBits) { 8427 // When splitting a larger (e.g., sign-extended) value whose 8428 // lower bits are described with an SDDbgValue, do not attempt 8429 // to transfer the SDDbgValue to the upper bits. 8430 if (auto FI = Expr->getFragmentInfo()) 8431 if (OffsetInBits + SizeInBits > FI->SizeInBits) 8432 continue; 8433 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, 8434 SizeInBits); 8435 if (!Fragment) 8436 continue; 8437 Expr = *Fragment; 8438 } 8439 // Clone the SDDbgValue and move it to To. 
8440 SDDbgValue *Clone = getDbgValue( 8441 Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(), 8442 std::max(ToNode->getIROrder(), Dbg->getOrder())); 8443 ClonedDVs.push_back(Clone); 8444 8445 if (InvalidateDbg) { 8446 // Invalidate value and indicate the SDDbgValue should not be emitted. 8447 Dbg->setIsInvalidated(); 8448 Dbg->setIsEmitted(); 8449 } 8450 } 8451 8452 for (SDDbgValue *Dbg : ClonedDVs) 8453 AddDbgValue(Dbg, ToNode, false); 8454 } 8455 8456 void SelectionDAG::salvageDebugInfo(SDNode &N) { 8457 if (!N.getHasDebugValue()) 8458 return; 8459 8460 SmallVector<SDDbgValue *, 2> ClonedDVs; 8461 for (auto DV : GetDbgValues(&N)) { 8462 if (DV->isInvalidated()) 8463 continue; 8464 switch (N.getOpcode()) { 8465 default: 8466 break; 8467 case ISD::ADD: 8468 SDValue N0 = N.getOperand(0); 8469 SDValue N1 = N.getOperand(1); 8470 if (!isConstantIntBuildVectorOrConstantInt(N0) && 8471 isConstantIntBuildVectorOrConstantInt(N1)) { 8472 uint64_t Offset = N.getConstantOperandVal(1); 8473 // Rewrite an ADD constant node into a DIExpression. Since we are 8474 // performing arithmetic to compute the variable's *value* in the 8475 // DIExpression, we need to mark the expression with a 8476 // DW_OP_stack_value. 8477 auto *DIExpr = DV->getExpression(); 8478 DIExpr = 8479 DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset); 8480 SDDbgValue *Clone = 8481 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(), 8482 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder()); 8483 ClonedDVs.push_back(Clone); 8484 DV->setIsInvalidated(); 8485 DV->setIsEmitted(); 8486 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; 8487 N0.getNode()->dumprFull(this); 8488 dbgs() << " into " << *DIExpr << '\n'); 8489 } 8490 } 8491 } 8492 8493 for (SDDbgValue *Dbg : ClonedDVs) 8494 AddDbgValue(Dbg, Dbg->getSDNode(), false); 8495 } 8496 8497 /// Creates a SDDbgLabel node. 8498 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label, 8499 const DebugLoc &DL, unsigned O) { 8500 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && 8501 "Expected inlined-at fields to agree"); 8502 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); 8503 } 8504 8505 namespace { 8506 8507 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 8508 /// pointed to by a use iterator is deleted, increment the use iterator 8509 /// so that it doesn't dangle. 8510 /// 8511 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 8512 SDNode::use_iterator &UI; 8513 SDNode::use_iterator &UE; 8514 8515 void NodeDeleted(SDNode *N, SDNode *E) override { 8516 // Increment the iterator as needed. 8517 while (UI != UE && N == *UI) 8518 ++UI; 8519 } 8520 8521 public: 8522 RAUWUpdateListener(SelectionDAG &d, 8523 SDNode::use_iterator &ui, 8524 SDNode::use_iterator &ue) 8525 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 8526 }; 8527 8528 } // end anonymous namespace 8529 8530 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8531 /// This can cause recursive merging of nodes in the DAG. 8532 /// 8533 /// This version assumes From has a single result value. 
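///
/// A minimal usage sketch (the fold itself is hypothetical): redirect every
/// user of an (add x, 0) node to x, letting CSE merge any users that become
/// identical to existing nodes:
/// \code
///   SDValue Add = ...; // known to be (add x, 0)
///   DAG.ReplaceAllUsesWith(Add, Add.getOperand(0));
/// \endcode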
8534 ///
8535 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
8536   SDNode *From = FromN.getNode();
8537   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
8538          "Cannot replace with this method!");
8539   assert(From != To.getNode() && "Cannot replace uses of a value with itself");
8540 
8541   // Preserve Debug Values.
8542   transferDbgValues(FromN, To);
8543 
8544   // Iterate over all the existing uses of From. New uses will be added
8545   // to the beginning of the use list, which we avoid visiting.
8546   // This specifically avoids visiting uses of From that arise while the
8547   // replacement is happening, because any such uses would be the result
8548   // of CSE: If an existing node looks like From after one of its operands
8549   // is replaced by To, we don't want to replace all of its users with To
8550   // too. See PR3018 for more info.
8551   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8552   RAUWUpdateListener Listener(*this, UI, UE);
8553   while (UI != UE) {
8554     SDNode *User = *UI;
8555 
8556     // This node is about to morph, remove its old self from the CSE maps.
8557     RemoveNodeFromCSEMaps(User);
8558 
8559     // A user can appear in a use list multiple times, and when this
8560     // happens the uses are usually next to each other in the list.
8561     // To help reduce the number of CSE recomputations, process all
8562     // the uses of this user that we can find this way.
8563     do {
8564       SDUse &Use = UI.getUse();
8565       ++UI;
8566       Use.set(To);
8567       if (To->isDivergent() != From->isDivergent())
8568         updateDivergence(User);
8569     } while (UI != UE && *UI == User);
8570     // Now that we have modified User, add it back to the CSE maps. If it
8571     // already exists there, recursively merge the results together.
8572     AddModifiedNodeToCSEMaps(User);
8573   }
8574 
8575   // If we just RAUW'd the root, take note.
8576   if (FromN == getRoot())
8577     setRoot(To);
8578 }
8579 
8580 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8581 /// This can cause recursive merging of nodes in the DAG.
8582 ///
8583 /// This version assumes that for each value of From, there is a
8584 /// corresponding value in To in the same position with the same type.
8585 ///
8586 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
8587 #ifndef NDEBUG
8588   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8589     assert((!From->hasAnyUseOfValue(i) ||
8590             From->getValueType(i) == To->getValueType(i)) &&
8591            "Cannot use this version of ReplaceAllUsesWith!");
8592 #endif
8593 
8594   // Handle the trivial case.
8595   if (From == To)
8596     return;
8597 
8598   // Preserve Debug Info. Only do this if there's a use.
8599   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8600     if (From->hasAnyUseOfValue(i)) {
8601       assert((i < To->getNumValues()) && "Invalid To location");
8602       transferDbgValues(SDValue(From, i), SDValue(To, i));
8603     }
8604 
8605   // Iterate over just the existing users of From. See the comments in
8606   // the ReplaceAllUsesWith above.
8607   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8608   RAUWUpdateListener Listener(*this, UI, UE);
8609   while (UI != UE) {
8610     SDNode *User = *UI;
8611 
8612     // This node is about to morph, remove its old self from the CSE maps.
8613     RemoveNodeFromCSEMaps(User);
8614 
8615     // A user can appear in a use list multiple times, and when this
8616     // happens the uses are usually next to each other in the list.
8617 // To help reduce the number of CSE recomputations, process all 8618 // the uses of this user that we can find this way. 8619 do { 8620 SDUse &Use = UI.getUse(); 8621 ++UI; 8622 Use.setNode(To); 8623 if (To->isDivergent() != From->isDivergent()) 8624 updateDivergence(User); 8625 } while (UI != UE && *UI == User); 8626 8627 // Now that we have modified User, add it back to the CSE maps. If it 8628 // already exists there, recursively merge the results together. 8629 AddModifiedNodeToCSEMaps(User); 8630 } 8631 8632 // If we just RAUW'd the root, take note. 8633 if (From == getRoot().getNode()) 8634 setRoot(SDValue(To, getRoot().getResNo())); 8635 } 8636 8637 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8638 /// This can cause recursive merging of nodes in the DAG. 8639 /// 8640 /// This version can replace From with any result values. To must match the 8641 /// number and types of values returned by From. 8642 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 8643 if (From->getNumValues() == 1) // Handle the simple case efficiently. 8644 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 8645 8646 // Preserve Debug Info. 8647 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8648 transferDbgValues(SDValue(From, i), To[i]); 8649 8650 // Iterate over just the existing users of From. See the comments in 8651 // the ReplaceAllUsesWith above. 8652 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8653 RAUWUpdateListener Listener(*this, UI, UE); 8654 while (UI != UE) { 8655 SDNode *User = *UI; 8656 8657 // This node is about to morph, remove its old self from the CSE maps. 8658 RemoveNodeFromCSEMaps(User); 8659 8660 // A user can appear in a use list multiple times, and when this happens the 8661 // uses are usually next to each other in the list. To help reduce the 8662 // number of CSE and divergence recomputations, process all the uses of this 8663 // user that we can find this way. 8664 bool To_IsDivergent = false; 8665 do { 8666 SDUse &Use = UI.getUse(); 8667 const SDValue &ToOp = To[Use.getResNo()]; 8668 ++UI; 8669 Use.set(ToOp); 8670 To_IsDivergent |= ToOp->isDivergent(); 8671 } while (UI != UE && *UI == User); 8672 8673 if (To_IsDivergent != From->isDivergent()) 8674 updateDivergence(User); 8675 8676 // Now that we have modified User, add it back to the CSE maps. If it 8677 // already exists there, recursively merge the results together. 8678 AddModifiedNodeToCSEMaps(User); 8679 } 8680 8681 // If we just RAUW'd the root, take note. 8682 if (From == getRoot().getNode()) 8683 setRoot(SDValue(To[getRoot().getResNo()])); 8684 } 8685 8686 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 8687 /// uses of other values produced by From.getNode() alone. The Deleted 8688 /// vector is handled the same way as for ReplaceAllUsesWith. 8689 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 8690 // Handle the really simple, really trivial case efficiently. 8691 if (From == To) return; 8692 8693 // Handle the simple, trivial, case efficiently. 8694 if (From.getNode()->getNumValues() == 1) { 8695 ReplaceAllUsesWith(From, To); 8696 return; 8697 } 8698 8699 // Preserve Debug Info. 8700 transferDbgValues(From, To); 8701 8702 // Iterate over just the existing users of From. See the comments in 8703 // the ReplaceAllUsesWith above. 
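  // Note that only uses whose result number matches From.getResNo() are
  // rewritten below; users of the node's other values are left untouched.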
8704 SDNode::use_iterator UI = From.getNode()->use_begin(), 8705 UE = From.getNode()->use_end(); 8706 RAUWUpdateListener Listener(*this, UI, UE); 8707 while (UI != UE) { 8708 SDNode *User = *UI; 8709 bool UserRemovedFromCSEMaps = false; 8710 8711 // A user can appear in a use list multiple times, and when this 8712 // happens the uses are usually next to each other in the list. 8713 // To help reduce the number of CSE recomputations, process all 8714 // the uses of this user that we can find this way. 8715 do { 8716 SDUse &Use = UI.getUse(); 8717 8718 // Skip uses of different values from the same node. 8719 if (Use.getResNo() != From.getResNo()) { 8720 ++UI; 8721 continue; 8722 } 8723 8724 // If this node hasn't been modified yet, it's still in the CSE maps, 8725 // so remove its old self from the CSE maps. 8726 if (!UserRemovedFromCSEMaps) { 8727 RemoveNodeFromCSEMaps(User); 8728 UserRemovedFromCSEMaps = true; 8729 } 8730 8731 ++UI; 8732 Use.set(To); 8733 if (To->isDivergent() != From->isDivergent()) 8734 updateDivergence(User); 8735 } while (UI != UE && *UI == User); 8736 // We are iterating over all uses of the From node, so if a use 8737 // doesn't use the specific value, no changes are made. 8738 if (!UserRemovedFromCSEMaps) 8739 continue; 8740 8741 // Now that we have modified User, add it back to the CSE maps. If it 8742 // already exists there, recursively merge the results together. 8743 AddModifiedNodeToCSEMaps(User); 8744 } 8745 8746 // If we just RAUW'd the root, take note. 8747 if (From == getRoot()) 8748 setRoot(To); 8749 } 8750 8751 namespace { 8752 8753 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 8754 /// to record information about a use. 8755 struct UseMemo { 8756 SDNode *User; 8757 unsigned Index; 8758 SDUse *Use; 8759 }; 8760 8761 /// operator< - Sort Memos by User. 
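///
/// Sorting by the raw User pointer groups all the uses belonging to one
/// user node next to each other, so ReplaceAllUsesOfValuesWith can remove
/// each user from the CSE maps once, rewrite all of its matching operands,
/// and re-add it once.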
8762 bool operator<(const UseMemo &L, const UseMemo &R) { 8763 return (intptr_t)L.User < (intptr_t)R.User; 8764 } 8765 8766 } // end anonymous namespace 8767 8768 bool SelectionDAG::calculateDivergence(SDNode *N) { 8769 if (TLI->isSDNodeAlwaysUniform(N)) { 8770 assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, DA) && 8771 "Conflicting divergence information!"); 8772 return false; 8773 } 8774 if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA)) 8775 return true; 8776 for (auto &Op : N->ops()) { 8777 if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent()) 8778 return true; 8779 } 8780 return false; 8781 } 8782 8783 void SelectionDAG::updateDivergence(SDNode *N) { 8784 SmallVector<SDNode *, 16> Worklist(1, N); 8785 do { 8786 N = Worklist.pop_back_val(); 8787 bool IsDivergent = calculateDivergence(N); 8788 if (N->SDNodeBits.IsDivergent != IsDivergent) { 8789 N->SDNodeBits.IsDivergent = IsDivergent; 8790 Worklist.insert(Worklist.end(), N->use_begin(), N->use_end()); 8791 } 8792 } while (!Worklist.empty()); 8793 } 8794 8795 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) { 8796 DenseMap<SDNode *, unsigned> Degree; 8797 Order.reserve(AllNodes.size()); 8798 for (auto &N : allnodes()) { 8799 unsigned NOps = N.getNumOperands(); 8800 Degree[&N] = NOps; 8801 if (0 == NOps) 8802 Order.push_back(&N); 8803 } 8804 for (size_t I = 0; I != Order.size(); ++I) { 8805 SDNode *N = Order[I]; 8806 for (auto U : N->uses()) { 8807 unsigned &UnsortedOps = Degree[U]; 8808 if (0 == --UnsortedOps) 8809 Order.push_back(U); 8810 } 8811 } 8812 } 8813 8814 #ifndef NDEBUG 8815 void SelectionDAG::VerifyDAGDiverence() { 8816 std::vector<SDNode *> TopoOrder; 8817 CreateTopologicalOrder(TopoOrder); 8818 for (auto *N : TopoOrder) { 8819 assert(calculateDivergence(N) == N->isDivergent() && 8820 "Divergence bit inconsistency detected"); 8821 } 8822 } 8823 #endif 8824 8825 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving 8826 /// uses of other values produced by From.getNode() alone. The same value 8827 /// may appear in both the From and To list. The Deleted vector is 8828 /// handled the same way as for ReplaceAllUsesWith. 8829 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From, 8830 const SDValue *To, 8831 unsigned Num){ 8832 // Handle the simple, trivial case efficiently. 8833 if (Num == 1) 8834 return ReplaceAllUsesOfValueWith(*From, *To); 8835 8836 transferDbgValues(*From, *To); 8837 8838 // Read up all the uses and make records of them. This helps 8839 // processing new uses that are introduced during the 8840 // replacement process. 8841 SmallVector<UseMemo, 4> Uses; 8842 for (unsigned i = 0; i != Num; ++i) { 8843 unsigned FromResNo = From[i].getResNo(); 8844 SDNode *FromNode = From[i].getNode(); 8845 for (SDNode::use_iterator UI = FromNode->use_begin(), 8846 E = FromNode->use_end(); UI != E; ++UI) { 8847 SDUse &Use = UI.getUse(); 8848 if (Use.getResNo() == FromResNo) { 8849 UseMemo Memo = { *UI, i, &Use }; 8850 Uses.push_back(Memo); 8851 } 8852 } 8853 } 8854 8855 // Sort the uses, so that all the uses from a given User are together. 8856 llvm::sort(Uses); 8857 8858 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size(); 8859 UseIndex != UseIndexEnd; ) { 8860 // We know that this user uses some value of From. If it is the right 8861 // value, update it. 8862 SDNode *User = Uses[UseIndex].User; 8863 8864 // This node is about to morph, remove its old self from the CSE maps. 
8865     RemoveNodeFromCSEMaps(User);
8866 
8867     // The Uses array is sorted, so all the uses for a given User
8868     // are next to each other in the list.
8869     // To help reduce the number of CSE recomputations, process all
8870     // the uses of this user that we can find this way.
8871     do {
8872       unsigned i = Uses[UseIndex].Index;
8873       SDUse &Use = *Uses[UseIndex].Use;
8874       ++UseIndex;
8875 
8876       Use.set(To[i]);
8877     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8878 
8879     // Now that we have modified User, add it back to the CSE maps. If it
8880     // already exists there, recursively merge the results together.
8881     AddModifiedNodeToCSEMaps(User);
8882   }
8883 }
8884 
8885 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
8886 /// based on their topological order. It returns the number of nodes, which
8887 /// is one more than the maximum assigned id.
8888 unsigned SelectionDAG::AssignTopologicalOrder() {
8889   unsigned DAGSize = 0;
8890 
8891   // SortedPos tracks the progress of the algorithm. Nodes before it are
8892   // sorted, nodes after it are unsorted. When the algorithm completes
8893   // it is at the end of the list.
8894   allnodes_iterator SortedPos = allnodes_begin();
8895 
8896   // Visit all the nodes. Move nodes with no operands to the front of
8897   // the list immediately. Annotate nodes that do have operands with their
8898   // operand count. Before we do this, the Node Id fields of the nodes
8899   // may contain arbitrary values. After, the Node Id fields for nodes
8900   // before SortedPos will contain the topological sort index, and the
8901   // Node Id fields for nodes at SortedPos and after will contain the
8902   // count of outstanding operands.
8903   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
8904     SDNode *N = &*I++;
8905     checkForCycles(N, this);
8906     unsigned Degree = N->getNumOperands();
8907     if (Degree == 0) {
8908       // A node with no operands; move it to the sorted position immediately.
8909       N->setNodeId(DAGSize++);
8910       allnodes_iterator Q(N);
8911       if (Q != SortedPos)
8912         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
8913       assert(SortedPos != AllNodes.end() && "Overran node list");
8914       ++SortedPos;
8915     } else {
8916       // Temporarily use the Node Id as scratch space for the degree count.
8917       N->setNodeId(Degree);
8918     }
8919   }
8920 
8921   // Visit all the nodes. As we iterate, move nodes into sorted order,
8922   // such that by the time the end is reached all nodes will be sorted.
8923   for (SDNode &Node : allnodes()) {
8924     SDNode *N = &Node;
8925     checkForCycles(N, this);
8926     // N is in sorted position, so each of its users now has one fewer
8927     // operand that needs to be sorted.
8928     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
8929          UI != UE; ++UI) {
8930       SDNode *P = *UI;
8931       unsigned Degree = P->getNodeId();
8932       assert(Degree != 0 && "Invalid node degree");
8933       --Degree;
8934       if (Degree == 0) {
8935         // All of P's operands are sorted, so P may be sorted now.
8936         P->setNodeId(DAGSize++);
8937         if (P->getIterator() != SortedPos)
8938           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
8939         assert(SortedPos != AllNodes.end() && "Overran node list");
8940         ++SortedPos;
8941       } else {
8942         // Update P's outstanding operand count.
8943         P->setNodeId(Degree);
8944       }
8945     }
8946     if (Node.getIterator() == SortedPos) {
8947 #ifndef NDEBUG
8948       allnodes_iterator I(N);
8949       SDNode *S = &*++I;
8950       dbgs() << "Overran sorted position:\n";
8951       S->dumprFull(this); dbgs() << "\n";
8952       dbgs() << "Checking if this is due to cycles\n";
8953       checkForCycles(this, true);
8954 #endif
8955       llvm_unreachable(nullptr);
8956     }
8957   }
8958 
8959   assert(SortedPos == AllNodes.end() &&
8960          "Topological sort incomplete!");
8961   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
8962          "First node in topological sort is not the entry token!");
8963   assert(AllNodes.front().getNodeId() == 0 &&
8964          "First node in topological sort has non-zero id!");
8965   assert(AllNodes.front().getNumOperands() == 0 &&
8966          "First node in topological sort has operands!");
8967   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
8968          "Last node in topological sort has unexpected id!");
8969   assert(AllNodes.back().use_empty() &&
8970          "Last node in topological sort has users!");
8971   assert(DAGSize == allnodes_size() && "Node count mismatch!");
8972   return DAGSize;
8973 }
8974 
8975 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
8976 /// value is produced by SD.
8977 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
8978   if (SD) {
8979     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
8980     SD->setHasDebugValue(true);
8981   }
8982   DbgInfo->add(DB, SD, isParameter);
8983 }
8984 
8985 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
8986   DbgInfo->add(DB);
8987 }
8988 
8989 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
8990                                                    SDValue NewMemOp) {
8991   assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
8992   // The new memory operation must have the same position as the old load in
8993   // terms of memory dependency. Create a TokenFactor for the old load and new
8994   // memory operation and update uses of the old load's output chain to use that
8995   // TokenFactor.
8996 SDValue OldChain = SDValue(OldLoad, 1); 8997 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 8998 if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1)) 8999 return NewChain; 9000 9001 SDValue TokenFactor = 9002 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 9003 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 9004 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 9005 return TokenFactor; 9006 } 9007 9008 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op, 9009 Function **OutFunction) { 9010 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol"); 9011 9012 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 9013 auto *Module = MF->getFunction().getParent(); 9014 auto *Function = Module->getFunction(Symbol); 9015 9016 if (OutFunction != nullptr) 9017 *OutFunction = Function; 9018 9019 if (Function != nullptr) { 9020 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace()); 9021 return getGlobalAddress(Function, SDLoc(Op), PtrTy); 9022 } 9023 9024 std::string ErrorStr; 9025 raw_string_ostream ErrorFormatter(ErrorStr); 9026 9027 ErrorFormatter << "Undefined external symbol "; 9028 ErrorFormatter << '"' << Symbol << '"'; 9029 ErrorFormatter.flush(); 9030 9031 report_fatal_error(ErrorStr); 9032 } 9033 9034 //===----------------------------------------------------------------------===// 9035 // SDNode Class 9036 //===----------------------------------------------------------------------===// 9037 9038 bool llvm::isNullConstant(SDValue V) { 9039 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 9040 return Const != nullptr && Const->isNullValue(); 9041 } 9042 9043 bool llvm::isNullFPConstant(SDValue V) { 9044 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 9045 return Const != nullptr && Const->isZero() && !Const->isNegative(); 9046 } 9047 9048 bool llvm::isAllOnesConstant(SDValue V) { 9049 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 9050 return Const != nullptr && Const->isAllOnesValue(); 9051 } 9052 9053 bool llvm::isOneConstant(SDValue V) { 9054 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 9055 return Const != nullptr && Const->isOne(); 9056 } 9057 9058 SDValue llvm::peekThroughBitcasts(SDValue V) { 9059 while (V.getOpcode() == ISD::BITCAST) 9060 V = V.getOperand(0); 9061 return V; 9062 } 9063 9064 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { 9065 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) 9066 V = V.getOperand(0); 9067 return V; 9068 } 9069 9070 SDValue llvm::peekThroughExtractSubvectors(SDValue V) { 9071 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) 9072 V = V.getOperand(0); 9073 return V; 9074 } 9075 9076 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) { 9077 if (V.getOpcode() != ISD::XOR) 9078 return false; 9079 V = peekThroughBitcasts(V.getOperand(1)); 9080 unsigned NumBits = V.getScalarValueSizeInBits(); 9081 ConstantSDNode *C = 9082 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true); 9083 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits); 9084 } 9085 9086 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs, 9087 bool AllowTruncation) { 9088 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 9089 return CN; 9090 9091 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9092 BitVector UndefElements; 9093 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 9094 9095 // BuildVectors can truncate their operands. 
Ignore that case here unless 9096 // AllowTruncation is set. 9097 if (CN && (UndefElements.none() || AllowUndefs)) { 9098 EVT CVT = CN->getValueType(0); 9099 EVT NSVT = N.getValueType().getScalarType(); 9100 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 9101 if (AllowTruncation || (CVT == NSVT)) 9102 return CN; 9103 } 9104 } 9105 9106 return nullptr; 9107 } 9108 9109 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts, 9110 bool AllowUndefs, 9111 bool AllowTruncation) { 9112 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 9113 return CN; 9114 9115 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9116 BitVector UndefElements; 9117 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements); 9118 9119 // BuildVectors can truncate their operands. Ignore that case here unless 9120 // AllowTruncation is set. 9121 if (CN && (UndefElements.none() || AllowUndefs)) { 9122 EVT CVT = CN->getValueType(0); 9123 EVT NSVT = N.getValueType().getScalarType(); 9124 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 9125 if (AllowTruncation || (CVT == NSVT)) 9126 return CN; 9127 } 9128 } 9129 9130 return nullptr; 9131 } 9132 9133 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) { 9134 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 9135 return CN; 9136 9137 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9138 BitVector UndefElements; 9139 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 9140 if (CN && (UndefElements.none() || AllowUndefs)) 9141 return CN; 9142 } 9143 9144 if (N.getOpcode() == ISD::SPLAT_VECTOR) 9145 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0))) 9146 return CN; 9147 9148 return nullptr; 9149 } 9150 9151 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, 9152 const APInt &DemandedElts, 9153 bool AllowUndefs) { 9154 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 9155 return CN; 9156 9157 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 9158 BitVector UndefElements; 9159 ConstantFPSDNode *CN = 9160 BV->getConstantFPSplatNode(DemandedElts, &UndefElements); 9161 if (CN && (UndefElements.none() || AllowUndefs)) 9162 return CN; 9163 } 9164 9165 return nullptr; 9166 } 9167 9168 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) { 9169 // TODO: may want to use peekThroughBitcast() here. 9170 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs); 9171 return C && C->isNullValue(); 9172 } 9173 9174 bool llvm::isOneOrOneSplat(SDValue N) { 9175 // TODO: may want to use peekThroughBitcast() here. 
9176 unsigned BitWidth = N.getScalarValueSizeInBits(); 9177 ConstantSDNode *C = isConstOrConstSplat(N); 9178 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth; 9179 } 9180 9181 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) { 9182 N = peekThroughBitcasts(N); 9183 unsigned BitWidth = N.getScalarValueSizeInBits(); 9184 ConstantSDNode *C = isConstOrConstSplat(N); 9185 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth; 9186 } 9187 9188 HandleSDNode::~HandleSDNode() { 9189 DropOperands(); 9190 } 9191 9192 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 9193 const DebugLoc &DL, 9194 const GlobalValue *GA, EVT VT, 9195 int64_t o, unsigned TF) 9196 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 9197 TheGlobal = GA; 9198 } 9199 9200 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 9201 EVT VT, unsigned SrcAS, 9202 unsigned DestAS) 9203 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 9204 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 9205 9206 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 9207 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 9208 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 9209 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 9210 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 9211 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 9212 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 9213 9214 // We check here that the size of the memory operand fits within the size of 9215 // the MMO. This is because the MMO might indicate only a possible address 9216 // range instead of specifying the affected memory addresses precisely. 9217 // TODO: Make MachineMemOperands aware of scalable vectors. 9218 assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() && 9219 "Size mismatch!"); 9220 } 9221 9222 /// Profile - Gather unique data for the node. 9223 /// 9224 void SDNode::Profile(FoldingSetNodeID &ID) const { 9225 AddNodeIDNode(ID, this); 9226 } 9227 9228 namespace { 9229 9230 struct EVTArray { 9231 std::vector<EVT> VTs; 9232 9233 EVTArray() { 9234 VTs.reserve(MVT::LAST_VALUETYPE); 9235 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 9236 VTs.push_back(MVT((MVT::SimpleValueType)i)); 9237 } 9238 }; 9239 9240 } // end anonymous namespace 9241 9242 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 9243 static ManagedStatic<EVTArray> SimpleVTArray; 9244 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 9245 9246 /// getValueTypeList - Return a pointer to the specified value type. 9247 /// 9248 const EVT *SDNode::getValueTypeList(EVT VT) { 9249 if (VT.isExtended()) { 9250 sys::SmartScopedLock<true> Lock(*VTMutex); 9251 return &(*EVTs->insert(VT).first); 9252 } else { 9253 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 9254 "Value type out of range!"); 9255 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 9256 } 9257 } 9258 9259 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 9260 /// indicated value. This method ignores uses of other values defined by this 9261 /// operation. 
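///
/// For example, on a LoadSDNode L (result 0 is the loaded value, result 1
/// is the chain), L->hasNUsesOfValue(1, 1) asks whether exactly one node
/// uses the chain result, no matter how many nodes use the loaded value.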
9262 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { 9263 assert(Value < getNumValues() && "Bad value!"); 9264 9265 // TODO: Only iterate over uses of a given value of the node 9266 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { 9267 if (UI.getUse().getResNo() == Value) { 9268 if (NUses == 0) 9269 return false; 9270 --NUses; 9271 } 9272 } 9273 9274 // Found exactly the right number of uses? 9275 return NUses == 0; 9276 } 9277 9278 /// hasAnyUseOfValue - Return true if there are any use of the indicated 9279 /// value. This method ignores uses of other values defined by this operation. 9280 bool SDNode::hasAnyUseOfValue(unsigned Value) const { 9281 assert(Value < getNumValues() && "Bad value!"); 9282 9283 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) 9284 if (UI.getUse().getResNo() == Value) 9285 return true; 9286 9287 return false; 9288 } 9289 9290 /// isOnlyUserOf - Return true if this node is the only use of N. 9291 bool SDNode::isOnlyUserOf(const SDNode *N) const { 9292 bool Seen = false; 9293 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 9294 SDNode *User = *I; 9295 if (User == this) 9296 Seen = true; 9297 else 9298 return false; 9299 } 9300 9301 return Seen; 9302 } 9303 9304 /// Return true if the only users of N are contained in Nodes. 9305 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) { 9306 bool Seen = false; 9307 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { 9308 SDNode *User = *I; 9309 if (llvm::any_of(Nodes, 9310 [&User](const SDNode *Node) { return User == Node; })) 9311 Seen = true; 9312 else 9313 return false; 9314 } 9315 9316 return Seen; 9317 } 9318 9319 /// isOperand - Return true if this node is an operand of N. 9320 bool SDValue::isOperandOf(const SDNode *N) const { 9321 return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; }); 9322 } 9323 9324 bool SDNode::isOperandOf(const SDNode *N) const { 9325 return any_of(N->op_values(), 9326 [this](SDValue Op) { return this == Op.getNode(); }); 9327 } 9328 9329 /// reachesChainWithoutSideEffects - Return true if this operand (which must 9330 /// be a chain) reaches the specified operand without crossing any 9331 /// side-effecting instructions on any chain path. In practice, this looks 9332 /// through token factors and non-volatile loads. In order to remain efficient, 9333 /// this only looks a couple of nodes in, it does not do an exhaustive search. 9334 /// 9335 /// Note that we only need to examine chains when we're searching for 9336 /// side-effects; SelectionDAG requires that all side-effects are represented 9337 /// by chains, even if another operand would force a specific ordering. This 9338 /// constraint is necessary to allow transformations like splitting loads. 9339 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, 9340 unsigned Depth) const { 9341 if (*this == Dest) return true; 9342 9343 // Don't search too deeply, we just want to be able to see through 9344 // TokenFactor's etc. 9345 if (Depth == 0) return false; 9346 9347 // If this is a token factor, all inputs to the TF happen in parallel. 9348 if (getOpcode() == ISD::TokenFactor) { 9349 // First, try a shallow search. 9350 if (is_contained((*this)->ops(), Dest)) { 9351 // We found the chain we want as an operand of this TokenFactor. 
      // Essentially, we reach the chain without side-effects if we could
      // serialize the TokenFactor into a simple chain of operations with
      // Dest as the last operation. This is automatically true if the
      // chain has one use: there are no other ordering constraints.
      // If the chain has more than one use, we give up: some other
      // use of Dest might force a side-effect between Dest and the current
      // node.
      if (Dest.hasOneUse())
        return true;
    }
    // Next, try a deep search: check whether every operand of the TokenFactor
    // reaches Dest.
    return llvm::all_of((*this)->ops(), [=](SDValue Op) {
      return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
    });
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (Ld->isUnordered())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth - 1);
  }
  return false;
}

bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}

void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
  this->Flags.intersectWith(Flags);
}

SDValue
SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
                                  ArrayRef<ISD::NodeType> CandidateBinOps,
                                  bool AllowPartials) {
  // The pattern must end in an extract from index 0.
  if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isNullConstant(Extract->getOperand(1)))
    return SDValue();

  // Match against one of the candidate binary ops.
  SDValue Op = Extract->getOperand(0);
  if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
        return Op.getOpcode() == unsigned(BinOp);
      }))
    return SDValue();

  // Floating-point reductions may require relaxed constraints on the final
  // step of the reduction because they may reorder intermediate operations.
  unsigned CandidateBinOp = Op.getOpcode();
  if (Op.getValueType().isFloatingPoint()) {
    SDNodeFlags Flags = Op->getFlags();
    switch (CandidateBinOp) {
    case ISD::FADD:
      if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
        return SDValue();
      break;
    default:
      llvm_unreachable("Unhandled FP opcode for binop reduction");
    }
  }

  // If the full match fails below, we may still have completed enough stages
  // for a partial reduction from a subvector; this helper builds the cheap
  // subvector extract for that case.
  auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
    if (!AllowPartials || !Op)
      return SDValue();
    EVT OpVT = Op.getValueType();
    EVT OpSVT = OpVT.getScalarType();
    EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
    if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
      return SDValue();
    BinOp = (ISD::NodeType)CandidateBinOp;
    return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
                   getVectorIdxConstant(0, SDLoc(Op)));
  };

  // At each stage, we're looking for something that looks like:
  // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
  //                    <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
  //                               i32 undef, i32 undef, i32 undef, i32 undef>
  // %a = binop <8 x i32> %op, %s
  // Where the mask changes according to the stage. E.g.
  // for a 3-stage pyramid, we expect something like:
  //  <4,5,6,7,u,u,u,u>
  //  <2,3,u,u,u,u,u,u>
  //  <1,u,u,u,u,u,u,u>
  // While a partial reduction match would be:
  //  <2,3,u,u,u,u,u,u>
  //  <1,u,u,u,u,u,u,u>
  unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
  SDValue PrevOp;
  for (unsigned i = 0; i < Stages; ++i) {
    unsigned MaskEnd = (1 << i);

    if (Op.getOpcode() != CandidateBinOp)
      return PartialReduction(PrevOp, MaskEnd);

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
    if (Shuffle) {
      Op = Op1;
    } else {
      Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
      Op = Op0;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the binop.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return PartialReduction(PrevOp, MaskEnd);

    // Verify the shuffle has the expected (at this stage of the pyramid) mask.
    for (int Index = 0; Index < (int)MaskEnd; ++Index)
      if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
        return PartialReduction(PrevOp, MaskEnd);

    PrevOp = Op;
  }

  // Handle subvector reductions, which tend to appear after the shuffle
  // reduction stages.
  while (Op.getOpcode() == CandidateBinOp) {
    unsigned NumElts = Op.getValueType().getVectorNumElements();
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
        Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
        Op0.getOperand(0) != Op1.getOperand(0))
      break;
    SDValue Src = Op0.getOperand(0);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    if (NumSrcElts != (2 * NumElts))
      break;
    if (!(Op0.getConstantOperandAPInt(1) == 0 &&
          Op1.getConstantOperandAPInt(1) == NumElts) &&
        !(Op1.getConstantOperandAPInt(1) == 0 &&
          Op0.getConstantOperandAPInt(1) == NumElts))
      break;
    Op = Src;
  }

  BinOp = (ISD::NodeType)CandidateBinOp;
  return Op;
}

SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT,
                              Operand, getVectorIdxConstant(i, dl));
      } else {
        // A scalar operand; just use it as is.
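        // (a scalar is reused unchanged for every unrolled element)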
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}

std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
    SDNode *N, unsigned ResNE) {
  unsigned Opcode = N->getOpcode();
  assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
          Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
          Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
         "Expected an overflow opcode");

  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT ResEltVT = ResVT.getVectorElementType();
  EVT OvEltVT = OvVT.getVectorElementType();
  SDLoc dl(N);

  // If ResNE is 0, fully unroll the vector op.
  unsigned NE = ResVT.getVectorNumElements();
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  SmallVector<SDValue, 8> LHSScalars;
  SmallVector<SDValue, 8> RHSScalars;
  ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
  ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);

  EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
  SDVTList VTs = getVTList(ResEltVT, SVT);
  SmallVector<SDValue, 8> ResScalars;
  SmallVector<SDValue, 8> OvScalars;
  for (unsigned i = 0; i < NE; ++i) {
    SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
    SDValue Ov =
        getSelect(dl, OvEltVT, Res.getValue(1),
                  getBoolConstant(true, dl, OvEltVT, ResVT),
                  getConstant(0, dl, OvEltVT));

    ResScalars.push_back(Res);
    OvScalars.push_back(Ov);
  }

  ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
  OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));

  EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
  EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
  return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
                        getBuildVector(NewOvVT, dl, OvScalars));
}

bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  // TODO: probably too restrictive for atomics, revisit
  if (!LD->isSimple())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
  auto LocDecomp = BaseIndexOffset::match(LD, *this);
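
  // The loads are consecutive exactly when both addresses decompose to the
  // same base and index and their constant offsets differ by Dist * Bytes;
  // e.g. with Bytes == 4 and Dist == 1, LD must lie 4 bytes past Base.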
  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlign - Infer alignment of a load / store address. Return None if
/// it cannot be inferred.
MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV = nullptr;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    KnownBits Known(PtrWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    if (AlignBits)
      return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = INT_MIN;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != INT_MIN) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
  }

  return None;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
/// type, dependent on an enveloping VT that has been split into two identical
/// pieces. Sets the HiIsEmpty flag when the hi type has zero storage size.
std::pair<EVT, EVT>
SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
                                       bool *HiIsEmpty) const {
  EVT EltTp = VT.getVectorElementType();
  // Examples:
  //   custom VL=8  with enveloping VL=8/8 yields 8/0 (hi empty)
  //   custom VL=9  with enveloping VL=8/8 yields 8/1
  //   custom VL=10 with enveloping VL=8/8 yields 8/2
  //   etc.
  ElementCount VTNumElts = VT.getVectorElementCount();
  ElementCount EnvNumElts = EnvVT.getVectorElementCount();
  assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
         "Mixing fixed width and scalable vectors when enveloping a type");
  EVT LoVT, HiVT;
  if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
    LoVT = EnvVT;
    HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
    *HiIsEmpty = false;
  } else {
    // Flag that the hi type has zero storage size, but return the split
    // envelope type (this would be easier if vector types with zero elements
    // were allowed).
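    // e.g. splitting VL=8 against an enveloping VL=8/8 takes this path: LoVT
    // keeps all eight of VT's elements, HiVT is the envelope type, and
    // *HiIsEmpty tells the caller that the hi half holds no data.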
    LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
    HiVT = EnvVT;
    *HiIsEmpty = true;
  }
  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.isScalableVector() == HiVT.isScalableVector() &&
         LoVT.isScalableVector() == N.getValueType().isScalableVector() &&
         "Splitting vector with an invalid mixture of fixed and scalable "
         "vector types");
  assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <=
             N.getValueType().getVectorMinNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo =
      getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL));
  // For scalable vectors it is safe to use LoVT.getVectorMinNumElements()
  // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales
  // IDX with the runtime scaling factor of the result vector type. For
  // fixed-width result vectors, that runtime scaling factor is 1.
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
  return std::make_pair(Lo, Hi);
}

/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
  EVT VT = N.getValueType();
  EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                NextPowerOf2(VT.getVectorNumElements()));
  return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
                 getVectorIdxConstant(0, DL));
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count,
                                         EVT EltVT) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();
  if (EltVT == EVT())
    EltVT = VT.getVectorElementType();
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op,
                           getVectorIdxConstant(i, SL)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue.
  // If any of the values are not constant, give up and return false.
  unsigned NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  unsigned NumOps = getNumOperands();
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(NumOps);
  }
  assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0; i != NumOps; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
                                            SmallVectorImpl<SDValue> &Sequence,
                                            BitVector *UndefElements) const {
  unsigned NumOps = getNumOperands();
  Sequence.clear();
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(NumOps);
  }
  assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
  if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
    return false;

  // Set the undefs even if we don't find a sequence (like getSplatValue).
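  // (e.g. a fully-demanded build_vector <a,b,u,b> settles at SeqLen == 2 in
  // the widening loop below and returns Sequence = {a, b}.)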
  if (UndefElements)
    for (unsigned I = 0; I != NumOps; ++I)
      if (DemandedElts[I] && getOperand(I).isUndef())
        (*UndefElements)[I] = true;

  // Iteratively widen the sequence length looking for repetitions.
  for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
    Sequence.append(SeqLen, SDValue());
    for (unsigned I = 0; I != NumOps; ++I) {
      if (!DemandedElts[I])
        continue;
      SDValue &SeqOp = Sequence[I % SeqLen];
      SDValue Op = getOperand(I);
      if (Op.isUndef()) {
        if (!SeqOp)
          SeqOp = Op;
        continue;
      }
      if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
        Sequence.clear();
        break;
      }
      SeqOp = Op;
    }
    if (!Sequence.empty())
      return true;
  }

  assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
  return false;
}

bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
                                            BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
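  // e.g. mask <u,1,1,1> splats element 1, while <0,1,0,1> repeats but is not
  // a splat.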
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector or constant
// integer, including a SPLAT_VECTOR of a constant and a GlobalAddress for
// which constant offset folding is legal.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
      isa<ConstantSDNode>(N.getOperand(0)))
    return N.getNode();
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip Chain. It does not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent |= Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  if (!TLI->isSDNodeAlwaysUniform(Node)) {
    IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
    Node->SDNodeBits.IsDivergent = IsDivergent;
  }
  checkForCycles(Node);
}

SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}

SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL,
                                        EVT VT, SDNodeFlags Flags) {
  switch (Opcode) {
  default:
    return SDValue();
  case ISD::ADD:
  case ISD::OR:
  case ISD::XOR:
  case ISD::UMAX:
    return getConstant(0, DL, VT);
  case ISD::MUL:
    return getConstant(1, DL, VT);
  case ISD::AND:
  case ISD::UMIN:
    return getAllOnesConstant(DL, VT);
  case ISD::SMAX:
    return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT);
  case ISD::SMIN:
    return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT);
  case ISD::FADD:
    return getConstantFP(-0.0, DL, VT);
  case ISD::FMUL:
    return getConstantFP(1.0, DL, VT);
  case ISD::FMINNUM:
  case ISD::FMAXNUM: {
    // The neutral element for fminnum is NaN, Inf or FLT_MAX, depending on
    // FMF; for fmaxnum the sign is flipped below.
    const fltSemantics &Semantics = EVTToAPFloatSemantics(VT);
    APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
                        !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
                        APFloat::getLargest(Semantics);
    if (Opcode == ISD::FMAXNUM)
      NeutralAF.changeSign();

    return getConstantFP(NeutralAF, DL, VT);
  }
  }
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
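  // (Visited holds the nodes on the current DFS path, so re-entering one means
  // the path has looped back on itself; Checked holds nodes whose entire
  // subgraph has already been verified acyclic.)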
  if (!Visited.insert(N).second) {
    dbgs() << "Detected cycle in SelectionDAG\n"
           << "Offending node:\n";
    N->dumprFull(DAG);
    dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}
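
// A minimal usage sketch (hypothetical caller, not code from this file):
// after mutating the DAG in a debug build, one might run
//   checkForCycles(&DAG, /*force=*/true);
// to verify the graph is still acyclic. With force == false the walk only
// happens when EXPENSIVE_CHECKS is enabled, and under NDEBUG these helpers
// do nothing.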