//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}
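// Example of the type-legalization subtlety above: on a target that promotes
// i8 to i32, an all-ones v8i8 value may be built from i32 elements whose low
// 8 bits are set (e.g. 0xFF or 0xFFFFFFFF), possibly behind a BITCAST.
// Comparing countTrailingOnes() against EltSize accepts either form.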

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  bool IsInteger = Type.isInteger();
  unsigned Operation = Op;
  if (IsInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
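// Note on the bit tricks above and below: ISD::CondCode values (see
// ISDOpcodes.h) pack five flags into the enum value: E (bit 0), G (bit 1),
// L (bit 2), U (bit 3, true on unordered) and N (bit 4, the integer /
// "don't care about NaNs" variants). Worked examples:
//   getSetCCSwappedOperands(SETULT): 1100 with L and G swapped is 1010,
//   i.e. SETUGT.
//   getSetCCInverse(SETLT, <integer type>): 10100 ^ 00111 = 10011, i.e. SETGE.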

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if it is an unsigned comparison. Return zero if the operation does
/// not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits both get set, the resulting comparison DOES suddenly
  // care about orderedness: drop the "don't care" N bit and keep the
  // unordered (U) behavior.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the N bit.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}
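// These profile routines are what make CSE in the node-creation paths work:
// two requests for a structurally identical node (same opcode, interned VT
// list, operands, and any custom data added above) produce the same
// FoldingSetNodeID and therefore find the same SDNode in CSEMap.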

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to the next node if we've already managed to delete this one. This
    // can happen if replacing a node causes another node already on the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis * Divergence,
                        ProfileSummaryInfo *PSIin,
                        BlockFrequencyInfo *BFIin) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
  PSI = PSIin;
  BFI = BFIin;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

bool SelectionDAG::shouldOptForSize() const {
  return MF->getFunction().hasOptSize() ||
      llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

std::pair<SDValue, SDValue>
SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
                                       const SDLoc &DL, EVT VT) {
  assert(!VT.bitsEq(Op.getValueType()) &&
         "Strict no-op FP extend/round not allowed.");
  SDValue Res =
      VT.bitsGT(Op.getValueType())
          ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
          : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
                    {Chain, Op, getIntPtrConstant(0, DL)});

  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType().getScalarType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}
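// Example: getZeroExtendInReg(Op /*i32*/, DL, MVT::i16) emits
// (AND Op, (i32 0xFFFF)), clearing every bit above the low 16.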
SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}
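// The assert in getConstant(uint64_t, ...) above accepts Val iff its high
// (64 - EltSize) bits are all zero or all one, i.e. Val fits in the element
// type as a zero- or sign-extended value; the "+ 1 < 2" trick tests that the
// arithmetic-shift residue is 0 or -1 with a single unsigned comparison.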
SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }
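  // Worked example of the expansion above: a v2i64 splat of C on MIPS32
  // (where i64 expands to i32) becomes the v4i32 BUILD_VECTOR
  // (lo(C), hi(C), lo(C), hi(C)) bitcast back to v2i64; on big-endian
  // targets each (lo, hi) pair is reversed first.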
  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isScalableVector())
    Result = getSplatVector(VT, DL, Result);
  else if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
                                             const SDLoc &DL, bool LegalTypes) {
  EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
  return getConstant(Val, DL, ShiftVT);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = shouldOptForSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}
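// Example: commuting (shuffle A, B, <0,5,2,7>) yields (shuffle B, A,
// <4,1,6,3>); mask entries below NElts move up by NElts and vice versa.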
1594 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) { 1595 std::swap(N1, N2); 1596 ShuffleVectorSDNode::commuteMask(M); 1597 } 1598 1599 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, 1600 SDValue N2, ArrayRef<int> Mask) { 1601 assert(VT.getVectorNumElements() == Mask.size() && 1602 "Must have the same number of vector elements as mask elements!"); 1603 assert(VT == N1.getValueType() && VT == N2.getValueType() && 1604 "Invalid VECTOR_SHUFFLE"); 1605 1606 // Canonicalize shuffle undef, undef -> undef 1607 if (N1.isUndef() && N2.isUndef()) 1608 return getUNDEF(VT); 1609 1610 // Validate that all indices in Mask are within the range of the elements 1611 // input to the shuffle. 1612 int NElts = Mask.size(); 1613 assert(llvm::all_of(Mask, 1614 [&](int M) { return M < (NElts * 2) && M >= -1; }) && 1615 "Index out of range"); 1616 1617 // Copy the mask so we can do any needed cleanup. 1618 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end()); 1619 1620 // Canonicalize shuffle v, v -> v, undef 1621 if (N1 == N2) { 1622 N2 = getUNDEF(VT); 1623 for (int i = 0; i != NElts; ++i) 1624 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts; 1625 } 1626 1627 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. 1628 if (N1.isUndef()) 1629 commuteShuffle(N1, N2, MaskVec); 1630 1631 if (TLI->hasVectorBlend()) { 1632 // If shuffling a splat, try to blend the splat instead. We do this here so 1633 // that even when this arises during lowering we don't have to re-handle it. 1634 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) { 1635 BitVector UndefElements; 1636 SDValue Splat = BV->getSplatValue(&UndefElements); 1637 if (!Splat) 1638 return; 1639 1640 for (int i = 0; i < NElts; ++i) { 1641 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts)) 1642 continue; 1643 1644 // If this input comes from undef, mark it as such. 1645 if (UndefElements[MaskVec[i] - Offset]) { 1646 MaskVec[i] = -1; 1647 continue; 1648 } 1649 1650 // If we can blend a non-undef lane, use that instead. 1651 if (!UndefElements[i]) 1652 MaskVec[i] = i + Offset; 1653 } 1654 }; 1655 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1)) 1656 BlendSplat(N1BV, 0); 1657 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2)) 1658 BlendSplat(N2BV, NElts); 1659 } 1660 1661 // Canonicalize all index into lhs, -> shuffle lhs, undef 1662 // Canonicalize all index into rhs, -> shuffle rhs, undef 1663 bool AllLHS = true, AllRHS = true; 1664 bool N2Undef = N2.isUndef(); 1665 for (int i = 0; i != NElts; ++i) { 1666 if (MaskVec[i] >= NElts) { 1667 if (N2Undef) 1668 MaskVec[i] = -1; 1669 else 1670 AllLHS = false; 1671 } else if (MaskVec[i] >= 0) { 1672 AllRHS = false; 1673 } 1674 } 1675 if (AllLHS && AllRHS) 1676 return getUNDEF(VT); 1677 if (AllLHS && !N2Undef) 1678 N2 = getUNDEF(VT); 1679 if (AllRHS) { 1680 N1 = getUNDEF(VT); 1681 commuteShuffle(N1, N2, MaskVec); 1682 } 1683 // Reset our undef status after accounting for the mask. 1684 N2Undef = N2.isUndef(); 1685 // Re-check whether both sides ended up undef. 1686 if (N1.isUndef() && N2Undef) 1687 return getUNDEF(VT); 1688 1689 // If Identity shuffle return that node. 1690 bool Identity = true, AllSame = true; 1691 for (int i = 0; i != NElts; ++i) { 1692 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false; 1693 if (MaskVec[i] != MaskVec[0]) AllSame = false; 1694 } 1695 if (Identity && NElts) 1696 return N1; 1697 1698 // Shuffling a constant splat doesn't change the result. 
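// Illustrative example (not part of the original source): if N1 is
//   BUILD_VECTOR x, x, x, x
// and N2 is undef, then any mask drawing only from N1, e.g. <3,0,2,1>,
// still produces <x, x, x, x>, so N1 can be returned unchanged below.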
1699 if (N2Undef) {
1700 SDValue V = N1;
1701
1702 // Look through any bitcasts. We check that these don't change the number
1703 // (and size) of elements and just change their types.
1704 while (V.getOpcode() == ISD::BITCAST)
1705 V = V->getOperand(0);
1706
1707 // A splat should always show up as a build vector node.
1708 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1709 BitVector UndefElements;
1710 SDValue Splat = BV->getSplatValue(&UndefElements);
1711 // If this is a splat of an undef, shuffling it is also undef.
1712 if (Splat && Splat.isUndef())
1713 return getUNDEF(VT);
1714
1715 bool SameNumElts =
1716 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1717
1718 // We only have a splat which can skip shuffles if there is a splatted
1719 // value and no undef lanes rearranged by the shuffle.
1720 if (Splat && UndefElements.none()) {
1721 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1722 // number of elements matches or the value splatted is a zero constant.
1723 if (SameNumElts)
1724 return N1;
1725 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1726 if (C->isNullValue())
1727 return N1;
1728 }
1729
1730 // If the shuffle itself creates a splat, build the vector directly.
1731 if (AllSame && SameNumElts) {
1732 EVT BuildVT = BV->getValueType(0);
1733 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1734 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1735
1736 // We may have jumped through bitcasts, so the type of the
1737 // BUILD_VECTOR may not match the type of the shuffle.
1738 if (BuildVT != VT)
1739 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1740 return NewBV;
1741 }
1742 }
1743 }
1744
1745 FoldingSetNodeID ID;
1746 SDValue Ops[2] = { N1, N2 };
1747 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1748 for (int i = 0; i != NElts; ++i)
1749 ID.AddInteger(MaskVec[i]);
1750
1751 void* IP = nullptr;
1752 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1753 return SDValue(E, 0);
1754
1755 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1756 // SDNode doesn't have access to it. This memory will be "leaked" when
1757 // the node is deallocated, but recovered when the NodeAllocator is released.
1758 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1759 llvm::copy(MaskVec, MaskAlloc); 1760 1761 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1762 dl.getDebugLoc(), MaskAlloc); 1763 createOperands(N, Ops); 1764 1765 CSEMap.InsertNode(N, IP); 1766 InsertNode(N); 1767 SDValue V = SDValue(N, 0); 1768 NewSDValueDbgMsg(V, "Creating new node: ", this); 1769 return V; 1770 } 1771 1772 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1773 EVT VT = SV.getValueType(0); 1774 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1775 ShuffleVectorSDNode::commuteMask(MaskVec); 1776 1777 SDValue Op0 = SV.getOperand(0); 1778 SDValue Op1 = SV.getOperand(1); 1779 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1780 } 1781 1782 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1783 FoldingSetNodeID ID; 1784 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1785 ID.AddInteger(RegNo); 1786 void *IP = nullptr; 1787 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1788 return SDValue(E, 0); 1789 1790 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1791 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 1792 CSEMap.InsertNode(N, IP); 1793 InsertNode(N); 1794 return SDValue(N, 0); 1795 } 1796 1797 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1798 FoldingSetNodeID ID; 1799 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1800 ID.AddPointer(RegMask); 1801 void *IP = nullptr; 1802 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1803 return SDValue(E, 0); 1804 1805 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1806 CSEMap.InsertNode(N, IP); 1807 InsertNode(N); 1808 return SDValue(N, 0); 1809 } 1810 1811 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1812 MCSymbol *Label) { 1813 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1814 } 1815 1816 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1817 SDValue Root, MCSymbol *Label) { 1818 FoldingSetNodeID ID; 1819 SDValue Ops[] = { Root }; 1820 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1821 ID.AddPointer(Label); 1822 void *IP = nullptr; 1823 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1824 return SDValue(E, 0); 1825 1826 auto *N = 1827 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label); 1828 createOperands(N, Ops); 1829 1830 CSEMap.InsertNode(N, IP); 1831 InsertNode(N); 1832 return SDValue(N, 0); 1833 } 1834 1835 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1836 int64_t Offset, bool isTarget, 1837 unsigned TargetFlags) { 1838 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1839 1840 FoldingSetNodeID ID; 1841 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1842 ID.AddPointer(BA); 1843 ID.AddInteger(Offset); 1844 ID.AddInteger(TargetFlags); 1845 void *IP = nullptr; 1846 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1847 return SDValue(E, 0); 1848 1849 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1850 CSEMap.InsertNode(N, IP); 1851 InsertNode(N); 1852 return SDValue(N, 0); 1853 } 1854 1855 SDValue SelectionDAG::getSrcValue(const Value *V) { 1856 assert((!V || V->getType()->isPointerTy()) && 1857 "SrcValue is not a pointer?"); 1858 1859 FoldingSetNodeID ID; 1860 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1861 ID.AddPointer(V); 1862 1863 void *IP = nullptr; 1864 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1865 return SDValue(E, 0); 1866 1867 auto *N = newSDNode<SrcValueSDNode>(V); 1868 CSEMap.InsertNode(N, IP); 1869 InsertNode(N); 1870 return SDValue(N, 0); 1871 } 1872 1873 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1874 FoldingSetNodeID ID; 1875 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1876 ID.AddPointer(MD); 1877 1878 void *IP = nullptr; 1879 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1880 return SDValue(E, 0); 1881 1882 auto *N = newSDNode<MDNodeSDNode>(MD); 1883 CSEMap.InsertNode(N, IP); 1884 InsertNode(N); 1885 return SDValue(N, 0); 1886 } 1887 1888 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1889 if (VT == V.getValueType()) 1890 return V; 1891 1892 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1893 } 1894 1895 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1896 unsigned SrcAS, unsigned DestAS) { 1897 SDValue Ops[] = {Ptr}; 1898 FoldingSetNodeID ID; 1899 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1900 ID.AddInteger(SrcAS); 1901 ID.AddInteger(DestAS); 1902 1903 void *IP = nullptr; 1904 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1905 return SDValue(E, 0); 1906 1907 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1908 VT, SrcAS, DestAS); 1909 createOperands(N, Ops); 1910 1911 CSEMap.InsertNode(N, IP); 1912 InsertNode(N); 1913 return SDValue(N, 0); 1914 } 1915 1916 /// getShiftAmountOperand - Return the specified value casted to 1917 /// the target's desired shift amount type. 
1918 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1919 EVT OpTy = Op.getValueType(); 1920 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1921 if (OpTy == ShTy || OpTy.isVector()) return Op; 1922 1923 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1924 } 1925 1926 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1927 SDLoc dl(Node); 1928 const TargetLowering &TLI = getTargetLoweringInfo(); 1929 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1930 EVT VT = Node->getValueType(0); 1931 SDValue Tmp1 = Node->getOperand(0); 1932 SDValue Tmp2 = Node->getOperand(1); 1933 const MaybeAlign MA(Node->getConstantOperandVal(3)); 1934 1935 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1936 Tmp2, MachinePointerInfo(V)); 1937 SDValue VAList = VAListLoad; 1938 1939 if (MA && *MA > TLI.getMinStackArgumentAlignment()) { 1940 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1941 getConstant(MA->value() - 1, dl, VAList.getValueType())); 1942 1943 VAList = 1944 getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1945 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType())); 1946 } 1947 1948 // Increment the pointer, VAList, to the next vaarg 1949 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1950 getConstant(getDataLayout().getTypeAllocSize( 1951 VT.getTypeForEVT(*getContext())), 1952 dl, VAList.getValueType())); 1953 // Store the incremented VAList to the legalized pointer 1954 Tmp1 = 1955 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1956 // Load the actual argument out of the pointer VAList 1957 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1958 } 1959 1960 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1961 SDLoc dl(Node); 1962 const TargetLowering &TLI = getTargetLoweringInfo(); 1963 // This defaults to loading a pointer from the input and storing it to the 1964 // output, returning the chain. 
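// A minimal sketch of the equivalent C semantics, assuming the common case
// where va_list is a single pointer (illustrative only):
//   tmp = *(char **)src;  // load the current va_list cursor
//   *(char **)dst = tmp;  // store it into the destination va_list
// Targets whose va_list is an aggregate are expected to custom-lower
// ISD::VACOPY rather than rely on this default expansion.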
1965 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1966 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1967 SDValue Tmp1 = 1968 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1969 Node->getOperand(2), MachinePointerInfo(VS)); 1970 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1971 MachinePointerInfo(VD)); 1972 } 1973 1974 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1975 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1976 unsigned ByteSize = VT.getStoreSize(); 1977 Type *Ty = VT.getTypeForEVT(*getContext()); 1978 unsigned StackAlign = 1979 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1980 1981 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1982 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1983 } 1984 1985 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1986 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 1987 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1988 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1989 const DataLayout &DL = getDataLayout(); 1990 unsigned Align = 1991 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1992 1993 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1994 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 1995 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1996 } 1997 1998 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 1999 ISD::CondCode Cond, const SDLoc &dl) { 2000 EVT OpVT = N1.getValueType(); 2001 2002 // These setcc operations always fold. 2003 switch (Cond) { 2004 default: break; 2005 case ISD::SETFALSE: 2006 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); 2007 case ISD::SETTRUE: 2008 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); 2009 2010 case ISD::SETOEQ: 2011 case ISD::SETOGT: 2012 case ISD::SETOGE: 2013 case ISD::SETOLT: 2014 case ISD::SETOLE: 2015 case ISD::SETONE: 2016 case ISD::SETO: 2017 case ISD::SETUO: 2018 case ISD::SETUEQ: 2019 case ISD::SETUNE: 2020 assert(!OpVT.isInteger() && "Illegal setcc for integer!"); 2021 break; 2022 } 2023 2024 if (OpVT.isInteger()) { 2025 // For EQ and NE, we can always pick a value for the undef to make the 2026 // predicate pass or fail, so we can return undef. 2027 // Matches behavior in llvm::ConstantFoldCompareInstruction. 2028 // icmp eq/ne X, undef -> undef. 2029 if ((N1.isUndef() || N2.isUndef()) && 2030 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) 2031 return getUNDEF(VT); 2032 2033 // If both operands are undef, we can return undef for int comparison. 2034 // icmp undef, undef -> undef. 2035 if (N1.isUndef() && N2.isUndef()) 2036 return getUNDEF(VT); 2037 2038 // icmp X, X -> true/false 2039 // icmp X, undef -> true/false because undef could be X. 
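// e.g. with Cond == ISD::SETGE, "setcc X, X" folds to true, while with
// Cond == ISD::SETNE it folds to false; ISD::isTrueWhenEqual(Cond)
// captures exactly this distinction (illustrative note).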
2040 if (N1 == N2) 2041 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT); 2042 } 2043 2044 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 2045 const APInt &C2 = N2C->getAPIntValue(); 2046 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 2047 const APInt &C1 = N1C->getAPIntValue(); 2048 2049 switch (Cond) { 2050 default: llvm_unreachable("Unknown integer setcc!"); 2051 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT); 2052 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT); 2053 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT); 2054 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT); 2055 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT); 2056 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT); 2057 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT); 2058 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT); 2059 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT); 2060 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT); 2061 } 2062 } 2063 } 2064 2065 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 2066 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 2067 2068 if (N1CFP && N2CFP) { 2069 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF()); 2070 switch (Cond) { 2071 default: break; 2072 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 2073 return getUNDEF(VT); 2074 LLVM_FALLTHROUGH; 2075 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, 2076 OpVT); 2077 case ISD::SETNE: if (R==APFloat::cmpUnordered) 2078 return getUNDEF(VT); 2079 LLVM_FALLTHROUGH; 2080 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2081 R==APFloat::cmpLessThan, dl, VT, 2082 OpVT); 2083 case ISD::SETLT: if (R==APFloat::cmpUnordered) 2084 return getUNDEF(VT); 2085 LLVM_FALLTHROUGH; 2086 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, 2087 OpVT); 2088 case ISD::SETGT: if (R==APFloat::cmpUnordered) 2089 return getUNDEF(VT); 2090 LLVM_FALLTHROUGH; 2091 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, 2092 VT, OpVT); 2093 case ISD::SETLE: if (R==APFloat::cmpUnordered) 2094 return getUNDEF(VT); 2095 LLVM_FALLTHROUGH; 2096 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || 2097 R==APFloat::cmpEqual, dl, VT, 2098 OpVT); 2099 case ISD::SETGE: if (R==APFloat::cmpUnordered) 2100 return getUNDEF(VT); 2101 LLVM_FALLTHROUGH; 2102 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2103 R==APFloat::cmpEqual, dl, VT, OpVT); 2104 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, 2105 OpVT); 2106 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, 2107 OpVT); 2108 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || 2109 R==APFloat::cmpEqual, dl, VT, 2110 OpVT); 2111 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, 2112 OpVT); 2113 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || 2114 R==APFloat::cmpLessThan, dl, VT, 2115 OpVT); 2116 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || 2117 R==APFloat::cmpUnordered, dl, VT, 2118 OpVT); 2119 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, 2120 VT, OpVT); 2121 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, 2122 OpVT); 2123 } 2124 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) { 2125 // 
Ensure that the constant occurs on the RHS.
2126 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2127 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2128 return SDValue();
2129 return getSetCC(dl, VT, N2, N1, SwappedCond);
2130 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2131 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2132 // If an operand is known to be a nan (or undef that could be a nan), we can
2133 // fold it.
2134 // Choosing NaN for the undef will always make unordered comparisons succeed
2135 // and ordered comparisons fail.
2136 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2137 switch (ISD::getUnorderedFlavor(Cond)) {
2138 default:
2139 llvm_unreachable("Unknown flavor!");
2140 case 0: // Known false.
2141 return getBoolConstant(false, dl, VT, OpVT);
2142 case 1: // Known true.
2143 return getBoolConstant(true, dl, VT, OpVT);
2144 case 2: // Undefined.
2145 return getUNDEF(VT);
2146 }
2147 }
2148
2149 // Could not fold it.
2150 return SDValue();
2151 }
2152
2153 /// See if the specified operand can be simplified with the knowledge that only
2154 /// the bits specified by DemandedBits are used.
2155 /// TODO: really we should be making this into the DAG equivalent of
2156 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2157 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2158 EVT VT = V.getValueType();
2159 APInt DemandedElts = VT.isVector()
2160 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2161 : APInt(1, 1);
2162 return GetDemandedBits(V, DemandedBits, DemandedElts);
2163 }
2164
2165 /// See if the specified operand can be simplified with the knowledge that only
2166 /// the bits specified by DemandedBits are used in the elements specified by
2167 /// DemandedElts.
2168 /// TODO: really we should be making this into the DAG equivalent of
2169 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2170 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2171 const APInt &DemandedElts) {
2172 switch (V.getOpcode()) {
2173 default:
2174 break;
2175 case ISD::Constant: {
2176 auto *CV = cast<ConstantSDNode>(V.getNode());
2177 assert(CV && "Const value should be ConstSDNode.");
2178 const APInt &CVal = CV->getAPIntValue();
2179 APInt NewVal = CVal & DemandedBits;
2180 if (NewVal != CVal)
2181 return getConstant(NewVal, SDLoc(V), V.getValueType());
2182 break;
2183 }
2184 case ISD::OR:
2185 case ISD::XOR:
2186 case ISD::SIGN_EXTEND_INREG:
2187 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2188 *this, 0);
2189 case ISD::SRL:
2190 // Only look at single-use SRLs.
2191 if (!V.getNode()->hasOneUse())
2192 break;
2193 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2194 // See if we can recursively simplify the LHS.
2195 unsigned Amt = RHSC->getZExtValue();
2196
2197 // Watch out for shift count overflow though.
2198 if (Amt >= DemandedBits.getBitWidth())
2199 break;
2200 APInt SrcDemandedBits = DemandedBits << Amt;
2201 if (SDValue SimplifyLHS =
2202 GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2203 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2204 V.getOperand(1));
2205 }
2206 break;
2207 case ISD::AND: {
2208 // X & -1 -> X (ignoring bits which aren't demanded).
2209 // Also handle the case where masked out bits in X are known to be zero.
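// Worked example (illustrative): with DemandedBits = 0x0F and
// V = (X & 0xFF), the mask covers every demanded bit
// (0x0F is a subset of 0xFF), so V simplifies to X. The same holds if X
// is already known to be zero in the bits the mask clears.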
2210 if (ConstantSDNode *RHSC = isConstOrConstSplat(V.getOperand(1))) { 2211 const APInt &AndVal = RHSC->getAPIntValue(); 2212 if (DemandedBits.isSubsetOf(AndVal) || 2213 DemandedBits.isSubsetOf(computeKnownBits(V.getOperand(0)).Zero | 2214 AndVal)) 2215 return V.getOperand(0); 2216 } 2217 break; 2218 } 2219 case ISD::ANY_EXTEND: { 2220 SDValue Src = V.getOperand(0); 2221 unsigned SrcBitWidth = Src.getScalarValueSizeInBits(); 2222 // Being conservative here - only peek through if we only demand bits in the 2223 // non-extended source (even though the extended bits are technically 2224 // undef). 2225 if (DemandedBits.getActiveBits() > SrcBitWidth) 2226 break; 2227 APInt SrcDemandedBits = DemandedBits.trunc(SrcBitWidth); 2228 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcDemandedBits)) 2229 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc); 2230 break; 2231 } 2232 } 2233 return SDValue(); 2234 } 2235 2236 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2237 /// use this predicate to simplify operations downstream. 2238 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2239 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2240 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2241 } 2242 2243 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2244 /// this predicate to simplify operations downstream. Mask is known to be zero 2245 /// for bits that V cannot have. 2246 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2247 unsigned Depth) const { 2248 EVT VT = V.getValueType(); 2249 APInt DemandedElts = VT.isVector() 2250 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2251 : APInt(1, 1); 2252 return MaskedValueIsZero(V, Mask, DemandedElts, Depth); 2253 } 2254 2255 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in 2256 /// DemandedElts. We use this predicate to simplify operations downstream. 2257 /// Mask is known to be zero for bits that V cannot have. 2258 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2259 const APInt &DemandedElts, 2260 unsigned Depth) const { 2261 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero); 2262 } 2263 2264 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'. 2265 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask, 2266 unsigned Depth) const { 2267 return Mask.isSubsetOf(computeKnownBits(V, Depth).One); 2268 } 2269 2270 /// isSplatValue - Return true if the vector V has the same value 2271 /// across all DemandedElts. 2272 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, 2273 APInt &UndefElts) { 2274 if (!DemandedElts) 2275 return false; // No demanded elts, better to assume we don't know anything. 
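// Illustrative example of the contract: for
//   V = BUILD_VECTOR x, undef, x, x
// and DemandedElts = 0b1111, this returns true with UndefElts = 0b0010:
// every demanded lane that is not undef holds the same value x.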
2276 2277 EVT VT = V.getValueType(); 2278 assert(VT.isVector() && "Vector type expected"); 2279 2280 unsigned NumElts = VT.getVectorNumElements(); 2281 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch"); 2282 UndefElts = APInt::getNullValue(NumElts); 2283 2284 switch (V.getOpcode()) { 2285 case ISD::BUILD_VECTOR: { 2286 SDValue Scl; 2287 for (unsigned i = 0; i != NumElts; ++i) { 2288 SDValue Op = V.getOperand(i); 2289 if (Op.isUndef()) { 2290 UndefElts.setBit(i); 2291 continue; 2292 } 2293 if (!DemandedElts[i]) 2294 continue; 2295 if (Scl && Scl != Op) 2296 return false; 2297 Scl = Op; 2298 } 2299 return true; 2300 } 2301 case ISD::VECTOR_SHUFFLE: { 2302 // Check if this is a shuffle node doing a splat. 2303 // TODO: Do we need to handle shuffle(splat, undef, mask)? 2304 int SplatIndex = -1; 2305 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); 2306 for (int i = 0; i != (int)NumElts; ++i) { 2307 int M = Mask[i]; 2308 if (M < 0) { 2309 UndefElts.setBit(i); 2310 continue; 2311 } 2312 if (!DemandedElts[i]) 2313 continue; 2314 if (0 <= SplatIndex && SplatIndex != M) 2315 return false; 2316 SplatIndex = M; 2317 } 2318 return true; 2319 } 2320 case ISD::EXTRACT_SUBVECTOR: { 2321 SDValue Src = V.getOperand(0); 2322 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(V.getOperand(1)); 2323 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2324 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2325 // Offset the demanded elts by the subvector index. 2326 uint64_t Idx = SubIdx->getZExtValue(); 2327 APInt UndefSrcElts; 2328 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2329 if (isSplatValue(Src, DemandedSrc, UndefSrcElts)) { 2330 UndefElts = UndefSrcElts.extractBits(NumElts, Idx); 2331 return true; 2332 } 2333 } 2334 break; 2335 } 2336 case ISD::ADD: 2337 case ISD::SUB: 2338 case ISD::AND: { 2339 APInt UndefLHS, UndefRHS; 2340 SDValue LHS = V.getOperand(0); 2341 SDValue RHS = V.getOperand(1); 2342 if (isSplatValue(LHS, DemandedElts, UndefLHS) && 2343 isSplatValue(RHS, DemandedElts, UndefRHS)) { 2344 UndefElts = UndefLHS | UndefRHS; 2345 return true; 2346 } 2347 break; 2348 } 2349 } 2350 2351 return false; 2352 } 2353 2354 /// Helper wrapper to main isSplatValue function. 2355 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) { 2356 EVT VT = V.getValueType(); 2357 assert(VT.isVector() && "Vector type expected"); 2358 unsigned NumElts = VT.getVectorNumElements(); 2359 2360 APInt UndefElts; 2361 APInt DemandedElts = APInt::getAllOnesValue(NumElts); 2362 return isSplatValue(V, DemandedElts, UndefElts) && 2363 (AllowUndefs || !UndefElts); 2364 } 2365 2366 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) { 2367 V = peekThroughExtractSubvectors(V); 2368 2369 EVT VT = V.getValueType(); 2370 unsigned Opcode = V.getOpcode(); 2371 switch (Opcode) { 2372 default: { 2373 APInt UndefElts; 2374 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); 2375 if (isSplatValue(V, DemandedElts, UndefElts)) { 2376 // Handle case where all demanded elements are UNDEF. 2377 if (DemandedElts.isSubsetOf(UndefElts)) { 2378 SplatIdx = 0; 2379 return getUNDEF(VT); 2380 } 2381 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes(); 2382 return V; 2383 } 2384 break; 2385 } 2386 case ISD::VECTOR_SHUFFLE: { 2387 // Check if this is a shuffle node doing a splat. 
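// e.g. (illustrative) a shuffle of two v4i32 inputs with mask <5,5,5,5>
// splats element 1 of the second input, so SplatIdx becomes 5 % 4 == 1 and
// the returned source vector is operand 5 / 4 == 1.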
2388 // TODO - remove this and rely purely on SelectionDAG::isSplatValue, 2389 // getTargetVShiftNode currently struggles without the splat source. 2390 auto *SVN = cast<ShuffleVectorSDNode>(V); 2391 if (!SVN->isSplat()) 2392 break; 2393 int Idx = SVN->getSplatIndex(); 2394 int NumElts = V.getValueType().getVectorNumElements(); 2395 SplatIdx = Idx % NumElts; 2396 return V.getOperand(Idx / NumElts); 2397 } 2398 } 2399 2400 return SDValue(); 2401 } 2402 2403 SDValue SelectionDAG::getSplatValue(SDValue V) { 2404 int SplatIdx; 2405 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) 2406 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), 2407 SrcVector.getValueType().getScalarType(), SrcVector, 2408 getIntPtrConstant(SplatIdx, SDLoc(V))); 2409 return SDValue(); 2410 } 2411 2412 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 2413 /// is less than the element bit-width of the shift node, return it. 2414 static const APInt *getValidShiftAmountConstant(SDValue V) { 2415 unsigned BitWidth = V.getScalarValueSizeInBits(); 2416 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 2417 // Shifting more than the bitwidth is not valid. 2418 const APInt &ShAmt = SA->getAPIntValue(); 2419 if (ShAmt.ult(BitWidth)) 2420 return &ShAmt; 2421 } 2422 return nullptr; 2423 } 2424 2425 /// If a SHL/SRA/SRL node has constant vector shift amounts that are all less 2426 /// than the element bit-width of the shift node, return the minimum value. 2427 static const APInt *getValidMinimumShiftAmountConstant(SDValue V) { 2428 unsigned BitWidth = V.getScalarValueSizeInBits(); 2429 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); 2430 if (!BV) 2431 return nullptr; 2432 const APInt *MinShAmt = nullptr; 2433 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2434 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); 2435 if (!SA) 2436 return nullptr; 2437 // Shifting more than the bitwidth is not valid. 2438 const APInt &ShAmt = SA->getAPIntValue(); 2439 if (ShAmt.uge(BitWidth)) 2440 return nullptr; 2441 if (MinShAmt && MinShAmt->ule(ShAmt)) 2442 continue; 2443 MinShAmt = &ShAmt; 2444 } 2445 return MinShAmt; 2446 } 2447 2448 /// Determine which bits of Op are known to be either zero or one and return 2449 /// them in Known. For vectors, the known bits are those that are shared by 2450 /// every vector element. 2451 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const { 2452 EVT VT = Op.getValueType(); 2453 APInt DemandedElts = VT.isVector() 2454 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2455 : APInt(1, 1); 2456 return computeKnownBits(Op, DemandedElts, Depth); 2457 } 2458 2459 /// Determine which bits of Op are known to be either zero or one and return 2460 /// them in Known. The DemandedElts argument allows us to only collect the known 2461 /// bits that are shared by the requested vector elements. 2462 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts, 2463 unsigned Depth) const { 2464 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2465 2466 KnownBits Known(BitWidth); // Don't know anything. 2467 2468 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2469 // We know all of the bits for a constant! 2470 Known.One = C->getAPIntValue(); 2471 Known.Zero = ~Known.One; 2472 return Known; 2473 } 2474 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { 2475 // We know all of the bits for a constant fp! 
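// e.g. a ConstantFP of 1.0f bitcasts to 0x3F800000, fully determining all
// 32 bits (illustrative).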
2476 Known.One = C->getValueAPF().bitcastToAPInt(); 2477 Known.Zero = ~Known.One; 2478 return Known; 2479 } 2480 2481 if (Depth >= MaxRecursionDepth) 2482 return Known; // Limit search depth. 2483 2484 KnownBits Known2; 2485 unsigned NumElts = DemandedElts.getBitWidth(); 2486 assert((!Op.getValueType().isVector() || 2487 NumElts == Op.getValueType().getVectorNumElements()) && 2488 "Unexpected vector size"); 2489 2490 if (!DemandedElts) 2491 return Known; // No demanded elts, better to assume we don't know anything. 2492 2493 unsigned Opcode = Op.getOpcode(); 2494 switch (Opcode) { 2495 case ISD::BUILD_VECTOR: 2496 // Collect the known bits that are shared by every demanded vector element. 2497 Known.Zero.setAllBits(); Known.One.setAllBits(); 2498 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2499 if (!DemandedElts[i]) 2500 continue; 2501 2502 SDValue SrcOp = Op.getOperand(i); 2503 Known2 = computeKnownBits(SrcOp, Depth + 1); 2504 2505 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2506 if (SrcOp.getValueSizeInBits() != BitWidth) { 2507 assert(SrcOp.getValueSizeInBits() > BitWidth && 2508 "Expected BUILD_VECTOR implicit truncation"); 2509 Known2 = Known2.trunc(BitWidth); 2510 } 2511 2512 // Known bits are the values that are shared by every demanded element. 2513 Known.One &= Known2.One; 2514 Known.Zero &= Known2.Zero; 2515 2516 // If we don't know any bits, early out. 2517 if (Known.isUnknown()) 2518 break; 2519 } 2520 break; 2521 case ISD::VECTOR_SHUFFLE: { 2522 // Collect the known bits that are shared by every vector element referenced 2523 // by the shuffle. 2524 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2525 Known.Zero.setAllBits(); Known.One.setAllBits(); 2526 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2527 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2528 for (unsigned i = 0; i != NumElts; ++i) { 2529 if (!DemandedElts[i]) 2530 continue; 2531 2532 int M = SVN->getMaskElt(i); 2533 if (M < 0) { 2534 // For UNDEF elements, we don't know anything about the common state of 2535 // the shuffle result. 2536 Known.resetAll(); 2537 DemandedLHS.clearAllBits(); 2538 DemandedRHS.clearAllBits(); 2539 break; 2540 } 2541 2542 if ((unsigned)M < NumElts) 2543 DemandedLHS.setBit((unsigned)M % NumElts); 2544 else 2545 DemandedRHS.setBit((unsigned)M % NumElts); 2546 } 2547 // Known bits are the values that are shared by every demanded element. 2548 if (!!DemandedLHS) { 2549 SDValue LHS = Op.getOperand(0); 2550 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1); 2551 Known.One &= Known2.One; 2552 Known.Zero &= Known2.Zero; 2553 } 2554 // If we don't know any bits, early out. 2555 if (Known.isUnknown()) 2556 break; 2557 if (!!DemandedRHS) { 2558 SDValue RHS = Op.getOperand(1); 2559 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1); 2560 Known.One &= Known2.One; 2561 Known.Zero &= Known2.Zero; 2562 } 2563 break; 2564 } 2565 case ISD::CONCAT_VECTORS: { 2566 // Split DemandedElts and test each of the demanded subvectors. 
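// e.g. (illustrative) for a v4i32 CONCAT_VECTORS of two v2i32 operands
// with DemandedElts = 0b0110, element 1 of operand 0 and element 0 of
// operand 1 are demanded, and their known bits are intersected below.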
2567 Known.Zero.setAllBits(); Known.One.setAllBits();
2568 EVT SubVectorVT = Op.getOperand(0).getValueType();
2569 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2570 unsigned NumSubVectors = Op.getNumOperands();
2571 for (unsigned i = 0; i != NumSubVectors; ++i) {
2572 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2573 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2574 if (!!DemandedSub) {
2575 SDValue Sub = Op.getOperand(i);
2576 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2577 Known.One &= Known2.One;
2578 Known.Zero &= Known2.Zero;
2579 }
2580 // If we don't know any bits, early out.
2581 if (Known.isUnknown())
2582 break;
2583 }
2584 break;
2585 }
2586 case ISD::INSERT_SUBVECTOR: {
2587 // If we know the element index, demand any elements from the subvector and
2588 // the remainder from the src it's inserted into, otherwise demand them all.
2589 SDValue Src = Op.getOperand(0);
2590 SDValue Sub = Op.getOperand(1);
2591 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2592 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2593 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
2594 Known.One.setAllBits();
2595 Known.Zero.setAllBits();
2596 uint64_t Idx = SubIdx->getZExtValue();
2597 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2598 if (!!DemandedSubElts) {
2599 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2600 if (Known.isUnknown())
2601 break; // early-out.
2602 }
2603 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
2604 APInt DemandedSrcElts = DemandedElts & ~SubMask;
2605 if (!!DemandedSrcElts) {
2606 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2607 Known.One &= Known2.One;
2608 Known.Zero &= Known2.Zero;
2609 }
2610 } else {
2611 Known = computeKnownBits(Sub, Depth + 1);
2612 if (Known.isUnknown())
2613 break; // early-out.
2614 Known2 = computeKnownBits(Src, Depth + 1);
2615 Known.One &= Known2.One;
2616 Known.Zero &= Known2.Zero;
2617 }
2618 break;
2619 }
2620 case ISD::EXTRACT_SUBVECTOR: {
2621 // If we know the element index, just demand the subvector's elements,
2622 // otherwise demand them all.
2623 SDValue Src = Op.getOperand(0);
2624 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2625 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2626 APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
2627 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2628 // Offset the demanded elts by the subvector index.
2629 uint64_t Idx = SubIdx->getZExtValue();
2630 DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2631 }
2632 Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
2633 break;
2634 }
2635 case ISD::SCALAR_TO_VECTOR: {
2636 // We know about scalar_to_vector as much as we know about its source,
2637 // which becomes the first element of an otherwise unknown vector.
2638 if (DemandedElts != 1)
2639 break;
2640
2641 SDValue N0 = Op.getOperand(0);
2642 Known = computeKnownBits(N0, Depth + 1);
2643 if (N0.getValueSizeInBits() != BitWidth)
2644 Known = Known.trunc(BitWidth);
2645
2646 break;
2647 }
2648 case ISD::BITCAST: {
2649 SDValue N0 = Op.getOperand(0);
2650 EVT SubVT = N0.getValueType();
2651 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2652
2653 // Ignore bitcasts from unsupported types.
2654 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2655 break;
2656
2657 // Fast handling of 'identity' bitcasts.
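// ('Identity' here means the per-element bit width is unchanged, e.g.
// v4i32 -> v4f32, so the known bits pass through untouched.)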
2658 if (BitWidth == SubBitWidth) {
2659 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2660 break;
2661 }
2662
2663 bool IsLE = getDataLayout().isLittleEndian();
2664
2665 // Bitcast 'small element' vector to 'large element' scalar/vector.
2666 if ((BitWidth % SubBitWidth) == 0) {
2667 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2668
2669 // Collect known bits for the (larger) output by collecting the known
2670 // bits from each set of sub elements and shifting these into place.
2671 // We need to separately call computeKnownBits for each set of
2672 // sub elements as the knownbits for each is likely to be different.
2673 unsigned SubScale = BitWidth / SubBitWidth;
2674 APInt SubDemandedElts(NumElts * SubScale, 0);
2675 for (unsigned i = 0; i != NumElts; ++i)
2676 if (DemandedElts[i])
2677 SubDemandedElts.setBit(i * SubScale);
2678
2679 for (unsigned i = 0; i != SubScale; ++i) {
2680 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
2681 Depth + 1);
2682 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
2683 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
2684 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
2685 }
2686 }
2687
2688 // Bitcast 'large element' scalar/vector to 'small element' vector.
2689 if ((SubBitWidth % BitWidth) == 0) {
2690 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2691
2692 // Collect known bits for the (smaller) output by collecting the known
2693 // bits from the overlapping larger input elements and extracting the
2694 // sub sections we actually care about.
2695 unsigned SubScale = SubBitWidth / BitWidth;
2696 APInt SubDemandedElts(NumElts / SubScale, 0);
2697 for (unsigned i = 0; i != NumElts; ++i)
2698 if (DemandedElts[i])
2699 SubDemandedElts.setBit(i / SubScale);
2700
2701 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
2702
2703 Known.Zero.setAllBits(); Known.One.setAllBits();
2704 for (unsigned i = 0; i != NumElts; ++i)
2705 if (DemandedElts[i]) {
2706 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
2707 unsigned Offset = (Shifts % SubScale) * BitWidth;
2708 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2709 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2710 // If we don't know any bits, early out.
2711 if (Known.isUnknown())
2712 break;
2713 }
2714 }
2715 break;
2716 }
2717 case ISD::AND:
2718 // If either the LHS or the RHS is zero, the result is zero.
2719 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2720 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2721
2722 // Output known-1 bits are only known if set in both the LHS & RHS.
2723 Known.One &= Known2.One;
2724 // Output known-0 bits are known to be clear if zero in either the LHS | RHS.
2725 Known.Zero |= Known2.Zero;
2726 break;
2727 case ISD::OR:
2728 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2729 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2730
2731 // Output known-0 bits are only known if clear in both the LHS & RHS.
2732 Known.Zero &= Known2.Zero;
2733 // Output known-1 bits are known to be set if set in either the LHS | RHS.
2734 Known.One |= Known2.One;
2735 break;
2736 case ISD::XOR: {
2737 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2738 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2739
2740 // Output known-0 bits are known if clear or set in both the LHS & RHS.
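// Worked example (illustrative): with LHS Known.Zero = 0b1100,
// Known.One = 0b0001 and RHS Known.Zero = 0b1010, Known.One = 0b0100:
// bit 3 is 0^0 == 0 and bit 2 is 0^1 == 1, giving Known.Zero = 0b1000 and
// Known.One = 0b0100; bits 0 and 1 remain unknown because one side of
// each is unknown.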
2741 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
2742 // Output known-1 bits are known to be set if set in only one of the LHS, RHS.
2743 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
2744 Known.Zero = KnownZeroOut;
2745 break;
2746 }
2747 case ISD::MUL: {
2748 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2749 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2750
2751 // If low bits are zero in either operand, output low known-0 bits.
2752 // Also compute a conservative estimate for high known-0 bits.
2753 // More trickiness is possible, but this is sufficient for the
2754 // interesting case of alignment computation.
2755 unsigned TrailZ = Known.countMinTrailingZeros() +
2756 Known2.countMinTrailingZeros();
2757 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
2758 Known2.countMinLeadingZeros(),
2759 BitWidth) - BitWidth;
2760
2761 Known.resetAll();
2762 Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
2763 Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
2764 break;
2765 }
2766 case ISD::UDIV: {
2767 // For the purposes of computing leading zeros we can conservatively
2768 // treat a udiv as a logical right shift by the power of 2 known to
2769 // be less than the denominator.
2770 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2771 unsigned LeadZ = Known2.countMinLeadingZeros();
2772
2773 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2774 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
2775 if (RHSMaxLeadingZeros != BitWidth)
2776 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
2777
2778 Known.Zero.setHighBits(LeadZ);
2779 break;
2780 }
2781 case ISD::SELECT:
2782 case ISD::VSELECT:
2783 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2784 // If we don't know any bits, early out.
2785 if (Known.isUnknown())
2786 break;
2787 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
2788
2789 // Only known if known in both the LHS and RHS.
2790 Known.One &= Known2.One;
2791 Known.Zero &= Known2.Zero;
2792 break;
2793 case ISD::SELECT_CC:
2794 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
2795 // If we don't know any bits, early out.
2796 if (Known.isUnknown())
2797 break;
2798 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2799
2800 // Only known if known in both the LHS and RHS.
2801 Known.One &= Known2.One;
2802 Known.Zero &= Known2.Zero;
2803 break;
2804 case ISD::SMULO:
2805 case ISD::UMULO:
2806 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
2807 if (Op.getResNo() != 1)
2808 break;
2809 // The boolean result conforms to getBooleanContents.
2810 // If we know the result of a setcc has the top bits zero, use this info.
2811 // We know that we have an integer-based boolean since these operations
2812 // are only available for integer types.
2813 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2814 TargetLowering::ZeroOrOneBooleanContent &&
2815 BitWidth > 1)
2816 Known.Zero.setBitsFrom(1);
2817 break;
2818 case ISD::SETCC:
2819 case ISD::STRICT_FSETCC:
2820 case ISD::STRICT_FSETCCS: {
2821 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
2822 // If we know the result of a setcc has the top bits zero, use this info.
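// e.g. (illustrative) under ZeroOrOneBooleanContent an i32 setcc result is
// always 0 or 1, so bits 1..31 are known zero.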
2823 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 2824 TargetLowering::ZeroOrOneBooleanContent && 2825 BitWidth > 1) 2826 Known.Zero.setBitsFrom(1); 2827 break; 2828 } 2829 case ISD::SHL: 2830 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2831 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2832 unsigned Shift = ShAmt->getZExtValue(); 2833 Known.Zero <<= Shift; 2834 Known.One <<= Shift; 2835 // Low bits are known zero. 2836 Known.Zero.setLowBits(Shift); 2837 } 2838 break; 2839 case ISD::SRL: 2840 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2841 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2842 unsigned Shift = ShAmt->getZExtValue(); 2843 Known.Zero.lshrInPlace(Shift); 2844 Known.One.lshrInPlace(Shift); 2845 // High bits are known zero. 2846 Known.Zero.setHighBits(Shift); 2847 } else if (const APInt *ShMinAmt = getValidMinimumShiftAmountConstant(Op)) { 2848 // Minimum shift high bits are known zero. 2849 Known.Zero.setHighBits(ShMinAmt->getZExtValue()); 2850 } 2851 break; 2852 case ISD::SRA: 2853 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2854 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2855 unsigned Shift = ShAmt->getZExtValue(); 2856 // Sign extend known zero/one bit (else is unknown). 2857 Known.Zero.ashrInPlace(Shift); 2858 Known.One.ashrInPlace(Shift); 2859 } 2860 break; 2861 case ISD::FSHL: 2862 case ISD::FSHR: 2863 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) { 2864 unsigned Amt = C->getAPIntValue().urem(BitWidth); 2865 2866 // For fshl, 0-shift returns the 1st arg. 2867 // For fshr, 0-shift returns the 2nd arg. 2868 if (Amt == 0) { 2869 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1), 2870 DemandedElts, Depth + 1); 2871 break; 2872 } 2873 2874 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 2875 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 2876 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2877 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2878 if (Opcode == ISD::FSHL) { 2879 Known.One <<= Amt; 2880 Known.Zero <<= Amt; 2881 Known2.One.lshrInPlace(BitWidth - Amt); 2882 Known2.Zero.lshrInPlace(BitWidth - Amt); 2883 } else { 2884 Known.One <<= BitWidth - Amt; 2885 Known.Zero <<= BitWidth - Amt; 2886 Known2.One.lshrInPlace(Amt); 2887 Known2.Zero.lshrInPlace(Amt); 2888 } 2889 Known.One |= Known2.One; 2890 Known.Zero |= Known2.Zero; 2891 } 2892 break; 2893 case ISD::SIGN_EXTEND_INREG: { 2894 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2895 unsigned EBits = EVT.getScalarSizeInBits(); 2896 2897 // Sign extension. Compute the demanded bits in the result that are not 2898 // present in the input. 2899 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2900 2901 APInt InSignMask = APInt::getSignMask(EBits); 2902 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2903 2904 // If the sign extended bits are demanded, we know that the sign 2905 // bit is demanded. 2906 InSignMask = InSignMask.zext(BitWidth); 2907 if (NewBits.getBoolValue()) 2908 InputDemandedBits |= InSignMask; 2909 2910 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2911 Known.One &= InputDemandedBits; 2912 Known.Zero &= InputDemandedBits; 2913 2914 // If the sign bit of the input is known set or clear, then we know the 2915 // top bits of the result. 
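// e.g. (illustrative) for sign_extend_inreg X, i8 on an i32 value: if bit 7
// of X is known zero, bits 8..31 of the result are known zero; if bit 7 is
// known one, bits 8..31 are known one; otherwise they are unknown.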
2916 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear
2917 Known.Zero |= NewBits;
2918 Known.One &= ~NewBits;
2919 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set
2920 Known.One |= NewBits;
2921 Known.Zero &= ~NewBits;
2922 } else { // Input sign bit unknown
2923 Known.Zero &= ~NewBits;
2924 Known.One &= ~NewBits;
2925 }
2926 break;
2927 }
2928 case ISD::CTTZ:
2929 case ISD::CTTZ_ZERO_UNDEF: {
2930 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2931 // If we have a known 1, its position is our upper bound.
2932 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
2933 unsigned LowBits = Log2_32(PossibleTZ) + 1;
2934 Known.Zero.setBitsFrom(LowBits);
2935 break;
2936 }
2937 case ISD::CTLZ:
2938 case ISD::CTLZ_ZERO_UNDEF: {
2939 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2940 // If we have a known 1, its position is our upper bound.
2941 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
2942 unsigned LowBits = Log2_32(PossibleLZ) + 1;
2943 Known.Zero.setBitsFrom(LowBits);
2944 break;
2945 }
2946 case ISD::CTPOP: {
2947 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2948 // If we know some of the bits are zero, they can't be one.
2949 unsigned PossibleOnes = Known2.countMaxPopulation();
2950 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
2951 break;
2952 }
2953 case ISD::LOAD: {
2954 LoadSDNode *LD = cast<LoadSDNode>(Op);
2955 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
2956 if (ISD::isNON_EXTLoad(LD) && Cst) {
2957 // Determine any common known bits from the loaded constant pool value.
2958 Type *CstTy = Cst->getType();
2959 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
2960 // If it's a vector splat, then we can (quickly) reuse the scalar path.
2961 // NOTE: We assume all elements match and none are UNDEF.
2962 if (CstTy->isVectorTy()) {
2963 if (const Constant *Splat = Cst->getSplatValue()) {
2964 Cst = Splat;
2965 CstTy = Cst->getType();
2966 }
2967 }
2968 // TODO - do we need to handle different bitwidths?
2969 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
2970 // Iterate across all vector elements finding common known bits.
2971 Known.One.setAllBits();
2972 Known.Zero.setAllBits();
2973 for (unsigned i = 0; i != NumElts; ++i) {
2974 if (!DemandedElts[i])
2975 continue;
2976 if (Constant *Elt = Cst->getAggregateElement(i)) {
2977 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
2978 const APInt &Value = CInt->getValue();
2979 Known.One &= Value;
2980 Known.Zero &= ~Value;
2981 continue;
2982 }
2983 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
2984 APInt Value = CFP->getValueAPF().bitcastToAPInt();
2985 Known.One &= Value;
2986 Known.Zero &= ~Value;
2987 continue;
2988 }
2989 }
2990 Known.One.clearAllBits();
2991 Known.Zero.clearAllBits();
2992 break;
2993 }
2994 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
2995 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
2996 const APInt &Value = CInt->getValue();
2997 Known.One = Value;
2998 Known.Zero = ~Value;
2999 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3000 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3001 Known.One = Value;
3002 Known.Zero = ~Value;
3003 }
3004 }
3005 }
3006 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
3007 // If this is a ZEXTLoad and we are looking at the loaded value.
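// e.g. (illustrative) a zextload of an i8 memory value into an i32 result
// leaves bits 8..31 known zero.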
3008 EVT VT = LD->getMemoryVT(); 3009 unsigned MemBits = VT.getScalarSizeInBits(); 3010 Known.Zero.setBitsFrom(MemBits); 3011 } else if (const MDNode *Ranges = LD->getRanges()) { 3012 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 3013 computeKnownBitsFromRangeMetadata(*Ranges, Known); 3014 } 3015 break; 3016 } 3017 case ISD::ZERO_EXTEND_VECTOR_INREG: { 3018 EVT InVT = Op.getOperand(0).getValueType(); 3019 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3020 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3021 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */); 3022 break; 3023 } 3024 case ISD::ZERO_EXTEND: { 3025 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3026 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */); 3027 break; 3028 } 3029 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3030 EVT InVT = Op.getOperand(0).getValueType(); 3031 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3032 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3033 // If the sign bit is known to be zero or one, then sext will extend 3034 // it to the top bits, else it will just zext. 3035 Known = Known.sext(BitWidth); 3036 break; 3037 } 3038 case ISD::SIGN_EXTEND: { 3039 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3040 // If the sign bit is known to be zero or one, then sext will extend 3041 // it to the top bits, else it will just zext. 3042 Known = Known.sext(BitWidth); 3043 break; 3044 } 3045 case ISD::ANY_EXTEND: { 3046 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3047 Known = Known.zext(BitWidth, false /* ExtendedBitsAreKnownZero */); 3048 break; 3049 } 3050 case ISD::TRUNCATE: { 3051 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3052 Known = Known.trunc(BitWidth); 3053 break; 3054 } 3055 case ISD::AssertZext: { 3056 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 3057 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 3058 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3059 Known.Zero |= (~InMask); 3060 Known.One &= (~Known.Zero); 3061 break; 3062 } 3063 case ISD::FGETSIGN: 3064 // All bits are zero except the low bit. 3065 Known.Zero.setBitsFrom(1); 3066 break; 3067 case ISD::USUBO: 3068 case ISD::SSUBO: 3069 if (Op.getResNo() == 1) { 3070 // If we know the result of a setcc has the top bits zero, use this info. 3071 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3072 TargetLowering::ZeroOrOneBooleanContent && 3073 BitWidth > 1) 3074 Known.Zero.setBitsFrom(1); 3075 break; 3076 } 3077 LLVM_FALLTHROUGH; 3078 case ISD::SUB: 3079 case ISD::SUBC: { 3080 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3081 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3082 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false, 3083 Known, Known2); 3084 break; 3085 } 3086 case ISD::UADDO: 3087 case ISD::SADDO: 3088 case ISD::ADDCARRY: 3089 if (Op.getResNo() == 1) { 3090 // If we know the result of a setcc has the top bits zero, use this info. 
3091 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3092 TargetLowering::ZeroOrOneBooleanContent &&
3093 BitWidth > 1)
3094 Known.Zero.setBitsFrom(1);
3095 break;
3096 }
3097 LLVM_FALLTHROUGH;
3098 case ISD::ADD:
3099 case ISD::ADDC:
3100 case ISD::ADDE: {
3101 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3102
3103 // With ADDE and ADDCARRY, a carry bit may be added in.
3104 KnownBits Carry(1);
3105 if (Opcode == ISD::ADDE)
3106 // Can't track carry from glue, set carry to unknown.
3107 Carry.resetAll();
3108 else if (Opcode == ISD::ADDCARRY)
3109 // TODO: Compute known bits for the carry operand. Not sure if it is worth
3110 // the trouble (how often will we find a known carry bit). And I haven't
3111 // tested this very much yet, but something like this might work:
3112 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3113 // Carry = Carry.zextOrTrunc(1, false);
3114 Carry.resetAll();
3115 else
3116 Carry.setAllZero();
3117
3118 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3119 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3120 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3121 break;
3122 }
3123 case ISD::SREM:
3124 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
3125 const APInt &RA = Rem->getAPIntValue().abs();
3126 if (RA.isPowerOf2()) {
3127 APInt LowBits = RA - 1;
3128 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3129
3130 // The low bits of the first operand are unchanged by the srem.
3131 Known.Zero = Known2.Zero & LowBits;
3132 Known.One = Known2.One & LowBits;
3133
3134 // If the first operand is non-negative or has all low bits zero, then
3135 // the upper bits are all zero.
3136 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
3137 Known.Zero |= ~LowBits;
3138
3139 // If the first operand is negative and not all low bits are zero, then
3140 // the upper bits are all one.
3141 if (Known2.isNegative() && LowBits.intersects(Known2.One))
3142 Known.One |= ~LowBits;
3143 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
3144 }
3145 }
3146 break;
3147 case ISD::UREM: {
3148 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
3149 const APInt &RA = Rem->getAPIntValue();
3150 if (RA.isPowerOf2()) {
3151 APInt LowBits = (RA - 1);
3152 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3153
3154 // The upper bits are all zero, the lower ones are unchanged.
3155 Known.Zero = Known2.Zero | ~LowBits;
3156 Known.One = Known2.One & LowBits;
3157 break;
3158 }
3159 }
3160
3161 // Since the result is less than or equal to either operand, any leading
3162 // zero bits in either operand must also exist in the result.
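// Worked example (illustrative): for i32 operands, if either operand is
// known to fit in its low 8 bits (24 leading zero bits), then X urem Y
// fits in 8 bits as well, so the high 24 bits of the result are known zero.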
3163 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3164 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3165 3166 uint32_t Leaders = 3167 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 3168 Known.resetAll(); 3169 Known.Zero.setHighBits(Leaders); 3170 break; 3171 } 3172 case ISD::EXTRACT_ELEMENT: { 3173 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3174 const unsigned Index = Op.getConstantOperandVal(1); 3175 const unsigned EltBitWidth = Op.getValueSizeInBits(); 3176 3177 // Remove low part of known bits mask 3178 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth); 3179 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth); 3180 3181 // Remove high part of known bit mask 3182 Known = Known.trunc(EltBitWidth); 3183 break; 3184 } 3185 case ISD::EXTRACT_VECTOR_ELT: { 3186 SDValue InVec = Op.getOperand(0); 3187 SDValue EltNo = Op.getOperand(1); 3188 EVT VecVT = InVec.getValueType(); 3189 const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); 3190 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 3191 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 3192 // anything about the extended bits. 3193 if (BitWidth > EltBitWidth) 3194 Known = Known.trunc(EltBitWidth); 3195 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 3196 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) { 3197 // If we know the element index, just demand that vector element. 3198 unsigned Idx = ConstEltNo->getZExtValue(); 3199 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); 3200 Known = computeKnownBits(InVec, DemandedElt, Depth + 1); 3201 } else { 3202 // Unknown element index, so ignore DemandedElts and demand them all. 3203 Known = computeKnownBits(InVec, Depth + 1); 3204 } 3205 if (BitWidth > EltBitWidth) 3206 Known = Known.zext(BitWidth, false /* => any extend */); 3207 break; 3208 } 3209 case ISD::INSERT_VECTOR_ELT: { 3210 SDValue InVec = Op.getOperand(0); 3211 SDValue InVal = Op.getOperand(1); 3212 SDValue EltNo = Op.getOperand(2); 3213 3214 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo); 3215 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { 3216 // If we know the element index, split the demand between the 3217 // source vector and the inserted element. 3218 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth); 3219 unsigned EltIdx = CEltNo->getZExtValue(); 3220 3221 // If we demand the inserted element then add its common known bits. 3222 if (DemandedElts[EltIdx]) { 3223 Known2 = computeKnownBits(InVal, Depth + 1); 3224 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth()); 3225 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth()); 3226 } 3227 3228 // If we demand the source vector then add its common known bits, ensuring 3229 // that we don't demand the inserted element. 3230 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx)); 3231 if (!!VectorElts) { 3232 Known2 = computeKnownBits(InVec, VectorElts, Depth + 1); 3233 Known.One &= Known2.One; 3234 Known.Zero &= Known2.Zero; 3235 } 3236 } else { 3237 // Unknown element index, so ignore DemandedElts and demand them all. 
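  // Only the bits on which the whole source vector and the inserted value
  // agree can be assumed known here (a conservative intersection).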
3238     Known = computeKnownBits(InVec, Depth + 1);
3239     Known2 = computeKnownBits(InVal, Depth + 1);
3240     Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3241     Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3242   }
3243   break;
3244 }
3245 case ISD::BITREVERSE: {
3246   Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3247   Known.Zero = Known2.Zero.reverseBits();
3248   Known.One = Known2.One.reverseBits();
3249   break;
3250 }
3251 case ISD::BSWAP: {
3252   Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3253   Known.Zero = Known2.Zero.byteSwap();
3254   Known.One = Known2.One.byteSwap();
3255   break;
3256 }
3257 case ISD::ABS: {
3258   Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3259
3260   // If the source's MSB is zero then we know the rest of the bits already.
3261   if (Known2.isNonNegative()) {
3262     Known.Zero = Known2.Zero;
3263     Known.One = Known2.One;
3264     break;
3265   }
3266
3267   // We only know that the absolute value's MSB will be zero if there is
3268   // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
3269   Known2.One.clearSignBit();
3270   if (Known2.One.getBoolValue()) {
3271     Known.Zero = APInt::getSignMask(BitWidth);
3272     break;
3273   }
3274   break;
3275 }
3276 case ISD::UMIN: {
3277   Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3278   Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3279
3280   // UMIN - we know that the result will have the maximum of the
3281   // known zero leading bits of the inputs.
3282   unsigned LeadZero = Known.countMinLeadingZeros();
3283   LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
3284
3285   Known.Zero &= Known2.Zero;
3286   Known.One &= Known2.One;
3287   Known.Zero.setHighBits(LeadZero);
3288   break;
3289 }
3290 case ISD::UMAX: {
3291   Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3292   Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3293
3294   // UMAX - we know that the result will have the maximum of the
3295   // known one leading bits of the inputs.
3296   unsigned LeadOne = Known.countMinLeadingOnes();
3297   LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
3298
3299   Known.Zero &= Known2.Zero;
3300   Known.One &= Known2.One;
3301   Known.One.setHighBits(LeadOne);
3302   break;
3303 }
3304 case ISD::SMIN:
3305 case ISD::SMAX: {
3306   // If we have a clamp pattern, we know that the number of sign bits will be
3307   // the minimum of the clamp min/max range.
3308   bool IsMax = (Opcode == ISD::SMAX);
3309   ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3310   if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3311     if (Op.getOperand(0).getOpcode() == (IsMax ?
        ISD::SMIN : ISD::SMAX))
3312       CstHigh =
3313           isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3314   if (CstLow && CstHigh) {
3315     if (!IsMax)
3316       std::swap(CstLow, CstHigh);
3317
3318     const APInt &ValueLow = CstLow->getAPIntValue();
3319     const APInt &ValueHigh = CstHigh->getAPIntValue();
3320     if (ValueLow.sle(ValueHigh)) {
3321       unsigned LowSignBits = ValueLow.getNumSignBits();
3322       unsigned HighSignBits = ValueHigh.getNumSignBits();
3323       unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3324       if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3325         Known.One.setHighBits(MinSignBits);
3326         break;
3327       }
3328       if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3329         Known.Zero.setHighBits(MinSignBits);
3330         break;
3331       }
3332     }
3333   }
3334
3335   // Fallback - just get the shared known bits of the operands.
3336   Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3337   if (Known.isUnknown()) break; // Early-out
3338   Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3339   Known.Zero &= Known2.Zero;
3340   Known.One &= Known2.One;
3341   break;
3342 }
3343 case ISD::FrameIndex:
3344 case ISD::TargetFrameIndex:
3345   TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
3346   break;
3347
3348 default:
3349   if (Opcode < ISD::BUILTIN_OP_END)
3350     break;
3351   LLVM_FALLTHROUGH;
3352 case ISD::INTRINSIC_WO_CHAIN:
3353 case ISD::INTRINSIC_W_CHAIN:
3354 case ISD::INTRINSIC_VOID:
3355   // Allow the target to implement this method for its nodes.
3356   TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3357   break;
3358 }
3359
3360 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3361 return Known;
3362 }
3363
3364 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3365                                                              SDValue N1) const {
3366   // X + 0 never overflows.
3367   if (isNullConstant(N1))
3368     return OFK_Never;
3369
3370   KnownBits N1Known = computeKnownBits(N1);
3371   if (N1Known.Zero.getBoolValue()) {
3372     KnownBits N0Known = computeKnownBits(N0);
3373
3374     bool overflow;
3375     (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3376     if (!overflow)
3377       return OFK_Never;
3378   }
3379
3380   // mulhi + 1 never overflows.
3381   if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3382       (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3383     return OFK_Never;
3384
3385   if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3386     KnownBits N0Known = computeKnownBits(N0);
3387
3388     if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3389       return OFK_Never;
3390   }
3391
3392   return OFK_Sometime;
3393 }
3394
3395 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3396   EVT OpVT = Val.getValueType();
3397   unsigned BitWidth = OpVT.getScalarSizeInBits();
3398
3399   // Is the constant a known power of 2?
3400   if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3401     return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3402
3403   // A left-shift of a constant one will have exactly one bit set because
3404   // shifting the bit off the end is undefined.
3405   if (Val.getOpcode() == ISD::SHL) {
3406     auto *C = isConstOrConstSplat(Val.getOperand(0));
3407     if (C && C->getAPIntValue() == 1)
3408       return true;
3409   }
3410
3411   // Similarly, a logical right-shift of a constant sign-bit will have exactly
3412   // one bit set.
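  // (e.g. for i8, (srl 0x80, C) with an in-range shift amount C yields one of
  // 0x80, 0x40, ..., 0x01, each of which has exactly one bit set.)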
3413 if (Val.getOpcode() == ISD::SRL) { 3414 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3415 if (C && C->getAPIntValue().isSignMask()) 3416 return true; 3417 } 3418 3419 // Are all operands of a build vector constant powers of two? 3420 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3421 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3422 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3423 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3424 return false; 3425 })) 3426 return true; 3427 3428 // More could be done here, though the above checks are enough 3429 // to handle some common cases. 3430 3431 // Fall back to computeKnownBits to catch other known cases. 3432 KnownBits Known = computeKnownBits(Val); 3433 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3434 } 3435 3436 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3437 EVT VT = Op.getValueType(); 3438 APInt DemandedElts = VT.isVector() 3439 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 3440 : APInt(1, 1); 3441 return ComputeNumSignBits(Op, DemandedElts, Depth); 3442 } 3443 3444 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3445 unsigned Depth) const { 3446 EVT VT = Op.getValueType(); 3447 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3448 unsigned VTBits = VT.getScalarSizeInBits(); 3449 unsigned NumElts = DemandedElts.getBitWidth(); 3450 unsigned Tmp, Tmp2; 3451 unsigned FirstAnswer = 1; 3452 3453 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3454 const APInt &Val = C->getAPIntValue(); 3455 return Val.getNumSignBits(); 3456 } 3457 3458 if (Depth >= MaxRecursionDepth) 3459 return 1; // Limit search depth. 3460 3461 if (!DemandedElts) 3462 return 1; // No demanded elts, better to assume we don't know anything. 3463 3464 unsigned Opcode = Op.getOpcode(); 3465 switch (Opcode) { 3466 default: break; 3467 case ISD::AssertSext: 3468 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3469 return VTBits-Tmp+1; 3470 case ISD::AssertZext: 3471 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3472 return VTBits-Tmp; 3473 3474 case ISD::BUILD_VECTOR: 3475 Tmp = VTBits; 3476 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3477 if (!DemandedElts[i]) 3478 continue; 3479 3480 SDValue SrcOp = Op.getOperand(i); 3481 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 3482 3483 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3484 if (SrcOp.getValueSizeInBits() != VTBits) { 3485 assert(SrcOp.getValueSizeInBits() > VTBits && 3486 "Expected BUILD_VECTOR implicit truncation"); 3487 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3488 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3489 } 3490 Tmp = std::min(Tmp, Tmp2); 3491 } 3492 return Tmp; 3493 3494 case ISD::VECTOR_SHUFFLE: { 3495 // Collect the minimum number of sign bits that are shared by every vector 3496 // element referenced by the shuffle. 3497 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3498 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3499 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3500 for (unsigned i = 0; i != NumElts; ++i) { 3501 int M = SVN->getMaskElt(i); 3502 if (!DemandedElts[i]) 3503 continue; 3504 // For UNDEF elements, we don't know anything about the common state of 3505 // the shuffle result. 
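  // (A negative mask element denotes UNDEF, so conservatively report a single
  // sign bit.)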
3506 if (M < 0) 3507 return 1; 3508 if ((unsigned)M < NumElts) 3509 DemandedLHS.setBit((unsigned)M % NumElts); 3510 else 3511 DemandedRHS.setBit((unsigned)M % NumElts); 3512 } 3513 Tmp = std::numeric_limits<unsigned>::max(); 3514 if (!!DemandedLHS) 3515 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3516 if (!!DemandedRHS) { 3517 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3518 Tmp = std::min(Tmp, Tmp2); 3519 } 3520 // If we don't know anything, early out and try computeKnownBits fall-back. 3521 if (Tmp == 1) 3522 break; 3523 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3524 return Tmp; 3525 } 3526 3527 case ISD::BITCAST: { 3528 SDValue N0 = Op.getOperand(0); 3529 EVT SrcVT = N0.getValueType(); 3530 unsigned SrcBits = SrcVT.getScalarSizeInBits(); 3531 3532 // Ignore bitcasts from unsupported types.. 3533 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) 3534 break; 3535 3536 // Fast handling of 'identity' bitcasts. 3537 if (VTBits == SrcBits) 3538 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3539 3540 bool IsLE = getDataLayout().isLittleEndian(); 3541 3542 // Bitcast 'large element' scalar/vector to 'small element' vector. 3543 if ((SrcBits % VTBits) == 0) { 3544 assert(VT.isVector() && "Expected bitcast to vector"); 3545 3546 unsigned Scale = SrcBits / VTBits; 3547 APInt SrcDemandedElts(NumElts / Scale, 0); 3548 for (unsigned i = 0; i != NumElts; ++i) 3549 if (DemandedElts[i]) 3550 SrcDemandedElts.setBit(i / Scale); 3551 3552 // Fast case - sign splat can be simply split across the small elements. 3553 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1); 3554 if (Tmp == SrcBits) 3555 return VTBits; 3556 3557 // Slow case - determine how far the sign extends into each sub-element. 3558 Tmp2 = VTBits; 3559 for (unsigned i = 0; i != NumElts; ++i) 3560 if (DemandedElts[i]) { 3561 unsigned SubOffset = i % Scale; 3562 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); 3563 SubOffset = SubOffset * VTBits; 3564 if (Tmp <= SubOffset) 3565 return 1; 3566 Tmp2 = std::min(Tmp2, Tmp - SubOffset); 3567 } 3568 return Tmp2; 3569 } 3570 break; 3571 } 3572 3573 case ISD::SIGN_EXTEND: 3574 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3575 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3576 case ISD::SIGN_EXTEND_INREG: 3577 // Max of the input and what this extends. 3578 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3579 Tmp = VTBits-Tmp+1; 3580 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3581 return std::max(Tmp, Tmp2); 3582 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3583 SDValue Src = Op.getOperand(0); 3584 EVT SrcVT = Src.getValueType(); 3585 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements()); 3586 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3587 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3588 } 3589 3590 case ISD::SRA: 3591 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3592 // SRA X, C -> adds C sign bits. 3593 if (ConstantSDNode *C = 3594 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) { 3595 APInt ShiftVal = C->getAPIntValue(); 3596 ShiftVal += Tmp; 3597 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); 3598 } 3599 return Tmp; 3600 case ISD::SHL: 3601 if (ConstantSDNode *C = 3602 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) { 3603 // shl destroys sign bits. 
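  // (e.g. an i16 value with 10 known sign bits keeps 10 - 3 = 7 of them after
  // a shl by 3.)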
3604 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3605 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 3606 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 3607 return Tmp - C->getZExtValue(); 3608 } 3609 break; 3610 case ISD::AND: 3611 case ISD::OR: 3612 case ISD::XOR: // NOT is handled here. 3613 // Logical binary ops preserve the number of sign bits at the worst. 3614 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3615 if (Tmp != 1) { 3616 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3617 FirstAnswer = std::min(Tmp, Tmp2); 3618 // We computed what we know about the sign bits as our first 3619 // answer. Now proceed to the generic code that uses 3620 // computeKnownBits, and pick whichever answer is better. 3621 } 3622 break; 3623 3624 case ISD::SELECT: 3625 case ISD::VSELECT: 3626 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3627 if (Tmp == 1) return 1; // Early out. 3628 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3629 return std::min(Tmp, Tmp2); 3630 case ISD::SELECT_CC: 3631 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3632 if (Tmp == 1) return 1; // Early out. 3633 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3634 return std::min(Tmp, Tmp2); 3635 3636 case ISD::SMIN: 3637 case ISD::SMAX: { 3638 // If we have a clamp pattern, we know that the number of sign bits will be 3639 // the minimum of the clamp min/max range. 3640 bool IsMax = (Opcode == ISD::SMAX); 3641 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3642 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3643 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3644 CstHigh = 3645 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3646 if (CstLow && CstHigh) { 3647 if (!IsMax) 3648 std::swap(CstLow, CstHigh); 3649 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { 3650 Tmp = CstLow->getAPIntValue().getNumSignBits(); 3651 Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); 3652 return std::min(Tmp, Tmp2); 3653 } 3654 } 3655 3656 // Fallback - just get the minimum number of sign bits of the operands. 3657 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3658 if (Tmp == 1) 3659 return 1; // Early out. 3660 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3661 return std::min(Tmp, Tmp2); 3662 } 3663 case ISD::UMIN: 3664 case ISD::UMAX: 3665 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3666 if (Tmp == 1) 3667 return 1; // Early out. 3668 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3669 return std::min(Tmp, Tmp2); 3670 case ISD::SADDO: 3671 case ISD::UADDO: 3672 case ISD::SSUBO: 3673 case ISD::USUBO: 3674 case ISD::SMULO: 3675 case ISD::UMULO: 3676 if (Op.getResNo() != 1) 3677 break; 3678 // The boolean result conforms to getBooleanContents. Fall through. 3679 // If setcc returns 0/-1, all bits are sign bits. 3680 // We know that we have an integer-based boolean since these operations 3681 // are only available for integer. 3682 if (TLI->getBooleanContents(VT.isVector(), false) == 3683 TargetLowering::ZeroOrNegativeOneBooleanContent) 3684 return VTBits; 3685 break; 3686 case ISD::SETCC: 3687 case ISD::STRICT_FSETCC: 3688 case ISD::STRICT_FSETCCS: { 3689 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 3690 // If setcc returns 0/-1, all bits are sign bits. 
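  // (e.g. with ZeroOrNegativeOneBooleanContent, an i32 setcc result is either
  // 0 or 0xFFFFFFFF, so all 32 bits match the sign bit.)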
3691   if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3692       TargetLowering::ZeroOrNegativeOneBooleanContent)
3693     return VTBits;
3694   break;
3695 }
3696 case ISD::ROTL:
3697 case ISD::ROTR:
3698   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3699     unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3700
3701     // Handle rotate right by N like a rotate left by VTBits-N.
3702     if (Opcode == ISD::ROTR)
3703       RotAmt = (VTBits - RotAmt) % VTBits;
3704
3705     // If we aren't rotating out all of the known-in sign bits, return the
3706     // number that are left. This handles rotl(sext(x), 1) for example.
3707     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3708     if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3709   }
3710   break;
3711 case ISD::ADD:
3712 case ISD::ADDC:
3713   // Add can have at most one carry bit. Thus we know that the output
3714   // is, at worst, one more bit than the inputs.
3715   Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3716   if (Tmp == 1) return 1; // Early out.
3717
3718   // Special case decrementing a value (ADD X, -1):
3719   if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3720     if (CRHS->isAllOnesValue()) {
3721       KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1);
3722
3723       // If the input is known to be 0 or 1, the output is 0/-1, which is all
3724       // sign bits set.
3725       if ((Known.Zero | 1).isAllOnesValue())
3726         return VTBits;
3727
3728       // If we are subtracting one from a positive number, there is no carry
3729       // out of the result.
3730       if (Known.isNonNegative())
3731         return Tmp;
3732     }
3733
3734   Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3735   if (Tmp2 == 1) return 1;
3736   return std::min(Tmp, Tmp2)-1;
3737
3738 case ISD::SUB:
3739   Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3740   if (Tmp2 == 1) return 1;
3741
3742   // Handle NEG.
3743   if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3744     if (CLHS->isNullValue()) {
3745       KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1);
3746       // If the input is known to be 0 or 1, the output is 0/-1, which is all
3747       // sign bits set.
3748       if ((Known.Zero | 1).isAllOnesValue())
3749         return VTBits;
3750
3751       // If the input is known to be positive (the sign bit is known clear),
3752       // the output of the NEG has the same number of sign bits as the input.
3753       if (Known.isNonNegative())
3754         return Tmp2;
3755
3756       // Otherwise, we treat this like a SUB.
3757     }
3758
3759   // Sub can have at most one carry bit. Thus we know that the output
3760   // is, at worst, one more bit than the inputs.
3761   Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3762   if (Tmp == 1) return 1; // Early out.
3763   return std::min(Tmp, Tmp2)-1;
3764 case ISD::MUL: {
3765   // The output of the Mul can be at most twice the valid bits in the inputs.
3766   unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3767   if (SignBitsOp0 == 1)
3768     break;
3769   unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3770   if (SignBitsOp1 == 1)
3771     break;
3772   unsigned OutValidBits =
3773       (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
3774   return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
3775 }
3776 case ISD::TRUNCATE: {
3777   // Check if the sign bits of the source go down as far as the truncated value.
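  // (e.g. an i32 source with 20 known sign bits truncated to i16 keeps
  // 20 - (32 - 16) = 4 of them.)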
3778   unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3779   unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3780   if (NumSrcSignBits > (NumSrcBits - VTBits))
3781     return NumSrcSignBits - (NumSrcBits - VTBits);
3782   break;
3783 }
3784 case ISD::EXTRACT_ELEMENT: {
3785   const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3786   const int BitWidth = Op.getValueSizeInBits();
3787   const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3788
3789   // Get the reverse index (counted from the big end); the value of operand 1
3790   // indexes elements from the little end, but the sign starts at the big end.
3791   const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3792
3793   // If the sign portion ends in our element the subtraction gives the correct
3794   // result. Otherwise it would give a negative or over-width result, so clamp.
3795   return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3796 }
3797 case ISD::INSERT_VECTOR_ELT: {
3798   SDValue InVec = Op.getOperand(0);
3799   SDValue InVal = Op.getOperand(1);
3800   SDValue EltNo = Op.getOperand(2);
3801
3802   ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3803   if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3804     // If we know the element index, split the demand between the
3805     // source vector and the inserted element.
3806     unsigned EltIdx = CEltNo->getZExtValue();
3807
3808     // If we demand the inserted element then get its sign bits.
3809     Tmp = std::numeric_limits<unsigned>::max();
3810     if (DemandedElts[EltIdx]) {
3811       // TODO - handle implicit truncation of inserted elements.
3812       if (InVal.getScalarValueSizeInBits() != VTBits)
3813         break;
3814       Tmp = ComputeNumSignBits(InVal, Depth + 1);
3815     }
3816
3817     // If we demand the source vector then get its sign bits, and determine
3818     // the minimum.
3819     APInt VectorElts = DemandedElts;
3820     VectorElts.clearBit(EltIdx);
3821     if (!!VectorElts) {
3822       Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3823       Tmp = std::min(Tmp, Tmp2);
3824     }
3825   } else {
3826     // Unknown element index, so ignore DemandedElts and demand them all.
3827     Tmp = ComputeNumSignBits(InVec, Depth + 1);
3828     Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3829     Tmp = std::min(Tmp, Tmp2);
3830   }
3831   assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3832   return Tmp;
3833 }
3834 case ISD::EXTRACT_VECTOR_ELT: {
3835   SDValue InVec = Op.getOperand(0);
3836   SDValue EltNo = Op.getOperand(1);
3837   EVT VecVT = InVec.getValueType();
3838   const unsigned BitWidth = Op.getValueSizeInBits();
3839   const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3840   const unsigned NumSrcElts = VecVT.getVectorNumElements();
3841
3842   // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3843   // anything about sign bits. But if the sizes match we can derive knowledge
3844   // about sign bits from the vector operand.
3845   if (BitWidth != EltBitWidth)
3846     break;
3847
3848   // If we know the element index, just demand that vector element, else for
3849   // an unknown element index, ignore DemandedElts and demand them all.
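  // (e.g. extracting lane 2 of a v4i32 by constant index only needs the
  // sign-bit count of that one source lane.)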
3850   APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3851   ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3852   if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3853     DemandedSrcElts =
3854         APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3855
3856   return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3857 }
3858 case ISD::EXTRACT_SUBVECTOR: {
3859   // If we know the element index, just demand those subvector elements,
3860   // otherwise demand them all.
3861   SDValue Src = Op.getOperand(0);
3862   ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3863   unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3864   APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
3865   if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3866     // Offset the demanded elts by the subvector index.
3867     uint64_t Idx = SubIdx->getZExtValue();
3868     DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3869   }
3870   return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3871 }
3872 case ISD::CONCAT_VECTORS: {
3873   // Determine the minimum number of sign bits across all demanded
3874   // elts of the input vectors. Early out if the result is already 1.
3875   Tmp = std::numeric_limits<unsigned>::max();
3876   EVT SubVectorVT = Op.getOperand(0).getValueType();
3877   unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3878   unsigned NumSubVectors = Op.getNumOperands();
3879   for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3880     APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3881     DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3882     if (!DemandedSub)
3883       continue;
3884     Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3885     Tmp = std::min(Tmp, Tmp2);
3886   }
3887   assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3888   return Tmp;
3889 }
3890 case ISD::INSERT_SUBVECTOR: {
3891   // If we know the element index, demand any elements from the subvector and
3892   // the remainder from the src it is inserted into, otherwise demand them all.
3893   SDValue Src = Op.getOperand(0);
3894   SDValue Sub = Op.getOperand(1);
3895   auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
3896   unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3897   if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
3898     Tmp = std::numeric_limits<unsigned>::max();
3899     uint64_t Idx = SubIdx->getZExtValue();
3900     APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3901     if (!!DemandedSubElts) {
3902       Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
3903       if (Tmp == 1) return 1; // early-out
3904     }
3905     APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
3906     APInt DemandedSrcElts = DemandedElts & ~SubMask;
3907     if (!!DemandedSrcElts) {
3908       Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3909       Tmp = std::min(Tmp, Tmp2);
3910     }
3911     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3912     return Tmp;
3913   }
3914
3915   // Not able to determine the index so just assume worst case.
3916   Tmp = ComputeNumSignBits(Sub, Depth + 1);
3917   if (Tmp == 1) return 1; // early-out
3918   Tmp2 = ComputeNumSignBits(Src, Depth + 1);
3919   Tmp = std::min(Tmp, Tmp2);
3920   assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3921   return Tmp;
3922 }
3923 }
3924
3925 // If we are looking at the loaded value of the SDNode.
3926 if (Op.getResNo() == 0) {
3927   // Handle LOADX separately here. EXTLOAD case will fall through.
3928 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 3929 unsigned ExtType = LD->getExtensionType(); 3930 switch (ExtType) { 3931 default: break; 3932 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known. 3933 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3934 return VTBits - Tmp + 1; 3935 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known. 3936 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3937 return VTBits - Tmp; 3938 case ISD::NON_EXTLOAD: 3939 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) { 3940 // We only need to handle vectors - computeKnownBits should handle 3941 // scalar cases. 3942 Type *CstTy = Cst->getType(); 3943 if (CstTy->isVectorTy() && 3944 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) { 3945 Tmp = VTBits; 3946 for (unsigned i = 0; i != NumElts; ++i) { 3947 if (!DemandedElts[i]) 3948 continue; 3949 if (Constant *Elt = Cst->getAggregateElement(i)) { 3950 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 3951 const APInt &Value = CInt->getValue(); 3952 Tmp = std::min(Tmp, Value.getNumSignBits()); 3953 continue; 3954 } 3955 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 3956 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 3957 Tmp = std::min(Tmp, Value.getNumSignBits()); 3958 continue; 3959 } 3960 } 3961 // Unknown type. Conservatively assume no bits match sign bit. 3962 return 1; 3963 } 3964 return Tmp; 3965 } 3966 } 3967 break; 3968 } 3969 } 3970 } 3971 3972 // Allow the target to implement this method for its nodes. 3973 if (Opcode >= ISD::BUILTIN_OP_END || 3974 Opcode == ISD::INTRINSIC_WO_CHAIN || 3975 Opcode == ISD::INTRINSIC_W_CHAIN || 3976 Opcode == ISD::INTRINSIC_VOID) { 3977 unsigned NumBits = 3978 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 3979 if (NumBits > 1) 3980 FirstAnswer = std::max(FirstAnswer, NumBits); 3981 } 3982 3983 // Finally, if we can prove that the top bits of the result are 0's or 1's, 3984 // use this information. 3985 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); 3986 3987 APInt Mask; 3988 if (Known.isNonNegative()) { // sign bit is 0 3989 Mask = Known.Zero; 3990 } else if (Known.isNegative()) { // sign bit is 1; 3991 Mask = Known.One; 3992 } else { 3993 // Nothing known. 3994 return FirstAnswer; 3995 } 3996 3997 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 3998 // the number of identical bits in the top of the input value. 3999 Mask = ~Mask; 4000 Mask <<= Mask.getBitWidth()-VTBits; 4001 // Return # leading zeros. We use 'min' here in case Val was zero before 4002 // shifting. We don't want to return '64' as for an i32 "0". 4003 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros())); 4004 } 4005 4006 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 4007 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 4008 !isa<ConstantSDNode>(Op.getOperand(1))) 4009 return false; 4010 4011 if (Op.getOpcode() == ISD::OR && 4012 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) 4013 return false; 4014 4015 return true; 4016 } 4017 4018 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { 4019 // If we're told that NaNs won't happen, assume they won't. 4020 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) 4021 return true; 4022 4023 if (Depth >= MaxRecursionDepth) 4024 return false; // Limit search depth. 4025 4026 // TODO: Handle vectors. 4027 // If the value is a constant, we can obviously see if it is a NaN or not. 
4028 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4029   return !C->getValueAPF().isNaN() ||
4030          (SNaN && !C->getValueAPF().isSignaling());
4031 }
4032
4033 unsigned Opcode = Op.getOpcode();
4034 switch (Opcode) {
4035 case ISD::FADD:
4036 case ISD::FSUB:
4037 case ISD::FMUL:
4038 case ISD::FDIV:
4039 case ISD::FREM:
4040 case ISD::FSIN:
4041 case ISD::FCOS: {
4042   if (SNaN)
4043     return true;
4044   // TODO: Need isKnownNeverInfinity
4045   return false;
4046 }
4047 case ISD::FCANONICALIZE:
4048 case ISD::FEXP:
4049 case ISD::FEXP2:
4050 case ISD::FTRUNC:
4051 case ISD::FFLOOR:
4052 case ISD::FCEIL:
4053 case ISD::FROUND:
4054 case ISD::FRINT:
4055 case ISD::FNEARBYINT: {
4056   if (SNaN)
4057     return true;
4058   return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4059 }
4060 case ISD::FABS:
4061 case ISD::FNEG:
4062 case ISD::FCOPYSIGN: {
4063   return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4064 }
4065 case ISD::SELECT:
4066   return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4067          isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4068 case ISD::FP_EXTEND:
4069 case ISD::FP_ROUND: {
4070   if (SNaN)
4071     return true;
4072   return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4073 }
4074 case ISD::SINT_TO_FP:
4075 case ISD::UINT_TO_FP:
4076   return true;
4077 case ISD::FMA:
4078 case ISD::FMAD: {
4079   if (SNaN)
4080     return true;
4081   return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4082          isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4083          isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4084 }
4085 case ISD::FSQRT: // Need to know the operand is positive.
4086 case ISD::FLOG:
4087 case ISD::FLOG2:
4088 case ISD::FLOG10:
4089 case ISD::FPOWI:
4090 case ISD::FPOW: {
4091   if (SNaN)
4092     return true;
4093   // TODO: Refine on operand
4094   return false;
4095 }
4096 case ISD::FMINNUM:
4097 case ISD::FMAXNUM: {
4098   // Only one needs to be known not-nan, since it will be returned if the
4099   // other ends up being one.
4100   return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4101          isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4102 }
4103 case ISD::FMINNUM_IEEE:
4104 case ISD::FMAXNUM_IEEE: {
4105   if (SNaN)
4106     return true;
4107   // This can return a NaN if either operand is an sNaN, or if both operands
4108   // are NaN.
4109   return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4110           isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4111          (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4112           isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4113 }
4114 case ISD::FMINIMUM:
4115 case ISD::FMAXIMUM: {
4116   // TODO: Does this quiet or return the original NaN as-is?
4117   return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4118          isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4119 }
4120 case ISD::EXTRACT_VECTOR_ELT: {
4121   return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4122 }
4123 default:
4124   if (Opcode >= ISD::BUILTIN_OP_END ||
4125       Opcode == ISD::INTRINSIC_WO_CHAIN ||
4126       Opcode == ISD::INTRINSIC_W_CHAIN ||
4127       Opcode == ISD::INTRINSIC_VOID) {
4128     return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4129   }
4130
4131   return false;
4132 }
4133 }
4134
4135 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4136   assert(Op.getValueType().isFloatingPoint() &&
4137          "Floating point type expected");
4138
4139   // If the value is a constant, we can obviously see if it is a zero or not.
4140   // TODO: Add BuildVector support.
4141 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4142   return !C->isZero();
4143 return false;
4144 }
4145
4146 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4147   assert(!Op.getValueType().isFloatingPoint() &&
4148          "Floating point types unsupported - use isKnownNeverZeroFloat");
4149
4150   // If the value is a constant, we can obviously see if it is a zero or not.
4151   if (ISD::matchUnaryPredicate(
4152           Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4153     return true;
4154
4155   // TODO: Recognize more cases here.
4156   switch (Op.getOpcode()) {
4157   default: break;
4158   case ISD::OR:
4159     if (isKnownNeverZero(Op.getOperand(1)) ||
4160         isKnownNeverZero(Op.getOperand(0)))
4161       return true;
4162     break;
4163   }
4164
4165   return false;
4166 }
4167
4168 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4169   // Check the obvious case.
4170   if (A == B) return true;
4171
4172   // Check for negative and positive zero.
4173   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4174     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4175       if (CA->isZero() && CB->isZero()) return true;
4176
4177   // Otherwise they may not be equal.
4178   return false;
4179 }
4180
4181 // FIXME: unify with llvm::haveNoCommonBitsSet.
4182 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4183 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4184   assert(A.getValueType() == B.getValueType() &&
4185          "Values must have the same type");
4186   return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4187 }
4188
4189 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4190                                 ArrayRef<SDValue> Ops,
4191                                 SelectionDAG &DAG) {
4192   int NumOps = Ops.size();
4193   assert(NumOps != 0 && "Can't build an empty vector!");
4194   assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4195          "Incorrect element count in BUILD_VECTOR!");
4196
4197   // BUILD_VECTOR of UNDEFs is UNDEF.
4198   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4199     return DAG.getUNDEF(VT);
4200
4201   // A BUILD_VECTOR of sequential extracts from the same vector/type is identity.
4202   SDValue IdentitySrc;
4203   bool IsIdentity = true;
4204   for (int i = 0; i != NumOps; ++i) {
4205     if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4206         Ops[i].getOperand(0).getValueType() != VT ||
4207         (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4208         !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4209         cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4210       IsIdentity = false;
4211       break;
4212     }
4213     IdentitySrc = Ops[i].getOperand(0);
4214   }
4215   if (IsIdentity)
4216     return IdentitySrc;
4217
4218   return SDValue();
4219 }
4220
4221 /// Try to simplify vector concatenation to an input value, undef, or build
4222 /// vector.
4223 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4224                                   ArrayRef<SDValue> Ops,
4225                                   SelectionDAG &DAG) {
4226   assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4227   assert(llvm::all_of(Ops,
4228                       [Ops](SDValue Op) {
4229                         return Ops[0].getValueType() == Op.getValueType();
4230                       }) &&
4231          "Concatenation of vectors with inconsistent value types!");
4232   assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
4233          VT.getVectorNumElements() &&
4234          "Incorrect element count in vector concatenation!");
4235
4236   if (Ops.size() == 1)
4237     return Ops[0];
4238
4239   // Concat of UNDEFs is UNDEF.
4240 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) 4241 return DAG.getUNDEF(VT); 4242 4243 // Scan the operands and look for extract operations from a single source 4244 // that correspond to insertion at the same location via this concatenation: 4245 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ... 4246 SDValue IdentitySrc; 4247 bool IsIdentity = true; 4248 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 4249 SDValue Op = Ops[i]; 4250 unsigned IdentityIndex = i * Op.getValueType().getVectorNumElements(); 4251 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR || 4252 Op.getOperand(0).getValueType() != VT || 4253 (IdentitySrc && Op.getOperand(0) != IdentitySrc) || 4254 !isa<ConstantSDNode>(Op.getOperand(1)) || 4255 Op.getConstantOperandVal(1) != IdentityIndex) { 4256 IsIdentity = false; 4257 break; 4258 } 4259 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) && 4260 "Unexpected identity source vector for concat of extracts"); 4261 IdentitySrc = Op.getOperand(0); 4262 } 4263 if (IsIdentity) { 4264 assert(IdentitySrc && "Failed to set source vector of extracts"); 4265 return IdentitySrc; 4266 } 4267 4268 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be 4269 // simplified to one big BUILD_VECTOR. 4270 // FIXME: Add support for SCALAR_TO_VECTOR as well. 4271 EVT SVT = VT.getScalarType(); 4272 SmallVector<SDValue, 16> Elts; 4273 for (SDValue Op : Ops) { 4274 EVT OpVT = Op.getValueType(); 4275 if (Op.isUndef()) 4276 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); 4277 else if (Op.getOpcode() == ISD::BUILD_VECTOR) 4278 Elts.append(Op->op_begin(), Op->op_end()); 4279 else 4280 return SDValue(); 4281 } 4282 4283 // BUILD_VECTOR requires all inputs to be of the same type, find the 4284 // maximum type and extend them all. 4285 for (SDValue Op : Elts) 4286 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 4287 4288 if (SVT.bitsGT(VT.getScalarType())) 4289 for (SDValue &Op : Elts) 4290 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) 4291 ? DAG.getZExtOrTrunc(Op, DL, SVT) 4292 : DAG.getSExtOrTrunc(Op, DL, SVT); 4293 4294 SDValue V = DAG.getBuildVector(VT, DL, Elts); 4295 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG); 4296 return V; 4297 } 4298 4299 /// Gets or creates the specified node. 4300 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { 4301 FoldingSetNodeID ID; 4302 AddNodeIDNode(ID, Opcode, getVTList(VT), None); 4303 void *IP = nullptr; 4304 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 4305 return SDValue(E, 0); 4306 4307 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), 4308 getVTList(VT)); 4309 CSEMap.InsertNode(N, IP); 4310 4311 InsertNode(N); 4312 SDValue V = SDValue(N, 0); 4313 NewSDValueDbgMsg(V, "Creating new node: ", this); 4314 return V; 4315 } 4316 4317 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 4318 SDValue Operand, const SDNodeFlags Flags) { 4319 // Constant fold unary operations with an integer constant operand. Even 4320 // opaque constant will be folded, because the folding of unary operations 4321 // doesn't create new constants with different values. Nevertheless, the 4322 // opaque flag is preserved during folding to prevent future folding with 4323 // other constants. 
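  // (e.g. (sext i8 -1 to i32) folds straight to the i32 constant -1; an
  // opaque source constant stays opaque in the folded result.)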
4324 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 4325 const APInt &Val = C->getAPIntValue(); 4326 switch (Opcode) { 4327 default: break; 4328 case ISD::SIGN_EXTEND: 4329 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 4330 C->isTargetOpcode(), C->isOpaque()); 4331 case ISD::TRUNCATE: 4332 if (C->isOpaque()) 4333 break; 4334 LLVM_FALLTHROUGH; 4335 case ISD::ANY_EXTEND: 4336 case ISD::ZERO_EXTEND: 4337 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 4338 C->isTargetOpcode(), C->isOpaque()); 4339 case ISD::UINT_TO_FP: 4340 case ISD::SINT_TO_FP: { 4341 APFloat apf(EVTToAPFloatSemantics(VT), 4342 APInt::getNullValue(VT.getSizeInBits())); 4343 (void)apf.convertFromAPInt(Val, 4344 Opcode==ISD::SINT_TO_FP, 4345 APFloat::rmNearestTiesToEven); 4346 return getConstantFP(apf, DL, VT); 4347 } 4348 case ISD::BITCAST: 4349 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 4350 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 4351 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 4352 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 4353 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 4354 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 4355 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 4356 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 4357 break; 4358 case ISD::ABS: 4359 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 4360 C->isOpaque()); 4361 case ISD::BITREVERSE: 4362 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 4363 C->isOpaque()); 4364 case ISD::BSWAP: 4365 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 4366 C->isOpaque()); 4367 case ISD::CTPOP: 4368 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 4369 C->isOpaque()); 4370 case ISD::CTLZ: 4371 case ISD::CTLZ_ZERO_UNDEF: 4372 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 4373 C->isOpaque()); 4374 case ISD::CTTZ: 4375 case ISD::CTTZ_ZERO_UNDEF: 4376 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 4377 C->isOpaque()); 4378 case ISD::FP16_TO_FP: { 4379 bool Ignored; 4380 APFloat FPV(APFloat::IEEEhalf(), 4381 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 4382 4383 // This can return overflow, underflow, or inexact; we don't care. 4384 // FIXME need to be more flexible about rounding mode. 4385 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4386 APFloat::rmNearestTiesToEven, &Ignored); 4387 return getConstantFP(FPV, DL, VT); 4388 } 4389 } 4390 } 4391 4392 // Constant fold unary operations with a floating point constant operand. 
4393 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4394 APFloat V = C->getValueAPF(); // make copy 4395 switch (Opcode) { 4396 case ISD::FNEG: 4397 V.changeSign(); 4398 return getConstantFP(V, DL, VT); 4399 case ISD::FABS: 4400 V.clearSign(); 4401 return getConstantFP(V, DL, VT); 4402 case ISD::FCEIL: { 4403 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4404 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4405 return getConstantFP(V, DL, VT); 4406 break; 4407 } 4408 case ISD::FTRUNC: { 4409 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4410 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4411 return getConstantFP(V, DL, VT); 4412 break; 4413 } 4414 case ISD::FFLOOR: { 4415 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4416 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4417 return getConstantFP(V, DL, VT); 4418 break; 4419 } 4420 case ISD::FP_EXTEND: { 4421 bool ignored; 4422 // This can return overflow, underflow, or inexact; we don't care. 4423 // FIXME need to be more flexible about rounding mode. 4424 (void)V.convert(EVTToAPFloatSemantics(VT), 4425 APFloat::rmNearestTiesToEven, &ignored); 4426 return getConstantFP(V, DL, VT); 4427 } 4428 case ISD::FP_TO_SINT: 4429 case ISD::FP_TO_UINT: { 4430 bool ignored; 4431 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4432 // FIXME need to be more flexible about rounding mode. 4433 APFloat::opStatus s = 4434 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4435 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4436 break; 4437 return getConstant(IntVal, DL, VT); 4438 } 4439 case ISD::BITCAST: 4440 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4441 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4442 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4443 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4444 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4445 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4446 break; 4447 case ISD::FP_TO_FP16: { 4448 bool Ignored; 4449 // This can return overflow, underflow, or inexact; we don't care. 4450 // FIXME need to be more flexible about rounding mode. 4451 (void)V.convert(APFloat::IEEEhalf(), 4452 APFloat::rmNearestTiesToEven, &Ignored); 4453 return getConstant(V.bitcastToAPInt(), DL, VT); 4454 } 4455 } 4456 } 4457 4458 // Constant fold unary operations with a vector integer or float operand. 4459 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4460 if (BV->isConstant()) { 4461 switch (Opcode) { 4462 default: 4463 // FIXME: Entirely reasonable to perform folding of other unary 4464 // operations here as the need arises. 
4465   break;
4466 case ISD::FNEG:
4467 case ISD::FABS:
4468 case ISD::FCEIL:
4469 case ISD::FTRUNC:
4470 case ISD::FFLOOR:
4471 case ISD::FP_EXTEND:
4472 case ISD::FP_TO_SINT:
4473 case ISD::FP_TO_UINT:
4474 case ISD::TRUNCATE:
4475 case ISD::ANY_EXTEND:
4476 case ISD::ZERO_EXTEND:
4477 case ISD::SIGN_EXTEND:
4478 case ISD::UINT_TO_FP:
4479 case ISD::SINT_TO_FP:
4480 case ISD::ABS:
4481 case ISD::BITREVERSE:
4482 case ISD::BSWAP:
4483 case ISD::CTLZ:
4484 case ISD::CTLZ_ZERO_UNDEF:
4485 case ISD::CTTZ:
4486 case ISD::CTTZ_ZERO_UNDEF:
4487 case ISD::CTPOP: {
4488   SDValue Ops = { Operand };
4489   if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4490     return Fold;
4491 }
4492 }
4493 }
4494 }
4495
4496 unsigned OpOpcode = Operand.getNode()->getOpcode();
4497 switch (Opcode) {
4498 case ISD::TokenFactor:
4499 case ISD::MERGE_VALUES:
4500 case ISD::CONCAT_VECTORS:
4501   return Operand; // Factor, merge or concat of one node? No need.
4502 case ISD::BUILD_VECTOR: {
4503   // Attempt to simplify BUILD_VECTOR.
4504   SDValue Ops[] = {Operand};
4505   if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4506     return V;
4507   break;
4508 }
4509 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4510 case ISD::FP_EXTEND:
4511   assert(VT.isFloatingPoint() &&
4512          Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4513   if (Operand.getValueType() == VT) return Operand; // noop conversion.
4514   assert((!VT.isVector() ||
4515           VT.getVectorNumElements() ==
4516           Operand.getValueType().getVectorNumElements()) &&
4517          "Vector element count mismatch!");
4518   assert(Operand.getValueType().bitsLT(VT) &&
4519          "Invalid fpext node, dst < src!");
4520   if (Operand.isUndef())
4521     return getUNDEF(VT);
4522   break;
4523 case ISD::FP_TO_SINT:
4524 case ISD::FP_TO_UINT:
4525   if (Operand.isUndef())
4526     return getUNDEF(VT);
4527   break;
4528 case ISD::SINT_TO_FP:
4529 case ISD::UINT_TO_FP:
4530   // [us]itofp(undef) = 0, because the result value is bounded.
4531   if (Operand.isUndef())
4532     return getConstantFP(0.0, DL, VT);
4533   break;
4534 case ISD::SIGN_EXTEND:
4535   assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4536          "Invalid SIGN_EXTEND!");
4537   assert(VT.isVector() == Operand.getValueType().isVector() &&
4538          "SIGN_EXTEND result type should be vector iff the operand "
4539          "type is vector!");
4540   if (Operand.getValueType() == VT) return Operand; // noop extension
4541   assert((!VT.isVector() ||
4542           VT.getVectorNumElements() ==
4543           Operand.getValueType().getVectorNumElements()) &&
4544          "Vector element count mismatch!");
4545   assert(Operand.getValueType().bitsLT(VT) &&
4546          "Invalid sext node, dst < src!");
4547   if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4548     return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4549   else if (OpOpcode == ISD::UNDEF)
4550     // sext(undef) = 0, because the top bits will all be the same.
4551     return getConstant(0, DL, VT);
4552   break;
4553 case ISD::ZERO_EXTEND:
4554   assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4555          "Invalid ZERO_EXTEND!");
4556   assert(VT.isVector() == Operand.getValueType().isVector() &&
4557          "ZERO_EXTEND result type should be vector iff the operand "
4558          "type is vector!");
4559   if (Operand.getValueType() == VT) return Operand; // noop extension
4560   assert((!VT.isVector() ||
4561           VT.getVectorNumElements() ==
4562           Operand.getValueType().getVectorNumElements()) &&
4563          "Vector element count mismatch!");
4564   assert(Operand.getValueType().bitsLT(VT) &&
4565          "Invalid zext node, dst < src!");
4566   if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4567     return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4568   else if (OpOpcode == ISD::UNDEF)
4569     // zext(undef) = 0, because the top bits will be zero.
4570     return getConstant(0, DL, VT);
4571   break;
4572 case ISD::ANY_EXTEND:
4573   assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4574          "Invalid ANY_EXTEND!");
4575   assert(VT.isVector() == Operand.getValueType().isVector() &&
4576          "ANY_EXTEND result type should be vector iff the operand "
4577          "type is vector!");
4578   if (Operand.getValueType() == VT) return Operand; // noop extension
4579   assert((!VT.isVector() ||
4580           VT.getVectorNumElements() ==
4581           Operand.getValueType().getVectorNumElements()) &&
4582          "Vector element count mismatch!");
4583   assert(Operand.getValueType().bitsLT(VT) &&
4584          "Invalid anyext node, dst < src!");
4585
4586   if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4587       OpOpcode == ISD::ANY_EXTEND)
4588     // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4589     return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4590   else if (OpOpcode == ISD::UNDEF)
4591     return getUNDEF(VT);
4592
4593   // (ext (trunc x)) -> x
4594   if (OpOpcode == ISD::TRUNCATE) {
4595     SDValue OpOp = Operand.getOperand(0);
4596     if (OpOp.getValueType() == VT) {
4597       transferDbgValues(Operand, OpOp);
4598       return OpOp;
4599     }
4600   }
4601   break;
4602 case ISD::TRUNCATE:
4603   assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4604          "Invalid TRUNCATE!");
4605   assert(VT.isVector() == Operand.getValueType().isVector() &&
4606          "TRUNCATE result type should be vector iff the operand "
4607          "type is vector!");
4608   if (Operand.getValueType() == VT) return Operand; // noop truncate
4609   assert((!VT.isVector() ||
4610           VT.getVectorNumElements() ==
4611           Operand.getValueType().getVectorNumElements()) &&
4612          "Vector element count mismatch!");
4613   assert(Operand.getValueType().bitsGT(VT) &&
4614          "Invalid truncate node, src < dst!");
4615   if (OpOpcode == ISD::TRUNCATE)
4616     return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4617   if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4618       OpOpcode == ISD::ANY_EXTEND) {
4619     // If the source is smaller than the dest, we still need an extend.
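    // (e.g. (trunc (sext i8 X to i64) to i32) becomes (sext i8 X to i32),
    // while an equal-sized source is returned as-is.)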
4620 if (Operand.getOperand(0).getValueType().getScalarType() 4621 .bitsLT(VT.getScalarType())) 4622 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4623 if (Operand.getOperand(0).getValueType().bitsGT(VT)) 4624 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4625 return Operand.getOperand(0); 4626 } 4627 if (OpOpcode == ISD::UNDEF) 4628 return getUNDEF(VT); 4629 break; 4630 case ISD::ANY_EXTEND_VECTOR_INREG: 4631 case ISD::ZERO_EXTEND_VECTOR_INREG: 4632 case ISD::SIGN_EXTEND_VECTOR_INREG: 4633 assert(VT.isVector() && "This DAG node is restricted to vector types."); 4634 assert(Operand.getValueType().bitsLE(VT) && 4635 "The input must be the same size or smaller than the result."); 4636 assert(VT.getVectorNumElements() < 4637 Operand.getValueType().getVectorNumElements() && 4638 "The destination vector type must have fewer lanes than the input."); 4639 break; 4640 case ISD::ABS: 4641 assert(VT.isInteger() && VT == Operand.getValueType() && 4642 "Invalid ABS!"); 4643 if (OpOpcode == ISD::UNDEF) 4644 return getUNDEF(VT); 4645 break; 4646 case ISD::BSWAP: 4647 assert(VT.isInteger() && VT == Operand.getValueType() && 4648 "Invalid BSWAP!"); 4649 assert((VT.getScalarSizeInBits() % 16 == 0) && 4650 "BSWAP types must be a multiple of 16 bits!"); 4651 if (OpOpcode == ISD::UNDEF) 4652 return getUNDEF(VT); 4653 break; 4654 case ISD::BITREVERSE: 4655 assert(VT.isInteger() && VT == Operand.getValueType() && 4656 "Invalid BITREVERSE!"); 4657 if (OpOpcode == ISD::UNDEF) 4658 return getUNDEF(VT); 4659 break; 4660 case ISD::BITCAST: 4661 // Basic sanity checking. 4662 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 4663 "Cannot BITCAST between types of different sizes!"); 4664 if (VT == Operand.getValueType()) return Operand; // noop conversion. 4665 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4666 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4667 if (OpOpcode == ISD::UNDEF) 4668 return getUNDEF(VT); 4669 break; 4670 case ISD::SCALAR_TO_VECTOR: 4671 assert(VT.isVector() && !Operand.getValueType().isVector() && 4672 (VT.getVectorElementType() == Operand.getValueType() || 4673 (VT.getVectorElementType().isInteger() && 4674 Operand.getValueType().isInteger() && 4675 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4676 "Illegal SCALAR_TO_VECTOR node!"); 4677 if (OpOpcode == ISD::UNDEF) 4678 return getUNDEF(VT); 4679 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4680 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4681 isa<ConstantSDNode>(Operand.getOperand(1)) && 4682 Operand.getConstantOperandVal(1) == 0 && 4683 Operand.getOperand(0).getValueType() == VT) 4684 return Operand.getOperand(0); 4685 break; 4686 case ISD::FNEG: 4687 // Negation of an unknown bag of bits is still completely undefined. 
4688 if (OpOpcode == ISD::UNDEF) 4689 return getUNDEF(VT); 4690 4691 if (OpOpcode == ISD::FNEG) // --X -> X 4692 return Operand.getOperand(0); 4693 break; 4694 case ISD::FABS: 4695 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4696 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4697 break; 4698 } 4699 4700 SDNode *N; 4701 SDVTList VTs = getVTList(VT); 4702 SDValue Ops[] = {Operand}; 4703 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4704 FoldingSetNodeID ID; 4705 AddNodeIDNode(ID, Opcode, VTs, Ops); 4706 void *IP = nullptr; 4707 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4708 E->intersectFlagsWith(Flags); 4709 return SDValue(E, 0); 4710 } 4711 4712 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4713 N->setFlags(Flags); 4714 createOperands(N, Ops); 4715 CSEMap.InsertNode(N, IP); 4716 } else { 4717 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4718 createOperands(N, Ops); 4719 } 4720 4721 InsertNode(N); 4722 SDValue V = SDValue(N, 0); 4723 NewSDValueDbgMsg(V, "Creating new node: ", this); 4724 return V; 4725 } 4726 4727 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, 4728 const APInt &C2) { 4729 switch (Opcode) { 4730 case ISD::ADD: return C1 + C2; 4731 case ISD::SUB: return C1 - C2; 4732 case ISD::MUL: return C1 * C2; 4733 case ISD::AND: return C1 & C2; 4734 case ISD::OR: return C1 | C2; 4735 case ISD::XOR: return C1 ^ C2; 4736 case ISD::SHL: return C1 << C2; 4737 case ISD::SRL: return C1.lshr(C2); 4738 case ISD::SRA: return C1.ashr(C2); 4739 case ISD::ROTL: return C1.rotl(C2); 4740 case ISD::ROTR: return C1.rotr(C2); 4741 case ISD::SMIN: return C1.sle(C2) ? C1 : C2; 4742 case ISD::SMAX: return C1.sge(C2) ? C1 : C2; 4743 case ISD::UMIN: return C1.ule(C2) ? C1 : C2; 4744 case ISD::UMAX: return C1.uge(C2) ? 
C1 : C2; 4745 case ISD::SADDSAT: return C1.sadd_sat(C2); 4746 case ISD::UADDSAT: return C1.uadd_sat(C2); 4747 case ISD::SSUBSAT: return C1.ssub_sat(C2); 4748 case ISD::USUBSAT: return C1.usub_sat(C2); 4749 case ISD::UDIV: 4750 if (!C2.getBoolValue()) 4751 break; 4752 return C1.udiv(C2); 4753 case ISD::UREM: 4754 if (!C2.getBoolValue()) 4755 break; 4756 return C1.urem(C2); 4757 case ISD::SDIV: 4758 if (!C2.getBoolValue()) 4759 break; 4760 return C1.sdiv(C2); 4761 case ISD::SREM: 4762 if (!C2.getBoolValue()) 4763 break; 4764 return C1.srem(C2); 4765 } 4766 return llvm::None; 4767 } 4768 4769 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4770 EVT VT, const ConstantSDNode *C1, 4771 const ConstantSDNode *C2) { 4772 if (C1->isOpaque() || C2->isOpaque()) 4773 return SDValue(); 4774 if (Optional<APInt> Folded = 4775 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue())) 4776 return getConstant(Folded.getValue(), DL, VT); 4777 return SDValue(); 4778 } 4779 4780 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4781 const GlobalAddressSDNode *GA, 4782 const SDNode *N2) { 4783 if (GA->getOpcode() != ISD::GlobalAddress) 4784 return SDValue(); 4785 if (!TLI->isOffsetFoldingLegal(GA)) 4786 return SDValue(); 4787 auto *C2 = dyn_cast<ConstantSDNode>(N2); 4788 if (!C2) 4789 return SDValue(); 4790 int64_t Offset = C2->getSExtValue(); 4791 switch (Opcode) { 4792 case ISD::ADD: break; 4793 case ISD::SUB: Offset = -uint64_t(Offset); break; 4794 default: return SDValue(); 4795 } 4796 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, 4797 GA->getOffset() + uint64_t(Offset)); 4798 } 4799 4800 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4801 switch (Opcode) { 4802 case ISD::SDIV: 4803 case ISD::UDIV: 4804 case ISD::SREM: 4805 case ISD::UREM: { 4806 // If a divisor is zero/undef or any element of a divisor vector is 4807 // zero/undef, the whole op is undef. 4808 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4809 SDValue Divisor = Ops[1]; 4810 if (Divisor.isUndef() || isNullConstant(Divisor)) 4811 return true; 4812 4813 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4814 llvm::any_of(Divisor->op_values(), 4815 [](SDValue V) { return V.isUndef() || 4816 isNullConstant(V); }); 4817 // TODO: Handle signed overflow. 4818 } 4819 // TODO: Handle oversized shifts. 4820 default: 4821 return false; 4822 } 4823 } 4824 4825 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4826 EVT VT, SDNode *N1, SDNode *N2) { 4827 // If the opcode is a target-specific ISD node, there's nothing we can 4828 // do here and the operand rules may not line up with the below, so 4829 // bail early. 4830 if (Opcode >= ISD::BUILTIN_OP_END) 4831 return SDValue(); 4832 4833 if (isUndef(Opcode, {SDValue(N1, 0), SDValue(N2, 0)})) 4834 return getUNDEF(VT); 4835 4836 // Handle the case of two scalars. 
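// For example, folding (add (i32 3), (i32 5)) below yields getConstant(8).
// Opaque constants are rejected here, and FoldValue returns None for
// trapping inputs such as division or remainder by zero, so no fold occurs.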
4837 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
4838 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
4839 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, C1, C2);
4840 assert((!Folded || !VT.isVector()) &&
4841 "Can't fold vector ops with scalar operands");
4842 return Folded;
4843 }
4844 }
4845
4846 // fold (add Sym, c) -> Sym+c
4847 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
4848 return FoldSymbolOffset(Opcode, VT, GA, N2);
4849 if (TLI->isCommutativeBinOp(Opcode))
4850 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
4851 return FoldSymbolOffset(Opcode, VT, GA, N1);
4852
4853 // For vectors, extract each constant element and fold them individually.
4854 // Either input may be an undef value.
4855 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
4856 if (!BV1 && !N1->isUndef())
4857 return SDValue();
4858 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
4859 if (!BV2 && !N2->isUndef())
4860 return SDValue();
4861 // If both operands are undef, that's handled the same way as scalars.
4862 if (!BV1 && !BV2)
4863 return SDValue();
4864
4865 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
4866 "Vector binop with different number of elements in operands?");
4867
4868 EVT SVT = VT.getScalarType();
4869 EVT LegalSVT = SVT;
4870 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4871 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4872 if (LegalSVT.bitsLT(SVT))
4873 return SDValue();
4874 }
4875 SmallVector<SDValue, 4> Outputs;
4876 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
4877 for (unsigned I = 0; I != NumOps; ++I) {
4878 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
4879 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
4880 if (SVT.isInteger()) {
4881 if (V1->getValueType(0).bitsGT(SVT))
4882 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4883 if (V2->getValueType(0).bitsGT(SVT))
4884 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4885 }
4886
4887 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4888 return SDValue();
4889
4890 // Fold one vector element.
4891 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4892 if (LegalSVT != SVT)
4893 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4894
4895 // Scalar folding only succeeded if the result is a constant or UNDEF.
4896 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4897 ScalarResult.getOpcode() != ISD::ConstantFP)
4898 return SDValue();
4899 Outputs.push_back(ScalarResult);
4900 }
4901
4902 assert(VT.getVectorNumElements() == Outputs.size() &&
4903 "Vector size mismatch!");
4904
4905 // We may have a vector type but a scalar result. Create a splat.
4906 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4907
4908 // Build a big vector out of the scalar elements we generated.
4909 return getBuildVector(VT, SDLoc(), Outputs);
4910 }
4911
4912 // TODO: Merge with FoldConstantArithmetic
4913 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
4914 const SDLoc &DL, EVT VT,
4915 ArrayRef<SDValue> Ops,
4916 const SDNodeFlags Flags) {
4917 // If the opcode is a target-specific ISD node, there's nothing we can
4918 // do here and the operand rules may not line up with the below, so
4919 // bail early.
4920 if (Opcode >= ISD::BUILTIN_OP_END)
4921 return SDValue();
4922
4923 if (isUndef(Opcode, Ops))
4924 return getUNDEF(VT);
4925
4926 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
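// Lanes are folded independently below; e.g. (add (build_vector 1, 2),
// (build_vector 3, 4)) becomes (build_vector 4, 6), and i1 SETCC lane
// results are sign-extended to the legal scalar type when required.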
4927 if (!VT.isVector()) 4928 return SDValue(); 4929 4930 unsigned NumElts = VT.getVectorNumElements(); 4931 4932 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) { 4933 return !Op.getValueType().isVector() || 4934 Op.getValueType().getVectorNumElements() == NumElts; 4935 }; 4936 4937 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { 4938 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); 4939 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || 4940 (BV && BV->isConstant()); 4941 }; 4942 4943 // All operands must be vector types with the same number of elements as 4944 // the result type and must be either UNDEF or a build vector of constant 4945 // or UNDEF scalars. 4946 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) || 4947 !llvm::all_of(Ops, IsScalarOrSameVectorSize)) 4948 return SDValue(); 4949 4950 // If we are comparing vectors, then the result needs to be a i1 boolean 4951 // that is then sign-extended back to the legal result type. 4952 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); 4953 4954 // Find legal integer scalar type for constant promotion and 4955 // ensure that its scalar size is at least as large as source. 4956 EVT LegalSVT = VT.getScalarType(); 4957 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { 4958 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); 4959 if (LegalSVT.bitsLT(VT.getScalarType())) 4960 return SDValue(); 4961 } 4962 4963 // Constant fold each scalar lane separately. 4964 SmallVector<SDValue, 4> ScalarResults; 4965 for (unsigned i = 0; i != NumElts; i++) { 4966 SmallVector<SDValue, 4> ScalarOps; 4967 for (SDValue Op : Ops) { 4968 EVT InSVT = Op.getValueType().getScalarType(); 4969 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op); 4970 if (!InBV) { 4971 // We've checked that this is UNDEF or a constant of some kind. 4972 if (Op.isUndef()) 4973 ScalarOps.push_back(getUNDEF(InSVT)); 4974 else 4975 ScalarOps.push_back(Op); 4976 continue; 4977 } 4978 4979 SDValue ScalarOp = InBV->getOperand(i); 4980 EVT ScalarVT = ScalarOp.getValueType(); 4981 4982 // Build vector (integer) scalar operands may need implicit 4983 // truncation - do this before constant folding. 4984 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) 4985 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); 4986 4987 ScalarOps.push_back(ScalarOp); 4988 } 4989 4990 // Constant fold the scalar operands. 4991 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags); 4992 4993 // Legalize the (integer) scalar constant if necessary. 4994 if (LegalSVT != SVT) 4995 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult); 4996 4997 // Scalar folding only succeeded if the result is a constant or UNDEF. 4998 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && 4999 ScalarResult.getOpcode() != ISD::ConstantFP) 5000 return SDValue(); 5001 ScalarResults.push_back(ScalarResult); 5002 } 5003 5004 SDValue V = getBuildVector(VT, DL, ScalarResults); 5005 NewSDValueDbgMsg(V, "New node fold constant vector: ", this); 5006 return V; 5007 } 5008 5009 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL, 5010 EVT VT, SDValue N1, SDValue N2) { 5011 // TODO: We don't do any constant folding for strict FP opcodes here, but we 5012 // should. That will require dealing with a potentially non-default 5013 // rounding mode, checking the "opStatus" return value from the APFloat 5014 // math calculations, and possibly other variations. 
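// All folds below use APFloat with round-to-nearest-even; e.g.
// (fadd (f32 1.0), (f32 2.0)) becomes a ConstantFP node holding 3.0.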
5015 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 5016 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 5017 if (N1CFP && N2CFP) { 5018 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF(); 5019 switch (Opcode) { 5020 case ISD::FADD: 5021 C1.add(C2, APFloat::rmNearestTiesToEven); 5022 return getConstantFP(C1, DL, VT); 5023 case ISD::FSUB: 5024 C1.subtract(C2, APFloat::rmNearestTiesToEven); 5025 return getConstantFP(C1, DL, VT); 5026 case ISD::FMUL: 5027 C1.multiply(C2, APFloat::rmNearestTiesToEven); 5028 return getConstantFP(C1, DL, VT); 5029 case ISD::FDIV: 5030 C1.divide(C2, APFloat::rmNearestTiesToEven); 5031 return getConstantFP(C1, DL, VT); 5032 case ISD::FREM: 5033 C1.mod(C2); 5034 return getConstantFP(C1, DL, VT); 5035 case ISD::FCOPYSIGN: 5036 C1.copySign(C2); 5037 return getConstantFP(C1, DL, VT); 5038 default: break; 5039 } 5040 } 5041 if (N1CFP && Opcode == ISD::FP_ROUND) { 5042 APFloat C1 = N1CFP->getValueAPF(); // make copy 5043 bool Unused; 5044 // This can return overflow, underflow, or inexact; we don't care. 5045 // FIXME need to be more flexible about rounding mode. 5046 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, 5047 &Unused); 5048 return getConstantFP(C1, DL, VT); 5049 } 5050 5051 switch (Opcode) { 5052 case ISD::FADD: 5053 case ISD::FSUB: 5054 case ISD::FMUL: 5055 case ISD::FDIV: 5056 case ISD::FREM: 5057 // If both operands are undef, the result is undef. If 1 operand is undef, 5058 // the result is NaN. This should match the behavior of the IR optimizer. 5059 if (N1.isUndef() && N2.isUndef()) 5060 return getUNDEF(VT); 5061 if (N1.isUndef() || N2.isUndef()) 5062 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); 5063 } 5064 return SDValue(); 5065 } 5066 5067 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5068 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 5069 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5070 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 5071 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5072 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5073 5074 // Canonicalize constant to RHS if commutative. 5075 if (TLI->isCommutativeBinOp(Opcode)) { 5076 if (N1C && !N2C) { 5077 std::swap(N1C, N2C); 5078 std::swap(N1, N2); 5079 } else if (N1CFP && !N2CFP) { 5080 std::swap(N1CFP, N2CFP); 5081 std::swap(N1, N2); 5082 } 5083 } 5084 5085 switch (Opcode) { 5086 default: break; 5087 case ISD::TokenFactor: 5088 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 5089 N2.getValueType() == MVT::Other && "Invalid token factor!"); 5090 // Fold trivial token factors. 5091 if (N1.getOpcode() == ISD::EntryToken) return N2; 5092 if (N2.getOpcode() == ISD::EntryToken) return N1; 5093 if (N1 == N2) return N1; 5094 break; 5095 case ISD::BUILD_VECTOR: { 5096 // Attempt to simplify BUILD_VECTOR. 5097 SDValue Ops[] = {N1, N2}; 5098 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5099 return V; 5100 break; 5101 } 5102 case ISD::CONCAT_VECTORS: { 5103 SDValue Ops[] = {N1, N2}; 5104 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5105 return V; 5106 break; 5107 } 5108 case ISD::AND: 5109 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5110 assert(N1.getValueType() == N2.getValueType() && 5111 N1.getValueType() == VT && "Binary operator types must match!"); 5112 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 5113 // worth handling here. 
5114 if (N2C && N2C->isNullValue())
5115 return N2;
5116 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
5117 return N1;
5118 break;
5119 case ISD::OR:
5120 case ISD::XOR:
5121 case ISD::ADD:
5122 case ISD::SUB:
5123 assert(VT.isInteger() && "This operator does not apply to FP types!");
5124 assert(N1.getValueType() == N2.getValueType() &&
5125 N1.getValueType() == VT && "Binary operator types must match!");
5126 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5127 // it's worth handling here.
5128 if (N2C && N2C->isNullValue())
5129 return N1;
5130 break;
5131 case ISD::UDIV:
5132 case ISD::UREM:
5133 case ISD::MULHU:
5134 case ISD::MULHS:
5135 case ISD::MUL:
5136 case ISD::SDIV:
5137 case ISD::SREM:
5138 case ISD::SMIN:
5139 case ISD::SMAX:
5140 case ISD::UMIN:
5141 case ISD::UMAX:
5142 case ISD::SADDSAT:
5143 case ISD::SSUBSAT:
5144 case ISD::UADDSAT:
5145 case ISD::USUBSAT:
5146 assert(VT.isInteger() && "This operator does not apply to FP types!");
5147 assert(N1.getValueType() == N2.getValueType() &&
5148 N1.getValueType() == VT && "Binary operator types must match!");
5149 break;
5150 case ISD::FADD:
5151 case ISD::FSUB:
5152 case ISD::FMUL:
5153 case ISD::FDIV:
5154 case ISD::FREM:
5155 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5156 assert(N1.getValueType() == N2.getValueType() &&
5157 N1.getValueType() == VT && "Binary operator types must match!");
5158 if (SDValue V = simplifyFPBinop(Opcode, N1, N2))
5159 return V;
5160 break;
5161 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
5162 assert(N1.getValueType() == VT &&
5163 N1.getValueType().isFloatingPoint() &&
5164 N2.getValueType().isFloatingPoint() &&
5165 "Invalid FCOPYSIGN!");
5166 break;
5167 case ISD::SHL:
5168 case ISD::SRA:
5169 case ISD::SRL:
5170 if (SDValue V = simplifyShift(N1, N2))
5171 return V;
5172 LLVM_FALLTHROUGH;
5173 case ISD::ROTL:
5174 case ISD::ROTR:
5175 assert(VT == N1.getValueType() &&
5176 "Shift operator's return type must be the same as its first arg");
5177 assert(VT.isInteger() && N2.getValueType().isInteger() &&
5178 "Shifts only work on integers");
5179 assert((!VT.isVector() || VT == N2.getValueType()) &&
5180 "Vector shift amounts must have the same type as their first arg");
5181 // Verify that the shift amount VT is big enough to hold valid shift
5182 // amounts. This catches things like trying to shift an i1024 value by an
5183 // i8, which is easy to fall into in generic code that uses
5184 // TLI.getShiftAmount().
5185 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
5186 "Invalid use of small shift amount with oversized value!");
5187
5188 // Always fold shifts of i1 values so the code generator doesn't need to
5189 // handle them. Since we know the size of the shift has to be less than the
5190 // size of the value, the shift/rotate count is guaranteed to be zero.
5191 if (VT == MVT::i1)
5192 return N1;
5193 if (N2C && N2C->isNullValue())
5194 return N1;
5195 break;
5196 case ISD::FP_ROUND:
5197 assert(VT.isFloatingPoint() &&
5198 N1.getValueType().isFloatingPoint() &&
5199 VT.bitsLE(N1.getValueType()) &&
5200 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5201 "Invalid FP_ROUND!");
5202 if (N1.getValueType() == VT) return N1; // noop conversion.
5203 break; 5204 case ISD::AssertSext: 5205 case ISD::AssertZext: { 5206 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5207 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5208 assert(VT.isInteger() && EVT.isInteger() && 5209 "Cannot *_EXTEND_INREG FP types"); 5210 assert(!EVT.isVector() && 5211 "AssertSExt/AssertZExt type should be the vector element type " 5212 "rather than the vector type!"); 5213 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!"); 5214 if (VT.getScalarType() == EVT) return N1; // noop assertion. 5215 break; 5216 } 5217 case ISD::SIGN_EXTEND_INREG: { 5218 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5219 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5220 assert(VT.isInteger() && EVT.isInteger() && 5221 "Cannot *_EXTEND_INREG FP types"); 5222 assert(EVT.isVector() == VT.isVector() && 5223 "SIGN_EXTEND_INREG type should be vector iff the operand " 5224 "type is vector!"); 5225 assert((!EVT.isVector() || 5226 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 5227 "Vector element counts must match in SIGN_EXTEND_INREG"); 5228 assert(EVT.bitsLE(VT) && "Not extending!"); 5229 if (EVT == VT) return N1; // Not actually extending 5230 5231 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 5232 unsigned FromBits = EVT.getScalarSizeInBits(); 5233 Val <<= Val.getBitWidth() - FromBits; 5234 Val.ashrInPlace(Val.getBitWidth() - FromBits); 5235 return getConstant(Val, DL, ConstantVT); 5236 }; 5237 5238 if (N1C) { 5239 const APInt &Val = N1C->getAPIntValue(); 5240 return SignExtendInReg(Val, VT); 5241 } 5242 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 5243 SmallVector<SDValue, 8> Ops; 5244 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 5245 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5246 SDValue Op = N1.getOperand(i); 5247 if (Op.isUndef()) { 5248 Ops.push_back(getUNDEF(OpVT)); 5249 continue; 5250 } 5251 ConstantSDNode *C = cast<ConstantSDNode>(Op); 5252 APInt Val = C->getAPIntValue(); 5253 Ops.push_back(SignExtendInReg(Val, OpVT)); 5254 } 5255 return getBuildVector(VT, DL, Ops); 5256 } 5257 break; 5258 } 5259 case ISD::EXTRACT_VECTOR_ELT: 5260 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() && 5261 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \ 5262 element type of the vector."); 5263 5264 // Extract from an undefined value or using an undefined index is undefined. 5265 if (N1.isUndef() || N2.isUndef()) 5266 return getUNDEF(VT); 5267 5268 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 5269 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) 5270 return getUNDEF(VT); 5271 5272 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 5273 // expanding copies of large vectors from registers. 5274 if (N2C && 5275 N1.getOpcode() == ISD::CONCAT_VECTORS && 5276 N1.getNumOperands() > 0) { 5277 unsigned Factor = 5278 N1.getOperand(0).getValueType().getVectorNumElements(); 5279 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 5280 N1.getOperand(N2C->getZExtValue() / Factor), 5281 getConstant(N2C->getZExtValue() % Factor, DL, 5282 N2.getValueType())); 5283 } 5284 5285 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 5286 // expanding large vector constants. 
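// For example, (extract_vector_elt (build_vector a, b, c, d), 2) becomes c;
// if the element was promoted during type legalization, an any-extend or
// truncate is inserted below to make the implicit conversion explicit.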
5287 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
5288 SDValue Elt = N1.getOperand(N2C->getZExtValue());
5289
5290 if (VT != Elt.getValueType())
5291 // If the vector element type is not legal, the BUILD_VECTOR operands
5292 // are promoted and implicitly truncated, and the result implicitly
5293 // extended. Make that explicit here.
5294 Elt = getAnyExtOrTrunc(Elt, DL, VT);
5295
5296 return Elt;
5297 }
5298
5299 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5300 // operations are lowered to scalars.
5301 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5302 // If the indices are the same, return the inserted element else
5303 // if the indices are known different, extract the element from
5304 // the original vector.
5305 SDValue N1Op2 = N1.getOperand(2);
5306 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5307
5308 if (N1Op2C && N2C) {
5309 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5310 if (VT == N1.getOperand(1).getValueType())
5311 return N1.getOperand(1);
5312 else
5313 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5314 }
5315
5316 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5317 }
5318 }
5319
5320 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5321 // when vector types are scalarized and v1iX is legal.
5322 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
5323 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5324 N1.getValueType().getVectorNumElements() == 1) {
5325 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5326 N1.getOperand(1));
5327 }
5328 break;
5329 case ISD::EXTRACT_ELEMENT:
5330 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5331 assert(!N1.getValueType().isVector() && !VT.isVector() &&
5332 (N1.getValueType().isInteger() == VT.isInteger()) &&
5333 N1.getValueType() != VT &&
5334 "Wrong types for EXTRACT_ELEMENT!");
5335
5336 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5337 // 64-bit integers into 32-bit parts. Instead of building the extract of
5338 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5339 if (N1.getOpcode() == ISD::BUILD_PAIR)
5340 return N1.getOperand(N2C->getZExtValue());
5341
5342 // EXTRACT_ELEMENT of a constant int is also very common.
5343 if (N1C) {
5344 unsigned ElementSize = VT.getSizeInBits();
5345 unsigned Shift = ElementSize * N2C->getZExtValue();
5346 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
5347 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
5348 }
5349 break;
5350 case ISD::EXTRACT_SUBVECTOR:
5351 if (VT.isSimple() && N1.getValueType().isSimple()) {
5352 assert(VT.isVector() && N1.getValueType().isVector() &&
5353 "Extract subvector VTs must be vectors!");
5354 assert(VT.getVectorElementType() ==
5355 N1.getValueType().getVectorElementType() &&
5356 "Extract subvector VTs must have the same element type!");
5357 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
5358 "Extract subvector must be from larger vector to smaller vector!");
5359
5360 if (N2C) {
5361 assert((VT.getVectorNumElements() + N2C->getZExtValue()
5362 <= N1.getValueType().getVectorNumElements())
5363 && "Extract subvector overflow!");
5364 }
5365
5366 // Trivial extraction.
5367 if (VT.getSimpleVT() == N1.getSimpleValueType())
5368 return N1;
5369
5370 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5371 if (N1.isUndef())
5372 return getUNDEF(VT);
5373
5374 // EXTRACT_SUBVECTOR of CONCAT_VECTORS can be simplified if the pieces of
5375 // the concat have the same type as the extract.
5376 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
5377 N1.getNumOperands() > 0 &&
5378 VT == N1.getOperand(0).getValueType()) {
5379 unsigned Factor = VT.getVectorNumElements();
5380 return N1.getOperand(N2C->getZExtValue() / Factor);
5381 }
5382
5383 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5384 // during shuffle legalization.
5385 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5386 VT == N1.getOperand(1).getValueType())
5387 return N1.getOperand(1);
5388 }
5389 break;
5390 }
5391
5392 // Perform trivial constant folding.
5393 if (SDValue SV =
5394 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
5395 return SV;
5396
5397 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
5398 return V;
5399
5400 // Canonicalize an UNDEF to the RHS, even over a constant.
5401 if (N1.isUndef()) {
5402 if (TLI->isCommutativeBinOp(Opcode)) {
5403 std::swap(N1, N2);
5404 } else {
5405 switch (Opcode) {
5406 case ISD::SIGN_EXTEND_INREG:
5407 case ISD::SUB:
5408 return getUNDEF(VT); // fold op(undef, arg2) -> undef
5409 case ISD::UDIV:
5410 case ISD::SDIV:
5411 case ISD::UREM:
5412 case ISD::SREM:
5413 case ISD::SSUBSAT:
5414 case ISD::USUBSAT:
5415 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
5416 }
5417 }
5418 }
5419
5420 // Fold a bunch of operators when the RHS is undef.
5421 if (N2.isUndef()) {
5422 switch (Opcode) {
5423 case ISD::XOR:
5424 if (N1.isUndef())
5425 // Handle undef ^ undef -> 0 special case. This is a common
5426 // idiom (misuse).
5427 return getConstant(0, DL, VT);
5428 LLVM_FALLTHROUGH;
5429 case ISD::ADD:
5430 case ISD::SUB:
5431 case ISD::UDIV:
5432 case ISD::SDIV:
5433 case ISD::UREM:
5434 case ISD::SREM:
5435 return getUNDEF(VT); // fold op(arg1, undef) -> undef
5436 case ISD::MUL:
5437 case ISD::AND:
5438 case ISD::SSUBSAT:
5439 case ISD::USUBSAT:
5440 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
5441 case ISD::OR:
5442 case ISD::SADDSAT:
5443 case ISD::UADDSAT:
5444 return getAllOnesConstant(DL, VT);
5445 }
5446 }
5447
5448 // Memoize this node if possible.
5449 SDNode *N;
5450 SDVTList VTs = getVTList(VT);
5451 SDValue Ops[] = {N1, N2};
5452 if (VT != MVT::Glue) {
5453 FoldingSetNodeID ID;
5454 AddNodeIDNode(ID, Opcode, VTs, Ops);
5455 void *IP = nullptr;
5456 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5457 E->intersectFlagsWith(Flags);
5458 return SDValue(E, 0);
5459 }
5460
5461 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5462 N->setFlags(Flags);
5463 createOperands(N, Ops);
5464 CSEMap.InsertNode(N, IP);
5465 } else {
5466 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5467 createOperands(N, Ops);
5468 }
5469
5470 InsertNode(N);
5471 SDValue V = SDValue(N, 0);
5472 NewSDValueDbgMsg(V, "Creating new node: ", this);
5473 return V;
5474 }
5475
5476 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5477 SDValue N1, SDValue N2, SDValue N3,
5478 const SDNodeFlags Flags) {
5479 // Perform various simplifications.
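// For example, (fma 2.0, 3.0, 1.0) is folded below to a ConstantFP of 7.0
// via APFloat::fusedMultiplyAdd with round-to-nearest-even.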
5480 switch (Opcode) { 5481 case ISD::FMA: { 5482 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5483 assert(N1.getValueType() == VT && N2.getValueType() == VT && 5484 N3.getValueType() == VT && "FMA types must match!"); 5485 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5486 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5487 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 5488 if (N1CFP && N2CFP && N3CFP) { 5489 APFloat V1 = N1CFP->getValueAPF(); 5490 const APFloat &V2 = N2CFP->getValueAPF(); 5491 const APFloat &V3 = N3CFP->getValueAPF(); 5492 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 5493 return getConstantFP(V1, DL, VT); 5494 } 5495 break; 5496 } 5497 case ISD::BUILD_VECTOR: { 5498 // Attempt to simplify BUILD_VECTOR. 5499 SDValue Ops[] = {N1, N2, N3}; 5500 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5501 return V; 5502 break; 5503 } 5504 case ISD::CONCAT_VECTORS: { 5505 SDValue Ops[] = {N1, N2, N3}; 5506 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5507 return V; 5508 break; 5509 } 5510 case ISD::SETCC: { 5511 assert(VT.isInteger() && "SETCC result type must be an integer!"); 5512 assert(N1.getValueType() == N2.getValueType() && 5513 "SETCC operands must have the same type!"); 5514 assert(VT.isVector() == N1.getValueType().isVector() && 5515 "SETCC type should be vector iff the operand type is vector!"); 5516 assert((!VT.isVector() || 5517 VT.getVectorNumElements() == N1.getValueType().getVectorNumElements()) && 5518 "SETCC vector element counts must match!"); 5519 // Use FoldSetCC to simplify SETCC's. 5520 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 5521 return V; 5522 // Vector constant folding. 5523 SDValue Ops[] = {N1, N2, N3}; 5524 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 5525 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 5526 return V; 5527 } 5528 break; 5529 } 5530 case ISD::SELECT: 5531 case ISD::VSELECT: 5532 if (SDValue V = simplifySelect(N1, N2, N3)) 5533 return V; 5534 break; 5535 case ISD::VECTOR_SHUFFLE: 5536 llvm_unreachable("should use getVectorShuffle constructor!"); 5537 case ISD::INSERT_VECTOR_ELT: { 5538 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 5539 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF 5540 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 5541 return getUNDEF(VT); 5542 5543 // Undefined index can be assumed out-of-bounds, so that's UNDEF too. 5544 if (N3.isUndef()) 5545 return getUNDEF(VT); 5546 5547 // If the inserted element is an UNDEF, just use the input vector. 5548 if (N2.isUndef()) 5549 return N1; 5550 5551 break; 5552 } 5553 case ISD::INSERT_SUBVECTOR: { 5554 // Inserting undef into undef is still undef. 
5555 if (N1.isUndef() && N2.isUndef())
5556 return getUNDEF(VT);
5557 SDValue Index = N3;
5558 if (VT.isSimple() && N1.getValueType().isSimple()
5559 && N2.getValueType().isSimple()) {
5560 assert(VT.isVector() && N1.getValueType().isVector() &&
5561 N2.getValueType().isVector() &&
5562 "Insert subvector VTs must be vectors");
5563 assert(VT == N1.getValueType() &&
5564 "Dest and insert subvector source types must match!");
5565 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
5566 "Insert subvector must be from smaller vector to larger vector!");
5567 if (isa<ConstantSDNode>(Index)) {
5568 assert((N2.getValueType().getVectorNumElements() +
5569 cast<ConstantSDNode>(Index)->getZExtValue()
5570 <= VT.getVectorNumElements())
5571 && "Insert subvector overflow!");
5572 }
5573
5574 // Trivial insertion.
5575 if (VT.getSimpleVT() == N2.getSimpleValueType())
5576 return N2;
5577
5578 // If this is an insert of an extracted vector into an undef vector, we
5579 // can just use the input to the extract.
5580 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5581 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
5582 return N2.getOperand(0);
5583 }
5584 break;
5585 }
5586 case ISD::BITCAST:
5587 // Fold bit_convert nodes from a type to themselves.
5588 if (N1.getValueType() == VT)
5589 return N1;
5590 break;
5591 }
5592
5593 // Memoize node if it doesn't produce a flag.
5594 SDNode *N;
5595 SDVTList VTs = getVTList(VT);
5596 SDValue Ops[] = {N1, N2, N3};
5597 if (VT != MVT::Glue) {
5598 FoldingSetNodeID ID;
5599 AddNodeIDNode(ID, Opcode, VTs, Ops);
5600 void *IP = nullptr;
5601 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5602 E->intersectFlagsWith(Flags);
5603 return SDValue(E, 0);
5604 }
5605
5606 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5607 N->setFlags(Flags);
5608 createOperands(N, Ops);
5609 CSEMap.InsertNode(N, IP);
5610 } else {
5611 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5612 createOperands(N, Ops);
5613 }
5614
5615 InsertNode(N);
5616 SDValue V = SDValue(N, 0);
5617 NewSDValueDbgMsg(V, "Creating new node: ", this);
5618 return V;
5619 }
5620
5621 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5622 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5623 SDValue Ops[] = { N1, N2, N3, N4 };
5624 return getNode(Opcode, DL, VT, Ops);
5625 }
5626
5627 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5628 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5629 SDValue N5) {
5630 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5631 return getNode(Opcode, DL, VT, Ops);
5632 }
5633
5634 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
5635 /// the incoming stack arguments to be loaded from the stack.
5636 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
5637 SmallVector<SDValue, 8> ArgChains;
5638
5639 // Include the original chain at the beginning of the list. When this is
5640 // used by target LowerCall hooks, this helps legalize find the
5641 // CALLSEQ_BEGIN node.
5642 ArgChains.push_back(Chain);
5643
5644 // Add a chain value for each stack argument.
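// Incoming stack arguments live in fixed frame objects, which are assigned
// negative frame indices, so loads through such a FrameIndex hanging off
// the entry token are exactly the ones collected below.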
5645 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 5646 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 5647 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 5648 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 5649 if (FI->getIndex() < 0) 5650 ArgChains.push_back(SDValue(L, 1)); 5651 5652 // Build a tokenfactor for all the chains. 5653 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 5654 } 5655 5656 /// getMemsetValue - Vectorized representation of the memset value 5657 /// operand. 5658 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 5659 const SDLoc &dl) { 5660 assert(!Value.isUndef()); 5661 5662 unsigned NumBits = VT.getScalarSizeInBits(); 5663 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 5664 assert(C->getAPIntValue().getBitWidth() == 8); 5665 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 5666 if (VT.isInteger()) { 5667 bool IsOpaque = VT.getSizeInBits() > 64 || 5668 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); 5669 return DAG.getConstant(Val, dl, VT, false, IsOpaque); 5670 } 5671 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 5672 VT); 5673 } 5674 5675 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 5676 EVT IntVT = VT.getScalarType(); 5677 if (!IntVT.isInteger()) 5678 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 5679 5680 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 5681 if (NumBits > 8) { 5682 // Use a multiplication with 0x010101... to extend the input to the 5683 // required length. 5684 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 5685 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 5686 DAG.getConstant(Magic, dl, IntVT)); 5687 } 5688 5689 if (VT != Value.getValueType() && !VT.isInteger()) 5690 Value = DAG.getBitcast(VT.getScalarType(), Value); 5691 if (VT != Value.getValueType()) 5692 Value = DAG.getSplatBuildVector(VT, dl, Value); 5693 5694 return Value; 5695 } 5696 5697 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 5698 /// used when a memcpy is turned into a memset when the source is a constant 5699 /// string ptr. 5700 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 5701 const TargetLowering &TLI, 5702 const ConstantDataArraySlice &Slice) { 5703 // Handle vector with all elements zero. 5704 if (Slice.Array == nullptr) { 5705 if (VT.isInteger()) 5706 return DAG.getConstant(0, dl, VT); 5707 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 5708 return DAG.getConstantFP(0.0, dl, VT); 5709 else if (VT.isVector()) { 5710 unsigned NumElts = VT.getVectorNumElements(); 5711 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? 
MVT::i32 : MVT::i64; 5712 return DAG.getNode(ISD::BITCAST, dl, VT, 5713 DAG.getConstant(0, dl, 5714 EVT::getVectorVT(*DAG.getContext(), 5715 EltVT, NumElts))); 5716 } else 5717 llvm_unreachable("Expected type!"); 5718 } 5719 5720 assert(!VT.isVector() && "Can't handle vector type here!"); 5721 unsigned NumVTBits = VT.getSizeInBits(); 5722 unsigned NumVTBytes = NumVTBits / 8; 5723 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 5724 5725 APInt Val(NumVTBits, 0); 5726 if (DAG.getDataLayout().isLittleEndian()) { 5727 for (unsigned i = 0; i != NumBytes; ++i) 5728 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 5729 } else { 5730 for (unsigned i = 0; i != NumBytes; ++i) 5731 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 5732 } 5733 5734 // If the "cost" of materializing the integer immediate is less than the cost 5735 // of a load, then it is cost effective to turn the load into the immediate. 5736 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 5737 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 5738 return DAG.getConstant(Val, dl, VT); 5739 return SDValue(nullptr, 0); 5740 } 5741 5742 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, int64_t Offset, 5743 const SDLoc &DL, 5744 const SDNodeFlags Flags) { 5745 EVT VT = Base.getValueType(); 5746 return getMemBasePlusOffset(Base, getConstant(Offset, DL, VT), DL, Flags); 5747 } 5748 5749 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset, 5750 const SDLoc &DL, 5751 const SDNodeFlags Flags) { 5752 assert(Offset.getValueType().isInteger()); 5753 EVT BasePtrVT = Ptr.getValueType(); 5754 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags); 5755 } 5756 5757 /// Returns true if memcpy source is constant data. 5758 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 5759 uint64_t SrcDelta = 0; 5760 GlobalAddressSDNode *G = nullptr; 5761 if (Src.getOpcode() == ISD::GlobalAddress) 5762 G = cast<GlobalAddressSDNode>(Src); 5763 else if (Src.getOpcode() == ISD::ADD && 5764 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 5765 Src.getOperand(1).getOpcode() == ISD::Constant) { 5766 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 5767 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 5768 } 5769 if (!G) 5770 return false; 5771 5772 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 5773 SrcDelta + G->getOffset()); 5774 } 5775 5776 static bool shouldLowerMemFuncForSize(const MachineFunction &MF, 5777 SelectionDAG &DAG) { 5778 // On Darwin, -Os means optimize for size without hurting performance, so 5779 // only really optimize for size when -Oz (MinSize) is used. 5780 if (MF.getTarget().getTargetTriple().isOSDarwin()) 5781 return MF.getFunction().hasMinSize(); 5782 return DAG.shouldOptForSize(); 5783 } 5784 5785 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, 5786 SmallVector<SDValue, 32> &OutChains, unsigned From, 5787 unsigned To, SmallVector<SDValue, 16> &OutLoadChains, 5788 SmallVector<SDValue, 16> &OutStoreChains) { 5789 assert(OutLoadChains.size() && "Missing loads in memcpy inlining"); 5790 assert(OutStoreChains.size() && "Missing stores in memcpy inlining"); 5791 SmallVector<SDValue, 16> GluedLoadChains; 5792 for (unsigned i = From; i < To; ++i) { 5793 OutChains.push_back(OutLoadChains[i]); 5794 GluedLoadChains.push_back(OutLoadChains[i]); 5795 } 5796 5797 // Chain for all loads. 
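// The TokenFactor built below ties together the load chains in [From, To),
// so every load in the group is issued before the rewritten stores, each of
// which takes LoadToken as its new chain input.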
5798 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
5799 GluedLoadChains);
5800
5801 for (unsigned i = From; i < To; ++i) {
5802 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
5803 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
5804 ST->getBasePtr(), ST->getMemoryVT(),
5805 ST->getMemOperand());
5806 OutChains.push_back(NewStore);
5807 }
5808 }
5809
5810 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5811 SDValue Chain, SDValue Dst, SDValue Src,
5812 uint64_t Size, unsigned Alignment,
5813 bool isVol, bool AlwaysInline,
5814 MachinePointerInfo DstPtrInfo,
5815 MachinePointerInfo SrcPtrInfo) {
5816 // Turn a memcpy of undef to nop.
5817 // FIXME: We need to honor volatile even if Src is undef.
5818 if (Src.isUndef())
5819 return Chain;
5820
5821 // Expand memcpy to a series of load and store ops if the size operand falls
5822 // below a certain threshold.
5823 // TODO: In the AlwaysInline case, if the size is big then generate a loop
5824 // rather than maybe a humongous number of loads and stores.
5825 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5826 const DataLayout &DL = DAG.getDataLayout();
5827 LLVMContext &C = *DAG.getContext();
5828 std::vector<EVT> MemOps;
5829 bool DstAlignCanChange = false;
5830 MachineFunction &MF = DAG.getMachineFunction();
5831 MachineFrameInfo &MFI = MF.getFrameInfo();
5832 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
5833 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5834 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5835 DstAlignCanChange = true;
5836 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5837 if (Alignment > SrcAlign)
5838 SrcAlign = Alignment;
5839 ConstantDataArraySlice Slice;
5840 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
5841 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
5842 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
5843
5844 if (!TLI.findOptimalMemOpLowering(
5845 MemOps, Limit, Size, (DstAlignCanChange ? 0 : Alignment),
5846 (isZeroConstant ? 0 : SrcAlign), /*IsMemset=*/false,
5847 /*ZeroMemset=*/false, /*MemcpyStrSrc=*/CopyFromConstant,
5848 /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(),
5849 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
5850 return SDValue();
5851
5852 if (DstAlignCanChange) {
5853 Type *Ty = MemOps[0].getTypeForEVT(C);
5854 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5855
5856 // Don't promote to an alignment that would require dynamic stack
5857 // realignment.
5858 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
5859 if (!TRI->needsStackRealignment(MF))
5860 while (NewAlign > Alignment &&
5861 DL.exceedsNaturalStackAlignment(Align(NewAlign)))
5862 NewAlign /= 2;
5863
5864 if (NewAlign > Alignment) {
5865 // Give the stack frame object a larger alignment if needed.
5866 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5867 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5868 Alignment = NewAlign;
5869 }
5870 }
5871
5872 MachineMemOperand::Flags MMOFlags =
5873 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5874 SmallVector<SDValue, 16> OutLoadChains;
5875 SmallVector<SDValue, 16> OutStoreChains;
5876 SmallVector<SDValue, 32> OutChains;
5877 unsigned NumMemOps = MemOps.size();
5878 uint64_t SrcOff = 0, DstOff = 0;
5879 for (unsigned i = 0; i != NumMemOps; ++i) {
5880 EVT VT = MemOps[i];
5881 unsigned VTSize = VT.getSizeInBits() / 8;
5882 SDValue Value, Store;
5883
5884 if (VTSize > Size) {
5885 // Issuing an unaligned load / store pair that overlaps with the previous
5886 // pair. Adjust the offset accordingly.
5887 assert(i == NumMemOps-1 && i != 0);
5888 SrcOff -= VTSize - Size;
5889 DstOff -= VTSize - Size;
5890 }
5891
5892 if (CopyFromConstant &&
5893 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
5894 // It's unlikely a store of a vector immediate can be done in a single
5895 // instruction. It would require a load from a constantpool first.
5896 // We only handle zero vectors here.
5897 // FIXME: Handle other cases where store of vector immediate is done in
5898 // a single instruction.
5899 ConstantDataArraySlice SubSlice;
5900 if (SrcOff < Slice.Length) {
5901 SubSlice = Slice;
5902 SubSlice.move(SrcOff);
5903 } else {
5904 // This is an out-of-bounds access and hence UB. Pretend we read zero.
5905 SubSlice.Array = nullptr;
5906 SubSlice.Offset = 0;
5907 SubSlice.Length = VTSize;
5908 }
5909 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
5910 if (Value.getNode()) {
5911 Store = DAG.getStore(
5912 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5913 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
5914 OutChains.push_back(Store);
5915 }
5916 }
5917
5918 if (!Store.getNode()) {
5919 // The type might not be legal for the target. This should only happen
5920 // if the type is smaller than a legal type, as on PPC, so the right
5921 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
5922 // to Load/Store if NVT==VT.
5923 // FIXME does the case above also need this?
5924 EVT NVT = TLI.getTypeToTransformTo(C, VT);
5925 assert(NVT.bitsGE(VT));
5926
5927 bool isDereferenceable =
5928 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5929 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5930 if (isDereferenceable)
5931 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5932
5933 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
5934 DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5935 SrcPtrInfo.getWithOffset(SrcOff), VT,
5936 MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
5937 OutLoadChains.push_back(Value.getValue(1));
5938
5939 Store = DAG.getTruncStore(
5940 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5941 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
5942 OutStoreChains.push_back(Store);
5943 }
5944 SrcOff += VTSize;
5945 DstOff += VTSize;
5946 Size -= VTSize;
5947 }
5948
5949 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
5950 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
5951 unsigned NumLdStInMemcpy = OutStoreChains.size();
5952
5953 if (NumLdStInMemcpy) {
5954 // A memcpy of constants may have been converted to a memset above. In
5955 // that case there are no loads, only stores, and in the absence of
5956 // loads there is nothing to gang up.
5957 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
5958 // If the target does not care, just leave it as is.
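// With gluing disabled, each load is simply interleaved with its store in
// the original emission order.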
5959 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
5960 OutChains.push_back(OutLoadChains[i]);
5961 OutChains.push_back(OutStoreChains[i]);
5962 }
5963 } else {
5964 // Ld/St less than/equal limit set by target.
5965 if (NumLdStInMemcpy <= GluedLdStLimit) {
5966 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
5967 NumLdStInMemcpy, OutLoadChains,
5968 OutStoreChains);
5969 } else {
5970 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
5971 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
5972 unsigned GlueIter = 0;
5973
5974 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
5975 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
5976 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
5977
5978 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
5979 OutLoadChains, OutStoreChains);
5980 GlueIter += GluedLdStLimit;
5981 }
5982
5983 // Residual ld/st.
5984 if (RemainingLdStInMemcpy) {
5985 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
5986 RemainingLdStInMemcpy, OutLoadChains,
5987 OutStoreChains);
5988 }
5989 }
5990 }
5991 }
5992 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
5993 }
5994
5995 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5996 SDValue Chain, SDValue Dst, SDValue Src,
5997 uint64_t Size, unsigned Align,
5998 bool isVol, bool AlwaysInline,
5999 MachinePointerInfo DstPtrInfo,
6000 MachinePointerInfo SrcPtrInfo) {
6001 // Turn a memmove of undef to nop.
6002 // FIXME: We need to honor volatile even if Src is undef.
6003 if (Src.isUndef())
6004 return Chain;
6005
6006 // Expand memmove to a series of load and store ops if the size operand falls
6007 // below a certain threshold.
6008 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6009 const DataLayout &DL = DAG.getDataLayout();
6010 LLVMContext &C = *DAG.getContext();
6011 std::vector<EVT> MemOps;
6012 bool DstAlignCanChange = false;
6013 MachineFunction &MF = DAG.getMachineFunction();
6014 MachineFrameInfo &MFI = MF.getFrameInfo();
6015 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6016 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6017 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6018 DstAlignCanChange = true;
6019 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
6020 if (Align > SrcAlign)
6021 SrcAlign = Align;
6022 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
6023 // FIXME: `AllowOverlap` should really be `!isVol` but there is a bug in
6024 // findOptimalMemOpLowering. Meanwhile, setting it to `false` produces the
6025 // correct code.
6026 bool AllowOverlap = false;
6027 if (!TLI.findOptimalMemOpLowering(
6028 MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align), SrcAlign,
6029 /*IsMemset=*/false, /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
6030 AllowOverlap, DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
6031 MF.getFunction().getAttributes()))
6032 return SDValue();
6033
6034 if (DstAlignCanChange) {
6035 Type *Ty = MemOps[0].getTypeForEVT(C);
6036 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
6037 if (NewAlign > Align) {
6038 // Give the stack frame object a larger alignment if needed.
6039 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
6040 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6041 Align = NewAlign;
6042 }
6043 }
6044
6045 MachineMemOperand::Flags MMOFlags =
6046 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6047 uint64_t SrcOff = 0, DstOff = 0;
6048 SmallVector<SDValue, 8> LoadValues;
6049 SmallVector<SDValue, 8> LoadChains;
6050 SmallVector<SDValue, 8> OutChains;
6051 unsigned NumMemOps = MemOps.size();
6052 for (unsigned i = 0; i < NumMemOps; i++) {
6053 EVT VT = MemOps[i];
6054 unsigned VTSize = VT.getSizeInBits() / 8;
6055 SDValue Value;
6056
6057 bool isDereferenceable =
6058 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6059 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6060 if (isDereferenceable)
6061 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6062
6063 Value =
6064 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
6065 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
6066 LoadValues.push_back(Value);
6067 LoadChains.push_back(Value.getValue(1));
6068 SrcOff += VTSize;
6069 }
6070 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
6071 OutChains.clear();
6072 for (unsigned i = 0; i < NumMemOps; i++) {
6073 EVT VT = MemOps[i];
6074 unsigned VTSize = VT.getSizeInBits() / 8;
6075 SDValue Store;
6076
6077 Store = DAG.getStore(Chain, dl, LoadValues[i],
6078 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
6079 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
6080 OutChains.push_back(Store);
6081 DstOff += VTSize;
6082 }
6083
6084 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6085 }
6086
6087 /// Lower the call to 'memset' intrinsic function into a series of store
6088 /// operations.
6089 ///
6090 /// \param DAG Selection DAG where lowered code is placed.
6091 /// \param dl Link to corresponding IR location.
6092 /// \param Chain Control flow dependency.
6093 /// \param Dst Pointer to destination memory location.
6094 /// \param Src Value of byte to write into the memory.
6095 /// \param Size Number of bytes to write.
6096 /// \param Align Alignment of the destination in bytes.
6097 /// \param isVol True if destination is volatile.
6098 /// \param DstPtrInfo IR information on the memory pointer.
6099 /// \returns New head in the control flow, if lowering was successful, empty
6100 /// SDValue otherwise.
6101 ///
6102 /// The function tries to replace 'llvm.memset' intrinsic with several store
6103 /// operations and value calculation code. This is usually profitable for small
6104 /// memory size.
6105 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
6106 SDValue Chain, SDValue Dst, SDValue Src,
6107 uint64_t Size, unsigned Align, bool isVol,
6108 MachinePointerInfo DstPtrInfo) {
6109 // Turn a memset of undef to nop.
6110 // FIXME: We need to honor volatile even if Src is undef.
6111 if (Src.isUndef())
6112 return Chain;
6113
6114 // Expand memset to a series of store ops if the size operand
6115 // falls below a certain threshold.
6116 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6117 std::vector<EVT> MemOps;
6118 bool DstAlignCanChange = false;
6119 MachineFunction &MF = DAG.getMachineFunction();
6120 MachineFrameInfo &MFI = MF.getFrameInfo();
6121 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6122 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6123 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6124 DstAlignCanChange = true;
6125 bool IsZeroVal =
6126 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
6127 if (!TLI.findOptimalMemOpLowering(
6128 MemOps, TLI.getMaxStoresPerMemset(OptSize), Size,
6129 (DstAlignCanChange ?
0 : Align), 0, /*IsMemset=*/true, 6130 /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false, 6131 /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(), ~0u, 6132 MF.getFunction().getAttributes())) 6133 return SDValue(); 6134 6135 if (DstAlignCanChange) { 6136 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 6137 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 6138 if (NewAlign > Align) { 6139 // Give the stack frame object a larger alignment if needed. 6140 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 6141 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6142 Align = NewAlign; 6143 } 6144 } 6145 6146 SmallVector<SDValue, 8> OutChains; 6147 uint64_t DstOff = 0; 6148 unsigned NumMemOps = MemOps.size(); 6149 6150 // Find the largest store and generate the bit pattern for it. 6151 EVT LargestVT = MemOps[0]; 6152 for (unsigned i = 1; i < NumMemOps; i++) 6153 if (MemOps[i].bitsGT(LargestVT)) 6154 LargestVT = MemOps[i]; 6155 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 6156 6157 for (unsigned i = 0; i < NumMemOps; i++) { 6158 EVT VT = MemOps[i]; 6159 unsigned VTSize = VT.getSizeInBits() / 8; 6160 if (VTSize > Size) { 6161 // Issuing an unaligned load / store pair that overlaps with the previous 6162 // pair. Adjust the offset accordingly. 6163 assert(i == NumMemOps-1 && i != 0); 6164 DstOff -= VTSize - Size; 6165 } 6166 6167 // If this store is smaller than the largest store see whether we can get 6168 // the smaller value for free with a truncate. 6169 SDValue Value = MemSetValue; 6170 if (VT.bitsLT(LargestVT)) { 6171 if (!LargestVT.isVector() && !VT.isVector() && 6172 TLI.isTruncateFree(LargestVT, VT)) 6173 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 6174 else 6175 Value = getMemsetValue(Src, VT, DAG, dl); 6176 } 6177 assert(Value.getValueType() == VT && "Value with wrong type."); 6178 SDValue Store = DAG.getStore( 6179 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6180 DstPtrInfo.getWithOffset(DstOff), Align, 6181 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 6182 OutChains.push_back(Store); 6183 DstOff += VT.getSizeInBits() / 8; 6184 Size -= VTSize; 6185 } 6186 6187 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6188 } 6189 6190 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 6191 unsigned AS) { 6192 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 6193 // pointer operands can be losslessly bitcasted to pointers of address space 0 6194 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 6195 report_fatal_error("cannot lower memory intrinsic in address space " + 6196 Twine(AS)); 6197 } 6198 } 6199 6200 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 6201 SDValue Src, SDValue Size, unsigned Align, 6202 bool isVol, bool AlwaysInline, bool isTailCall, 6203 MachinePointerInfo DstPtrInfo, 6204 MachinePointerInfo SrcPtrInfo) { 6205 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6206 6207 // Check to see if we should lower the memcpy to loads and stores first. 6208 // For cases within the target-specified limits, this is the best choice. 6209 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6210 if (ConstantSize) { 6211 // Memcpy with size zero? Just return the original chain. 
6212 if (ConstantSize->isNullValue()) 6213 return Chain; 6214 6215 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6216 ConstantSize->getZExtValue(),Align, 6217 isVol, false, DstPtrInfo, SrcPtrInfo); 6218 if (Result.getNode()) 6219 return Result; 6220 } 6221 6222 // Then check to see if we should lower the memcpy with target-specific 6223 // code. If the target chooses to do this, this is the next best. 6224 if (TSI) { 6225 SDValue Result = TSI->EmitTargetCodeForMemcpy( 6226 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 6227 DstPtrInfo, SrcPtrInfo); 6228 if (Result.getNode()) 6229 return Result; 6230 } 6231 6232 // If we really need inline code and the target declined to provide it, 6233 // use a (potentially long) sequence of loads and stores. 6234 if (AlwaysInline) { 6235 assert(ConstantSize && "AlwaysInline requires a constant size!"); 6236 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6237 ConstantSize->getZExtValue(), Align, isVol, 6238 true, DstPtrInfo, SrcPtrInfo); 6239 } 6240 6241 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6242 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6243 6244 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 6245 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 6246 // respect volatile, so they may do things like read or write memory 6247 // beyond the given memory regions. But fixing this isn't easy, and most 6248 // people don't care. 6249 6250 // Emit a library call. 6251 TargetLowering::ArgListTy Args; 6252 TargetLowering::ArgListEntry Entry; 6253 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6254 Entry.Node = Dst; Args.push_back(Entry); 6255 Entry.Node = Src; Args.push_back(Entry); 6256 6257 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6258 Entry.Node = Size; Args.push_back(Entry); 6259 // FIXME: pass in SDLoc 6260 TargetLowering::CallLoweringInfo CLI(*this); 6261 CLI.setDebugLoc(dl) 6262 .setChain(Chain) 6263 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 6264 Dst.getValueType().getTypeForEVT(*getContext()), 6265 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 6266 TLI->getPointerTy(getDataLayout())), 6267 std::move(Args)) 6268 .setDiscardResult() 6269 .setTailCall(isTailCall); 6270 6271 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6272 return CallResult.second; 6273 } 6274 6275 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, 6276 SDValue Dst, unsigned DstAlign, 6277 SDValue Src, unsigned SrcAlign, 6278 SDValue Size, Type *SizeTy, 6279 unsigned ElemSz, bool isTailCall, 6280 MachinePointerInfo DstPtrInfo, 6281 MachinePointerInfo SrcPtrInfo) { 6282 // Emit a library call. 
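// As an illustration (names shown for exposition only), for ElemSz == 4 the
// libcall chosen below by RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC resolves
// to the runtime entry point __llvm_memcpy_element_unordered_atomic_4,
// conceptually invoked as
//   __llvm_memcpy_element_unordered_atomic_4(Dst, Src, Size);
// with the three operands marshalled through the ArgListTy built here.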
6283 TargetLowering::ArgListTy Args; 6284 TargetLowering::ArgListEntry Entry; 6285 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6286 Entry.Node = Dst; 6287 Args.push_back(Entry); 6288 6289 Entry.Node = Src; 6290 Args.push_back(Entry); 6291 6292 Entry.Ty = SizeTy; 6293 Entry.Node = Size; 6294 Args.push_back(Entry); 6295 6296 RTLIB::Libcall LibraryCall = 6297 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6298 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6299 report_fatal_error("Unsupported element size"); 6300 6301 TargetLowering::CallLoweringInfo CLI(*this); 6302 CLI.setDebugLoc(dl) 6303 .setChain(Chain) 6304 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6305 Type::getVoidTy(*getContext()), 6306 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6307 TLI->getPointerTy(getDataLayout())), 6308 std::move(Args)) 6309 .setDiscardResult() 6310 .setTailCall(isTailCall); 6311 6312 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6313 return CallResult.second; 6314 } 6315 6316 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 6317 SDValue Src, SDValue Size, unsigned Align, 6318 bool isVol, bool isTailCall, 6319 MachinePointerInfo DstPtrInfo, 6320 MachinePointerInfo SrcPtrInfo) { 6321 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6322 6323 // Check to see if we should lower the memmove to loads and stores first. 6324 // For cases within the target-specified limits, this is the best choice. 6325 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6326 if (ConstantSize) { 6327 // Memmove with size zero? Just return the original chain. 6328 if (ConstantSize->isNullValue()) 6329 return Chain; 6330 6331 SDValue Result = 6332 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 6333 ConstantSize->getZExtValue(), Align, isVol, 6334 false, DstPtrInfo, SrcPtrInfo); 6335 if (Result.getNode()) 6336 return Result; 6337 } 6338 6339 // Then check to see if we should lower the memmove with target-specific 6340 // code. If the target chooses to do this, this is the next best. 6341 if (TSI) { 6342 SDValue Result = TSI->EmitTargetCodeForMemmove( 6343 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 6344 if (Result.getNode()) 6345 return Result; 6346 } 6347 6348 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6349 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6350 6351 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 6352 // not be safe. See memcpy above for more details. 6353 6354 // Emit a library call. 
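// The CallLoweringInfo assembled below is morally equivalent to emitting
//   (void)memmove(Dst, Src, Size);
// the pointer result of memmove is dropped via setDiscardResult().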
6355 TargetLowering::ArgListTy Args; 6356 TargetLowering::ArgListEntry Entry; 6357 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6358 Entry.Node = Dst; Args.push_back(Entry); 6359 Entry.Node = Src; Args.push_back(Entry); 6360 6361 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6362 Entry.Node = Size; Args.push_back(Entry); 6363 // FIXME: pass in SDLoc 6364 TargetLowering::CallLoweringInfo CLI(*this); 6365 CLI.setDebugLoc(dl) 6366 .setChain(Chain) 6367 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 6368 Dst.getValueType().getTypeForEVT(*getContext()), 6369 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 6370 TLI->getPointerTy(getDataLayout())), 6371 std::move(Args)) 6372 .setDiscardResult() 6373 .setTailCall(isTailCall); 6374 6375 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6376 return CallResult.second; 6377 } 6378 6379 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl, 6380 SDValue Dst, unsigned DstAlign, 6381 SDValue Src, unsigned SrcAlign, 6382 SDValue Size, Type *SizeTy, 6383 unsigned ElemSz, bool isTailCall, 6384 MachinePointerInfo DstPtrInfo, 6385 MachinePointerInfo SrcPtrInfo) { 6386 // Emit a library call. 6387 TargetLowering::ArgListTy Args; 6388 TargetLowering::ArgListEntry Entry; 6389 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6390 Entry.Node = Dst; 6391 Args.push_back(Entry); 6392 6393 Entry.Node = Src; 6394 Args.push_back(Entry); 6395 6396 Entry.Ty = SizeTy; 6397 Entry.Node = Size; 6398 Args.push_back(Entry); 6399 6400 RTLIB::Libcall LibraryCall = 6401 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6402 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6403 report_fatal_error("Unsupported element size"); 6404 6405 TargetLowering::CallLoweringInfo CLI(*this); 6406 CLI.setDebugLoc(dl) 6407 .setChain(Chain) 6408 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6409 Type::getVoidTy(*getContext()), 6410 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6411 TLI->getPointerTy(getDataLayout())), 6412 std::move(Args)) 6413 .setDiscardResult() 6414 .setTailCall(isTailCall); 6415 6416 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6417 return CallResult.second; 6418 } 6419 6420 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 6421 SDValue Src, SDValue Size, unsigned Align, 6422 bool isVol, bool isTailCall, 6423 MachinePointerInfo DstPtrInfo) { 6424 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6425 6426 // Check to see if we should lower the memset to stores first. 6427 // For cases within the target-specified limits, this is the best choice. 6428 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6429 if (ConstantSize) { 6430 // Memset with size zero? Just return the original chain. 6431 if (ConstantSize->isNullValue()) 6432 return Chain; 6433 6434 SDValue Result = 6435 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 6436 Align, isVol, DstPtrInfo); 6437 6438 if (Result.getNode()) 6439 return Result; 6440 } 6441 6442 // Then check to see if we should lower the memset with target-specific 6443 // code. If the target chooses to do this, this is the next best. 6444 if (TSI) { 6445 SDValue Result = TSI->EmitTargetCodeForMemset( 6446 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); 6447 if (Result.getNode()) 6448 return Result; 6449 } 6450 6451 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6452 6453 // Emit a library call. 
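// The assembled call is morally (void)memset(Dst, Src, Size). Note that the
// value operand Src keeps its own integer type (see Entry.Ty below) and the
// returned pointer is dropped via setDiscardResult().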
6454 TargetLowering::ArgListTy Args; 6455 TargetLowering::ArgListEntry Entry; 6456 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext()); 6457 Args.push_back(Entry); 6458 Entry.Node = Src; 6459 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 6460 Args.push_back(Entry); 6461 Entry.Node = Size; 6462 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6463 Args.push_back(Entry); 6464 6465 // FIXME: pass in SDLoc 6466 TargetLowering::CallLoweringInfo CLI(*this); 6467 CLI.setDebugLoc(dl) 6468 .setChain(Chain) 6469 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 6470 Dst.getValueType().getTypeForEVT(*getContext()), 6471 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 6472 TLI->getPointerTy(getDataLayout())), 6473 std::move(Args)) 6474 .setDiscardResult() 6475 .setTailCall(isTailCall); 6476 6477 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6478 return CallResult.second; 6479 } 6480 6481 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl, 6482 SDValue Dst, unsigned DstAlign, 6483 SDValue Value, SDValue Size, Type *SizeTy, 6484 unsigned ElemSz, bool isTailCall, 6485 MachinePointerInfo DstPtrInfo) { 6486 // Emit a library call. 6487 TargetLowering::ArgListTy Args; 6488 TargetLowering::ArgListEntry Entry; 6489 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6490 Entry.Node = Dst; 6491 Args.push_back(Entry); 6492 6493 Entry.Ty = Type::getInt8Ty(*getContext()); 6494 Entry.Node = Value; 6495 Args.push_back(Entry); 6496 6497 Entry.Ty = SizeTy; 6498 Entry.Node = Size; 6499 Args.push_back(Entry); 6500 6501 RTLIB::Libcall LibraryCall = 6502 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6503 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6504 report_fatal_error("Unsupported element size"); 6505 6506 TargetLowering::CallLoweringInfo CLI(*this); 6507 CLI.setDebugLoc(dl) 6508 .setChain(Chain) 6509 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6510 Type::getVoidTy(*getContext()), 6511 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6512 TLI->getPointerTy(getDataLayout())), 6513 std::move(Args)) 6514 .setDiscardResult() 6515 .setTailCall(isTailCall); 6516 6517 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6518 return CallResult.second; 6519 } 6520 6521 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6522 SDVTList VTList, ArrayRef<SDValue> Ops, 6523 MachineMemOperand *MMO) { 6524 FoldingSetNodeID ID; 6525 ID.AddInteger(MemVT.getRawBits()); 6526 AddNodeIDNode(ID, Opcode, VTList, Ops); 6527 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6528 void* IP = nullptr; 6529 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6530 cast<AtomicSDNode>(E)->refineAlignment(MMO); 6531 return SDValue(E, 0); 6532 } 6533 6534 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6535 VTList, MemVT, MMO); 6536 createOperands(N, Ops); 6537 6538 CSEMap.InsertNode(N, IP); 6539 InsertNode(N); 6540 return SDValue(N, 0); 6541 } 6542 6543 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 6544 EVT MemVT, SDVTList VTs, SDValue Chain, 6545 SDValue Ptr, SDValue Cmp, SDValue Swp, 6546 MachineMemOperand *MMO) { 6547 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 6548 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 6549 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 6550 6551 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 6552 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6553 } 6554 6555 SDValue SelectionDAG::getAtomic(unsigned 
Opcode, const SDLoc &dl, EVT MemVT, 6556 SDValue Chain, SDValue Ptr, SDValue Val, 6557 MachineMemOperand *MMO) { 6558 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 6559 Opcode == ISD::ATOMIC_LOAD_SUB || 6560 Opcode == ISD::ATOMIC_LOAD_AND || 6561 Opcode == ISD::ATOMIC_LOAD_CLR || 6562 Opcode == ISD::ATOMIC_LOAD_OR || 6563 Opcode == ISD::ATOMIC_LOAD_XOR || 6564 Opcode == ISD::ATOMIC_LOAD_NAND || 6565 Opcode == ISD::ATOMIC_LOAD_MIN || 6566 Opcode == ISD::ATOMIC_LOAD_MAX || 6567 Opcode == ISD::ATOMIC_LOAD_UMIN || 6568 Opcode == ISD::ATOMIC_LOAD_UMAX || 6569 Opcode == ISD::ATOMIC_LOAD_FADD || 6570 Opcode == ISD::ATOMIC_LOAD_FSUB || 6571 Opcode == ISD::ATOMIC_SWAP || 6572 Opcode == ISD::ATOMIC_STORE) && 6573 "Invalid Atomic Op"); 6574 6575 EVT VT = Val.getValueType(); 6576 6577 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : 6578 getVTList(VT, MVT::Other); 6579 SDValue Ops[] = {Chain, Ptr, Val}; 6580 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6581 } 6582 6583 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6584 EVT VT, SDValue Chain, SDValue Ptr, 6585 MachineMemOperand *MMO) { 6586 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 6587 6588 SDVTList VTs = getVTList(VT, MVT::Other); 6589 SDValue Ops[] = {Chain, Ptr}; 6590 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6591 } 6592 6593 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 6594 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 6595 if (Ops.size() == 1) 6596 return Ops[0]; 6597 6598 SmallVector<EVT, 4> VTs; 6599 VTs.reserve(Ops.size()); 6600 for (unsigned i = 0; i < Ops.size(); ++i) 6601 VTs.push_back(Ops[i].getValueType()); 6602 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 6603 } 6604 6605 SDValue SelectionDAG::getMemIntrinsicNode( 6606 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 6607 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, 6608 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) { 6609 if (Align == 0) // Ensure that codegen never sees alignment 0 6610 Align = getEVTAlignment(MemVT); 6611 6612 if (!Size && MemVT.isScalableVector()) 6613 Size = MemoryLocation::UnknownSize; 6614 else if (!Size) 6615 Size = MemVT.getStoreSize(); 6616 6617 MachineFunction &MF = getMachineFunction(); 6618 MachineMemOperand *MMO = 6619 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align, AAInfo); 6620 6621 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 6622 } 6623 6624 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 6625 SDVTList VTList, 6626 ArrayRef<SDValue> Ops, EVT MemVT, 6627 MachineMemOperand *MMO) { 6628 assert((Opcode == ISD::INTRINSIC_VOID || 6629 Opcode == ISD::INTRINSIC_W_CHAIN || 6630 Opcode == ISD::PREFETCH || 6631 Opcode == ISD::LIFETIME_START || 6632 Opcode == ISD::LIFETIME_END || 6633 ((int)Opcode <= std::numeric_limits<int>::max() && 6634 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 6635 "Opcode is not a memory-accessing opcode!"); 6636 6637 // Memoize the node unless it returns a flag. 
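// (A node whose last result type is MVT::Glue is deliberately kept out of
// the CSE map: a glue result ties the node to one particular consumer, so
// such nodes must remain unique rather than be shared.)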
6638 MemIntrinsicSDNode *N; 6639 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6640 FoldingSetNodeID ID; 6641 AddNodeIDNode(ID, Opcode, VTList, Ops); 6642 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 6643 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 6644 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6645 void *IP = nullptr; 6646 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6647 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 6648 return SDValue(E, 0); 6649 } 6650 6651 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6652 VTList, MemVT, MMO); 6653 createOperands(N, Ops); 6654 6655 CSEMap.InsertNode(N, IP); 6656 } else { 6657 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6658 VTList, MemVT, MMO); 6659 createOperands(N, Ops); 6660 } 6661 InsertNode(N); 6662 SDValue V(N, 0); 6663 NewSDValueDbgMsg(V, "Creating new node: ", this); 6664 return V; 6665 } 6666 6667 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, 6668 SDValue Chain, int FrameIndex, 6669 int64_t Size, int64_t Offset) { 6670 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END; 6671 const auto VTs = getVTList(MVT::Other); 6672 SDValue Ops[2] = { 6673 Chain, 6674 getFrameIndex(FrameIndex, 6675 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()), 6676 true)}; 6677 6678 FoldingSetNodeID ID; 6679 AddNodeIDNode(ID, Opcode, VTs, Ops); 6680 ID.AddInteger(FrameIndex); 6681 ID.AddInteger(Size); 6682 ID.AddInteger(Offset); 6683 void *IP = nullptr; 6684 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6685 return SDValue(E, 0); 6686 6687 LifetimeSDNode *N = newSDNode<LifetimeSDNode>( 6688 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset); 6689 createOperands(N, Ops); 6690 CSEMap.InsertNode(N, IP); 6691 InsertNode(N); 6692 SDValue V(N, 0); 6693 NewSDValueDbgMsg(V, "Creating new node: ", this); 6694 return V; 6695 } 6696 6697 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6698 /// MachinePointerInfo record from it. This is particularly useful because the 6699 /// code generator has many cases where it doesn't bother passing in a 6700 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6701 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6702 SelectionDAG &DAG, SDValue Ptr, 6703 int64_t Offset = 0) { 6704 // If this is FI+Offset, we can model it. 6705 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 6706 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 6707 FI->getIndex(), Offset); 6708 6709 // If this is (FI+Offset1)+Offset2, we can model it. 6710 if (Ptr.getOpcode() != ISD::ADD || 6711 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 6712 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 6713 return Info; 6714 6715 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 6716 return MachinePointerInfo::getFixedStack( 6717 DAG.getMachineFunction(), FI, 6718 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 6719 } 6720 6721 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6722 /// MachinePointerInfo record from it. This is particularly useful because the 6723 /// code generator has many cases where it doesn't bother passing in a 6724 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 
6725 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6726 SelectionDAG &DAG, SDValue Ptr, 6727 SDValue OffsetOp) { 6728 // If the 'Offset' value isn't a constant, we can't handle this. 6729 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 6730 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); 6731 if (OffsetOp.isUndef()) 6732 return InferPointerInfo(Info, DAG, Ptr); 6733 return Info; 6734 } 6735 6736 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6737 EVT VT, const SDLoc &dl, SDValue Chain, 6738 SDValue Ptr, SDValue Offset, 6739 MachinePointerInfo PtrInfo, EVT MemVT, 6740 unsigned Alignment, 6741 MachineMemOperand::Flags MMOFlags, 6742 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6743 assert(Chain.getValueType() == MVT::Other && 6744 "Invalid chain type"); 6745 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6746 Alignment = getEVTAlignment(MemVT); 6747 6748 MMOFlags |= MachineMemOperand::MOLoad; 6749 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 6750 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 6751 // clients. 6752 if (PtrInfo.V.isNull()) 6753 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); 6754 6755 MachineFunction &MF = getMachineFunction(); 6756 MachineMemOperand *MMO = MF.getMachineMemOperand( 6757 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges); 6758 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 6759 } 6760 6761 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6762 EVT VT, const SDLoc &dl, SDValue Chain, 6763 SDValue Ptr, SDValue Offset, EVT MemVT, 6764 MachineMemOperand *MMO) { 6765 if (VT == MemVT) { 6766 ExtType = ISD::NON_EXTLOAD; 6767 } else if (ExtType == ISD::NON_EXTLOAD) { 6768 assert(VT == MemVT && "Non-extending load from different memory type!"); 6769 } else { 6770 // Extending load. 6771 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 6772 "Should only be an extending load, not truncating!"); 6773 assert(VT.isInteger() == MemVT.isInteger() && 6774 "Cannot convert from FP to Int or Int -> FP!"); 6775 assert(VT.isVector() == MemVT.isVector() && 6776 "Cannot use an ext load to convert to or from a vector!"); 6777 assert((!VT.isVector() || 6778 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 6779 "Cannot use an ext load to change the number of vector elements!"); 6780 } 6781 6782 bool Indexed = AM != ISD::UNINDEXED; 6783 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 6784 6785 SDVTList VTs = Indexed ? 
6786 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); 6787 SDValue Ops[] = { Chain, Ptr, Offset }; 6788 FoldingSetNodeID ID; 6789 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); 6790 ID.AddInteger(MemVT.getRawBits()); 6791 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( 6792 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); 6793 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6794 void *IP = nullptr; 6795 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6796 cast<LoadSDNode>(E)->refineAlignment(MMO); 6797 return SDValue(E, 0); 6798 } 6799 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6800 ExtType, MemVT, MMO); 6801 createOperands(N, Ops); 6802 6803 CSEMap.InsertNode(N, IP); 6804 InsertNode(N); 6805 SDValue V(N, 0); 6806 NewSDValueDbgMsg(V, "Creating new node: ", this); 6807 return V; 6808 } 6809 6810 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6811 SDValue Ptr, MachinePointerInfo PtrInfo, 6812 unsigned Alignment, 6813 MachineMemOperand::Flags MMOFlags, 6814 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6815 SDValue Undef = getUNDEF(Ptr.getValueType()); 6816 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6817 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); 6818 } 6819 6820 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6821 SDValue Ptr, MachineMemOperand *MMO) { 6822 SDValue Undef = getUNDEF(Ptr.getValueType()); 6823 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6824 VT, MMO); 6825 } 6826 6827 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6828 EVT VT, SDValue Chain, SDValue Ptr, 6829 MachinePointerInfo PtrInfo, EVT MemVT, 6830 unsigned Alignment, 6831 MachineMemOperand::Flags MMOFlags, 6832 const AAMDNodes &AAInfo) { 6833 SDValue Undef = getUNDEF(Ptr.getValueType()); 6834 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, 6835 MemVT, Alignment, MMOFlags, AAInfo); 6836 } 6837 6838 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6839 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, 6840 MachineMemOperand *MMO) { 6841 SDValue Undef = getUNDEF(Ptr.getValueType()); 6842 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, 6843 MemVT, MMO); 6844 } 6845 6846 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, 6847 SDValue Base, SDValue Offset, 6848 ISD::MemIndexedMode AM) { 6849 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); 6850 assert(LD->getOffset().isUndef() && "Load is already an indexed load!"); 6851 // Don't propagate the invariant or dereferenceable flags.
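// (The indexed load will access Base plus Offset, a location for which the
// original memory operand's invariant/dereferenceable guarantees were never
// established, so those flags are masked off here.)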
6852 auto MMOFlags = 6853 LD->getMemOperand()->getFlags() & 6854 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 6855 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 6856 LD->getChain(), Base, Offset, LD->getPointerInfo(), 6857 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 6858 LD->getAAInfo()); 6859 } 6860 6861 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6862 SDValue Ptr, MachinePointerInfo PtrInfo, 6863 unsigned Alignment, 6864 MachineMemOperand::Flags MMOFlags, 6865 const AAMDNodes &AAInfo) { 6866 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 6867 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6868 Alignment = getEVTAlignment(Val.getValueType()); 6869 6870 MMOFlags |= MachineMemOperand::MOStore; 6871 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6872 6873 if (PtrInfo.V.isNull()) 6874 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6875 6876 MachineFunction &MF = getMachineFunction(); 6877 MachineMemOperand *MMO = MF.getMachineMemOperand( 6878 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo); 6879 return getStore(Chain, dl, Val, Ptr, MMO); 6880 } 6881 6882 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6883 SDValue Ptr, MachineMemOperand *MMO) { 6884 assert(Chain.getValueType() == MVT::Other && 6885 "Invalid chain type"); 6886 EVT VT = Val.getValueType(); 6887 SDVTList VTs = getVTList(MVT::Other); 6888 SDValue Undef = getUNDEF(Ptr.getValueType()); 6889 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6890 FoldingSetNodeID ID; 6891 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6892 ID.AddInteger(VT.getRawBits()); 6893 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6894 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 6895 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6896 void *IP = nullptr; 6897 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6898 cast<StoreSDNode>(E)->refineAlignment(MMO); 6899 return SDValue(E, 0); 6900 } 6901 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6902 ISD::UNINDEXED, false, VT, MMO); 6903 createOperands(N, Ops); 6904 6905 CSEMap.InsertNode(N, IP); 6906 InsertNode(N); 6907 SDValue V(N, 0); 6908 NewSDValueDbgMsg(V, "Creating new node: ", this); 6909 return V; 6910 } 6911 6912 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6913 SDValue Ptr, MachinePointerInfo PtrInfo, 6914 EVT SVT, unsigned Alignment, 6915 MachineMemOperand::Flags MMOFlags, 6916 const AAMDNodes &AAInfo) { 6917 assert(Chain.getValueType() == MVT::Other && 6918 "Invalid chain type"); 6919 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6920 Alignment = getEVTAlignment(SVT); 6921 6922 MMOFlags |= MachineMemOperand::MOStore; 6923 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6924 6925 if (PtrInfo.V.isNull()) 6926 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6927 6928 MachineFunction &MF = getMachineFunction(); 6929 MachineMemOperand *MMO = MF.getMachineMemOperand( 6930 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 6931 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 6932 } 6933 6934 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6935 SDValue Ptr, EVT SVT, 6936 MachineMemOperand *MMO) { 6937 EVT VT = Val.getValueType(); 6938 6939 assert(Chain.getValueType() == MVT::Other && 6940 "Invalid chain type"); 6941 if (VT == SVT) 6942 return getStore(Chain, dl, Val, Ptr, 
MMO); 6943 6944 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 6945 "Should only be a truncating store, not extending!"); 6946 assert(VT.isInteger() == SVT.isInteger() && 6947 "Can't do FP-INT conversion!"); 6948 assert(VT.isVector() == SVT.isVector() && 6949 "Cannot use trunc store to convert to or from a vector!"); 6950 assert((!VT.isVector() || 6951 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 6952 "Cannot use trunc store to change the number of vector elements!"); 6953 6954 SDVTList VTs = getVTList(MVT::Other); 6955 SDValue Undef = getUNDEF(Ptr.getValueType()); 6956 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6957 FoldingSetNodeID ID; 6958 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6959 ID.AddInteger(SVT.getRawBits()); 6960 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6961 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 6962 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6963 void *IP = nullptr; 6964 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6965 cast<StoreSDNode>(E)->refineAlignment(MMO); 6966 return SDValue(E, 0); 6967 } 6968 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6969 ISD::UNINDEXED, true, SVT, MMO); 6970 createOperands(N, Ops); 6971 6972 CSEMap.InsertNode(N, IP); 6973 InsertNode(N); 6974 SDValue V(N, 0); 6975 NewSDValueDbgMsg(V, "Creating new node: ", this); 6976 return V; 6977 } 6978 6979 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 6980 SDValue Base, SDValue Offset, 6981 ISD::MemIndexedMode AM) { 6982 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 6983 assert(ST->getOffset().isUndef() && "Store is already an indexed store!"); 6984 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 6985 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 6986 FoldingSetNodeID ID; 6987 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6988 ID.AddInteger(ST->getMemoryVT().getRawBits()); 6989 ID.AddInteger(ST->getRawSubclassData()); 6990 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 6991 void *IP = nullptr; 6992 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6993 return SDValue(E, 0); 6994 6995 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6996 ST->isTruncatingStore(), ST->getMemoryVT(), 6997 ST->getMemOperand()); 6998 createOperands(N, Ops); 6999 7000 CSEMap.InsertNode(N, IP); 7001 InsertNode(N); 7002 SDValue V(N, 0); 7003 NewSDValueDbgMsg(V, "Creating new node: ", this); 7004 return V; 7005 } 7006 7007 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 7008 SDValue Base, SDValue Offset, SDValue Mask, 7009 SDValue PassThru, EVT MemVT, 7010 MachineMemOperand *MMO, 7011 ISD::MemIndexedMode AM, 7012 ISD::LoadExtType ExtTy, bool isExpanding) { 7013 bool Indexed = AM != ISD::UNINDEXED; 7014 assert((Indexed || Offset.isUndef()) && 7015 "Unindexed masked load with an offset!"); 7016 SDVTList VTs = Indexed ?
getVTList(VT, Base.getValueType(), MVT::Other) 7017 : getVTList(VT, MVT::Other); 7018 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru}; 7019 FoldingSetNodeID ID; 7020 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 7021 ID.AddInteger(MemVT.getRawBits()); 7022 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 7023 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO)); 7024 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7025 void *IP = nullptr; 7026 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7027 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 7028 return SDValue(E, 0); 7029 } 7030 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7031 AM, ExtTy, isExpanding, MemVT, MMO); 7032 createOperands(N, Ops); 7033 7034 CSEMap.InsertNode(N, IP); 7035 InsertNode(N); 7036 SDValue V(N, 0); 7037 NewSDValueDbgMsg(V, "Creating new node: ", this); 7038 return V; 7039 } 7040 7041 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, 7042 SDValue Base, SDValue Offset, 7043 ISD::MemIndexedMode AM) { 7044 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad); 7045 assert(LD->getOffset().isUndef() && "Masked load is already an indexed load!"); 7046 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base, 7047 Offset, LD->getMask(), LD->getPassThru(), 7048 LD->getMemoryVT(), LD->getMemOperand(), AM, 7049 LD->getExtensionType(), LD->isExpandingLoad()); 7050 } 7051 7052 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 7053 SDValue Val, SDValue Base, SDValue Offset, 7054 SDValue Mask, EVT MemVT, 7055 MachineMemOperand *MMO, 7056 ISD::MemIndexedMode AM, bool IsTruncating, 7057 bool IsCompressing) { 7058 assert(Chain.getValueType() == MVT::Other && 7059 "Invalid chain type"); 7060 bool Indexed = AM != ISD::UNINDEXED; 7061 assert((Indexed || Offset.isUndef()) && 7062 "Unindexed masked store with an offset!"); 7063 SDVTList VTs = Indexed ?
getVTList(Base.getValueType(), MVT::Other) 7064 : getVTList(MVT::Other); 7065 SDValue Ops[] = {Chain, Val, Base, Offset, Mask}; 7066 FoldingSetNodeID ID; 7067 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 7068 ID.AddInteger(MemVT.getRawBits()); 7069 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 7070 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO)); 7071 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7072 void *IP = nullptr; 7073 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7074 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 7075 return SDValue(E, 0); 7076 } 7077 auto *N = 7078 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7079 IsTruncating, IsCompressing, MemVT, MMO); 7080 createOperands(N, Ops); 7081 7082 CSEMap.InsertNode(N, IP); 7083 InsertNode(N); 7084 SDValue V(N, 0); 7085 NewSDValueDbgMsg(V, "Creating new node: ", this); 7086 return V; 7087 } 7088 7089 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, 7090 SDValue Base, SDValue Offset, 7091 ISD::MemIndexedMode AM) { 7092 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore); 7093 assert(ST->getOffset().isUndef() && 7094 "Masked store is already an indexed store!"); 7095 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset, 7096 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(), 7097 AM, ST->isTruncatingStore(), ST->isCompressingStore()); 7098 } 7099 7100 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 7101 ArrayRef<SDValue> Ops, 7102 MachineMemOperand *MMO, 7103 ISD::MemIndexType IndexType) { 7104 assert(Ops.size() == 6 && "Incompatible number of operands"); 7105 7106 FoldingSetNodeID ID; 7107 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 7108 ID.AddInteger(VT.getRawBits()); 7109 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 7110 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7111 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7112 void *IP = nullptr; 7113 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7114 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 7115 return SDValue(E, 0); 7116 } 7117 7118 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7119 VTs, VT, MMO, IndexType); 7120 createOperands(N, Ops); 7121 7122 assert(N->getPassThru().getValueType() == N->getValueType(0) && 7123 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 7124 assert(N->getMask().getValueType().getVectorNumElements() == 7125 N->getValueType(0).getVectorNumElements() && 7126 "Vector width mismatch between mask and data"); 7127 assert(N->getIndex().getValueType().getVectorNumElements() >= 7128 N->getValueType(0).getVectorNumElements() && 7129 "Vector width mismatch between index and data"); 7130 assert(isa<ConstantSDNode>(N->getScale()) && 7131 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7132 "Scale should be a constant power of 2"); 7133 7134 CSEMap.InsertNode(N, IP); 7135 InsertNode(N); 7136 SDValue V(N, 0); 7137 NewSDValueDbgMsg(V, "Creating new node: ", this); 7138 return V; 7139 } 7140 7141 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 7142 ArrayRef<SDValue> Ops, 7143 MachineMemOperand *MMO, 7144 ISD::MemIndexType IndexType) { 7145 assert(Ops.size() == 6 && "Incompatible number of operands"); 7146 7147 FoldingSetNodeID ID; 7148 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 7149 ID.AddInteger(VT.getRawBits()); 7150 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
7151 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7152 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7153 void *IP = nullptr; 7154 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7155 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 7156 return SDValue(E, 0); 7157 } 7158 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7159 VTs, VT, MMO, IndexType); 7160 createOperands(N, Ops); 7161 7162 assert(N->getMask().getValueType().getVectorNumElements() == 7163 N->getValue().getValueType().getVectorNumElements() && 7164 "Vector width mismatch between mask and data"); 7165 assert(N->getIndex().getValueType().getVectorNumElements() >= 7166 N->getValue().getValueType().getVectorNumElements() && 7167 "Vector width mismatch between index and data"); 7168 assert(isa<ConstantSDNode>(N->getScale()) && 7169 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7170 "Scale should be a constant power of 2"); 7171 7172 CSEMap.InsertNode(N, IP); 7173 InsertNode(N); 7174 SDValue V(N, 0); 7175 NewSDValueDbgMsg(V, "Creating new node: ", this); 7176 return V; 7177 } 7178 7179 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) { 7180 // select undef, T, F --> T (if T is a constant), otherwise F 7181 // select ?, undef, F --> F 7182 // select ?, T, undef --> T 7183 if (Cond.isUndef()) 7184 return isConstantValueOfAnyType(T) ? T : F; 7185 if (T.isUndef()) 7186 return F; 7187 if (F.isUndef()) 7188 return T; 7189 7190 // select true, T, F --> T 7191 // select false, T, F --> F 7192 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond)) 7193 return CondC->isNullValue() ? F : T; 7194 7195 // TODO: This should simplify VSELECT with constant condition using something 7196 // like this (but check boolean contents to be complete?): 7197 // if (ISD::isBuildVectorAllOnes(Cond.getNode())) 7198 // return T; 7199 // if (ISD::isBuildVectorAllZeros(Cond.getNode())) 7200 // return F; 7201 7202 // select ?, T, T --> T 7203 if (T == F) 7204 return T; 7205 7206 return SDValue(); 7207 } 7208 7209 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) { 7210 // shift undef, Y --> 0 (can always assume that the undef value is 0) 7211 if (X.isUndef()) 7212 return getConstant(0, SDLoc(X.getNode()), X.getValueType()); 7213 // shift X, undef --> undef (because it may shift by the bitwidth) 7214 if (Y.isUndef()) 7215 return getUNDEF(X.getValueType()); 7216 7217 // shift 0, Y --> 0 7218 // shift X, 0 --> X 7219 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y)) 7220 return X; 7221 7222 // shift X, C >= bitwidth(X) --> undef 7223 // All vector elements must be too big (or undef) to avoid partial undefs. 7224 auto isShiftTooBig = [X](ConstantSDNode *Val) { 7225 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits()); 7226 }; 7227 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true)) 7228 return getUNDEF(X.getValueType()); 7229 7230 return SDValue(); 7231 } 7232 7233 // TODO: Use fast-math-flags to enable more simplifications.
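// For instance (speculative sketches of folds FMF could enable, mirroring
// what DAGCombiner does under the global fast-math options):
//   X + 0.0 --> X    with 'nsz' (otherwise -0.0 + 0.0 must yield +0.0)
//   X * 0.0 --> 0.0  with 'nnan' and 'nsz'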
7234 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y) { 7235 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true); 7236 if (!YC) 7237 return SDValue(); 7238 7239 // X + -0.0 --> X 7240 if (Opcode == ISD::FADD) 7241 if (YC->getValueAPF().isNegZero()) 7242 return X; 7243 7244 // X - +0.0 --> X 7245 if (Opcode == ISD::FSUB) 7246 if (YC->getValueAPF().isPosZero()) 7247 return X; 7248 7249 // X * 1.0 --> X 7250 // X / 1.0 --> X 7251 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV) 7252 if (YC->getValueAPF().isExactlyValue(1.0)) 7253 return X; 7254 7255 return SDValue(); 7256 } 7257 7258 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 7259 SDValue Ptr, SDValue SV, unsigned Align) { 7260 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 7261 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 7262 } 7263 7264 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7265 ArrayRef<SDUse> Ops) { 7266 switch (Ops.size()) { 7267 case 0: return getNode(Opcode, DL, VT); 7268 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 7269 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 7270 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 7271 default: break; 7272 } 7273 7274 // Copy from an SDUse array into an SDValue array for use with 7275 // the regular getNode logic. 7276 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 7277 return getNode(Opcode, DL, VT, NewOps); 7278 } 7279 7280 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7281 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7282 unsigned NumOps = Ops.size(); 7283 switch (NumOps) { 7284 case 0: return getNode(Opcode, DL, VT); 7285 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 7286 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 7287 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags); 7288 default: break; 7289 } 7290 7291 switch (Opcode) { 7292 default: break; 7293 case ISD::BUILD_VECTOR: 7294 // Attempt to simplify BUILD_VECTOR. 7295 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 7296 return V; 7297 break; 7298 case ISD::CONCAT_VECTORS: 7299 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 7300 return V; 7301 break; 7302 case ISD::SELECT_CC: 7303 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 7304 assert(Ops[0].getValueType() == Ops[1].getValueType() && 7305 "LHS and RHS of condition must have same type!"); 7306 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7307 "True and False arms of SelectCC must have same type!"); 7308 assert(Ops[2].getValueType() == VT && 7309 "select_cc node must be of same type as true and false value!"); 7310 break; 7311 case ISD::BR_CC: 7312 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 7313 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7314 "LHS/RHS of comparison should match types!"); 7315 break; 7316 } 7317 7318 // Memoize nodes. 
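// One observable consequence of the memoization (illustrative, assuming X
// and Y are existing SDValues and no conflicting node flags):
//   SDValue A = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
//   SDValue B = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y);
//   // A and B now refer to the same CSE'd node.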
7319 SDNode *N; 7320 SDVTList VTs = getVTList(VT); 7321 7322 if (VT != MVT::Glue) { 7323 FoldingSetNodeID ID; 7324 AddNodeIDNode(ID, Opcode, VTs, Ops); 7325 void *IP = nullptr; 7326 7327 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7328 return SDValue(E, 0); 7329 7330 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7331 createOperands(N, Ops); 7332 7333 CSEMap.InsertNode(N, IP); 7334 } else { 7335 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7336 createOperands(N, Ops); 7337 } 7338 7339 InsertNode(N); 7340 SDValue V(N, 0); 7341 NewSDValueDbgMsg(V, "Creating new node: ", this); 7342 return V; 7343 } 7344 7345 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7346 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 7347 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 7348 } 7349 7350 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7351 ArrayRef<SDValue> Ops) { 7352 if (VTList.NumVTs == 1) 7353 return getNode(Opcode, DL, VTList.VTs[0], Ops); 7354 7355 switch (Opcode) { 7356 case ISD::STRICT_FP_EXTEND: 7357 assert(VTList.NumVTs == 2 && Ops.size() == 2 && 7358 "Invalid STRICT_FP_EXTEND!"); 7359 assert(VTList.VTs[0].isFloatingPoint() && 7360 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!"); 7361 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7362 "STRICT_FP_EXTEND result type should be vector iff the operand " 7363 "type is vector!"); 7364 assert((!VTList.VTs[0].isVector() || 7365 VTList.VTs[0].getVectorNumElements() == 7366 Ops[1].getValueType().getVectorNumElements()) && 7367 "Vector element count mismatch!"); 7368 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) && 7369 "Invalid fpext node, dst <= src!"); 7370 break; 7371 case ISD::STRICT_FP_ROUND: 7372 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!"); 7373 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7374 "STRICT_FP_ROUND result type should be vector iff the operand " 7375 "type is vector!"); 7376 assert((!VTList.VTs[0].isVector() || 7377 VTList.VTs[0].getVectorNumElements() == 7378 Ops[1].getValueType().getVectorNumElements()) && 7379 "Vector element count mismatch!"); 7380 assert(VTList.VTs[0].isFloatingPoint() && 7381 Ops[1].getValueType().isFloatingPoint() && 7382 VTList.VTs[0].bitsLT(Ops[1].getValueType()) && 7383 isa<ConstantSDNode>(Ops[2]) && 7384 (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 || 7385 cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) && 7386 "Invalid STRICT_FP_ROUND!"); 7387 break; 7388 #if 0 7389 // FIXME: figure out how to safely handle things like 7390 // int foo(int x) { return 1 << (x & 255); } 7391 // int bar() { return foo(256); } 7392 case ISD::SRA_PARTS: 7393 case ISD::SRL_PARTS: 7394 case ISD::SHL_PARTS: 7395 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 7396 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 7397 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7398 else if (N3.getOpcode() == ISD::AND) 7399 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 7400 // If the and is only masking out bits that cannot effect the shift, 7401 // eliminate the and. 7402 unsigned NumBits = VT.getScalarSizeInBits()*2; 7403 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 7404 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7405 } 7406 break; 7407 #endif 7408 } 7409 7410 // Memoize the node unless it returns a flag. 
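// Multi-result nodes built through this path expose each result via
// getValue(i); a hedged sketch, not code from this file:
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32);
//   SDValue DivRem = DAG.getNode(ISD::SDIVREM, DL, VTs, LHS, RHS);
//   SDValue Div = DivRem.getValue(0), Rem = DivRem.getValue(1);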
7411 SDNode *N; 7412 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 7413 FoldingSetNodeID ID; 7414 AddNodeIDNode(ID, Opcode, VTList, Ops); 7415 void *IP = nullptr; 7416 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7417 return SDValue(E, 0); 7418 7419 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7420 createOperands(N, Ops); 7421 CSEMap.InsertNode(N, IP); 7422 } else { 7423 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7424 createOperands(N, Ops); 7425 } 7426 InsertNode(N); 7427 SDValue V(N, 0); 7428 NewSDValueDbgMsg(V, "Creating new node: ", this); 7429 return V; 7430 } 7431 7432 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7433 SDVTList VTList) { 7434 return getNode(Opcode, DL, VTList, None); 7435 } 7436 7437 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7438 SDValue N1) { 7439 SDValue Ops[] = { N1 }; 7440 return getNode(Opcode, DL, VTList, Ops); 7441 } 7442 7443 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7444 SDValue N1, SDValue N2) { 7445 SDValue Ops[] = { N1, N2 }; 7446 return getNode(Opcode, DL, VTList, Ops); 7447 } 7448 7449 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7450 SDValue N1, SDValue N2, SDValue N3) { 7451 SDValue Ops[] = { N1, N2, N3 }; 7452 return getNode(Opcode, DL, VTList, Ops); 7453 } 7454 7455 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7456 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 7457 SDValue Ops[] = { N1, N2, N3, N4 }; 7458 return getNode(Opcode, DL, VTList, Ops); 7459 } 7460 7461 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7462 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 7463 SDValue N5) { 7464 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 7465 return getNode(Opcode, DL, VTList, Ops); 7466 } 7467 7468 SDVTList SelectionDAG::getVTList(EVT VT) { 7469 return makeVTList(SDNode::getValueTypeList(VT), 1); 7470 } 7471 7472 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 7473 FoldingSetNodeID ID; 7474 ID.AddInteger(2U); 7475 ID.AddInteger(VT1.getRawBits()); 7476 ID.AddInteger(VT2.getRawBits()); 7477 7478 void *IP = nullptr; 7479 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7480 if (!Result) { 7481 EVT *Array = Allocator.Allocate<EVT>(2); 7482 Array[0] = VT1; 7483 Array[1] = VT2; 7484 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 7485 VTListMap.InsertNode(Result, IP); 7486 } 7487 return Result->getSDVTList(); 7488 } 7489 7490 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 7491 FoldingSetNodeID ID; 7492 ID.AddInteger(3U); 7493 ID.AddInteger(VT1.getRawBits()); 7494 ID.AddInteger(VT2.getRawBits()); 7495 ID.AddInteger(VT3.getRawBits()); 7496 7497 void *IP = nullptr; 7498 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7499 if (!Result) { 7500 EVT *Array = Allocator.Allocate<EVT>(3); 7501 Array[0] = VT1; 7502 Array[1] = VT2; 7503 Array[2] = VT3; 7504 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 7505 VTListMap.InsertNode(Result, IP); 7506 } 7507 return Result->getSDVTList(); 7508 } 7509 7510 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 7511 FoldingSetNodeID ID; 7512 ID.AddInteger(4U); 7513 ID.AddInteger(VT1.getRawBits()); 7514 ID.AddInteger(VT2.getRawBits()); 7515 ID.AddInteger(VT3.getRawBits()); 7516 ID.AddInteger(VT4.getRawBits()); 7517 7518 void *IP = nullptr; 7519 SDVTListNode *Result 
= VTListMap.FindNodeOrInsertPos(ID, IP); 7520 if (!Result) { 7521 EVT *Array = Allocator.Allocate<EVT>(4); 7522 Array[0] = VT1; 7523 Array[1] = VT2; 7524 Array[2] = VT3; 7525 Array[3] = VT4; 7526 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 7527 VTListMap.InsertNode(Result, IP); 7528 } 7529 return Result->getSDVTList(); 7530 } 7531 7532 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 7533 unsigned NumVTs = VTs.size(); 7534 FoldingSetNodeID ID; 7535 ID.AddInteger(NumVTs); 7536 for (unsigned index = 0; index < NumVTs; index++) { 7537 ID.AddInteger(VTs[index].getRawBits()); 7538 } 7539 7540 void *IP = nullptr; 7541 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7542 if (!Result) { 7543 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 7544 llvm::copy(VTs, Array); 7545 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 7546 VTListMap.InsertNode(Result, IP); 7547 } 7548 return Result->getSDVTList(); 7549 } 7550 7551 7552 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 7553 /// specified operands. If the resultant node already exists in the DAG, 7554 /// this does not modify the specified node, instead it returns the node that 7555 /// already exists. If the resultant node does not exist in the DAG, the 7556 /// input node is returned. As a degenerate case, if you specify the same 7557 /// input operands as the node already has, the input node is returned. 7558 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 7559 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 7560 7561 // Check to see if there is no change. 7562 if (Op == N->getOperand(0)) return N; 7563 7564 // See if the modified node already exists. 7565 void *InsertPos = nullptr; 7566 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 7567 return Existing; 7568 7569 // Nope it doesn't. Remove the node from its current place in the maps. 7570 if (InsertPos) 7571 if (!RemoveNodeFromCSEMaps(N)) 7572 InsertPos = nullptr; 7573 7574 // Now we update the operands. 7575 N->OperandList[0].set(Op); 7576 7577 updateDivergence(N); 7578 // If this gets put into a CSE map, add it. 7579 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7580 return N; 7581 } 7582 7583 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 7584 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 7585 7586 // Check to see if there is no change. 7587 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 7588 return N; // No operands changed, just return the input node. 7589 7590 // See if the modified node already exists. 7591 void *InsertPos = nullptr; 7592 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 7593 return Existing; 7594 7595 // Nope it doesn't. Remove the node from its current place in the maps. 7596 if (InsertPos) 7597 if (!RemoveNodeFromCSEMaps(N)) 7598 InsertPos = nullptr; 7599 7600 // Now we update the operands. 7601 if (N->OperandList[0] != Op1) 7602 N->OperandList[0].set(Op1); 7603 if (N->OperandList[1] != Op2) 7604 N->OperandList[1].set(Op2); 7605 7606 updateDivergence(N); 7607 // If this gets put into a CSE map, add it. 
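// (As with all UpdateNodeOperands overloads, callers must use the returned
// node; illustrative, with NewOp0/NewOp1 as placeholder values:
//   SDNode *M = DAG.UpdateNodeOperands(N, NewOp0, NewOp1);
//   if (M != N) { /* an equivalent node already existed; N was not mutated */ }
// )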
7608 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7609 return N; 7610 } 7611 7612 SDNode *SelectionDAG:: 7613 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 7614 SDValue Ops[] = { Op1, Op2, Op3 }; 7615 return UpdateNodeOperands(N, Ops); 7616 } 7617 7618 SDNode *SelectionDAG:: 7619 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7620 SDValue Op3, SDValue Op4) { 7621 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 7622 return UpdateNodeOperands(N, Ops); 7623 } 7624 7625 SDNode *SelectionDAG:: 7626 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7627 SDValue Op3, SDValue Op4, SDValue Op5) { 7628 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 7629 return UpdateNodeOperands(N, Ops); 7630 } 7631 7632 SDNode *SelectionDAG:: 7633 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 7634 unsigned NumOps = Ops.size(); 7635 assert(N->getNumOperands() == NumOps && 7636 "Update with wrong number of operands"); 7637 7638 // If no operands changed just return the input node. 7639 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 7640 return N; 7641 7642 // See if the modified node already exists. 7643 void *InsertPos = nullptr; 7644 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 7645 return Existing; 7646 7647 // Nope it doesn't. Remove the node from its current place in the maps. 7648 if (InsertPos) 7649 if (!RemoveNodeFromCSEMaps(N)) 7650 InsertPos = nullptr; 7651 7652 // Now we update the operands. 7653 for (unsigned i = 0; i != NumOps; ++i) 7654 if (N->OperandList[i] != Ops[i]) 7655 N->OperandList[i].set(Ops[i]); 7656 7657 updateDivergence(N); 7658 // If this gets put into a CSE map, add it. 7659 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7660 return N; 7661 } 7662 7663 /// DropOperands - Release the operands and set this node to have 7664 /// zero operands. 7665 void SDNode::DropOperands() { 7666 // Unlike the code in MorphNodeTo that does this, we don't need to 7667 // watch for dead nodes here. 7668 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 7669 SDUse &Use = *I++; 7670 Use.set(SDValue()); 7671 } 7672 } 7673 7674 void SelectionDAG::setNodeMemRefs(MachineSDNode *N, 7675 ArrayRef<MachineMemOperand *> NewMemRefs) { 7676 if (NewMemRefs.empty()) { 7677 N->clearMemRefs(); 7678 return; 7679 } 7680 7681 // Check if we can avoid allocating by storing a single reference directly. 7682 if (NewMemRefs.size() == 1) { 7683 N->MemRefs = NewMemRefs[0]; 7684 N->NumMemRefs = 1; 7685 return; 7686 } 7687 7688 MachineMemOperand **MemRefsBuffer = 7689 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size()); 7690 llvm::copy(NewMemRefs, MemRefsBuffer); 7691 N->MemRefs = MemRefsBuffer; 7692 N->NumMemRefs = static_cast<int>(NewMemRefs.size()); 7693 } 7694 7695 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 7696 /// machine opcode. 
7697 /// 7698 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7699 EVT VT) { 7700 SDVTList VTs = getVTList(VT); 7701 return SelectNodeTo(N, MachineOpc, VTs, None); 7702 } 7703 7704 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7705 EVT VT, SDValue Op1) { 7706 SDVTList VTs = getVTList(VT); 7707 SDValue Ops[] = { Op1 }; 7708 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7709 } 7710 7711 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7712 EVT VT, SDValue Op1, 7713 SDValue Op2) { 7714 SDVTList VTs = getVTList(VT); 7715 SDValue Ops[] = { Op1, Op2 }; 7716 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7717 } 7718 7719 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7720 EVT VT, SDValue Op1, 7721 SDValue Op2, SDValue Op3) { 7722 SDVTList VTs = getVTList(VT); 7723 SDValue Ops[] = { Op1, Op2, Op3 }; 7724 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7725 } 7726 7727 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7728 EVT VT, ArrayRef<SDValue> Ops) { 7729 SDVTList VTs = getVTList(VT); 7730 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7731 } 7732 7733 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7734 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { 7735 SDVTList VTs = getVTList(VT1, VT2); 7736 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7737 } 7738 7739 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7740 EVT VT1, EVT VT2) { 7741 SDVTList VTs = getVTList(VT1, VT2); 7742 return SelectNodeTo(N, MachineOpc, VTs, None); 7743 } 7744 7745 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7746 EVT VT1, EVT VT2, EVT VT3, 7747 ArrayRef<SDValue> Ops) { 7748 SDVTList VTs = getVTList(VT1, VT2, VT3); 7749 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7750 } 7751 7752 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7753 EVT VT1, EVT VT2, 7754 SDValue Op1, SDValue Op2) { 7755 SDVTList VTs = getVTList(VT1, VT2); 7756 SDValue Ops[] = { Op1, Op2 }; 7757 return SelectNodeTo(N, MachineOpc, VTs, Ops); 7758 } 7759 7760 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 7761 SDVTList VTs, ArrayRef<SDValue> Ops) { 7762 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops); 7763 // Reset the NodeID to -1. 7764 New->setNodeId(-1); 7765 if (New != N) { 7766 ReplaceAllUsesWith(N, New); 7767 RemoveDeadNode(N); 7768 } 7769 return New; 7770 } 7771 7772 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away 7773 /// the line number information on the merged node since it is not possible to 7774 /// preserve the information that the operation is associated with multiple lines. 7775 /// This will make the debugger work better at -O0, where there is a higher 7776 /// probability of having other instructions associated with that line. 7777 /// 7778 /// For IROrder, we keep the smaller of the two. 7779 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { 7780 DebugLoc NLoc = N->getDebugLoc(); 7781 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) { 7782 N->setDebugLoc(DebugLoc()); 7783 } 7784 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); 7785 N->setIROrder(Order); 7786 return N; 7787 } 7788 7789 /// MorphNodeTo - This *mutates* the specified node to have the specified 7790 /// return type, opcode, and operands. 7791 /// 7792 /// Note that MorphNodeTo returns the resultant node.
If there is already a
7793 /// node of the specified opcode and operands, it returns that node instead of
7794 /// the current one. Note that the SDLoc need not be the same.
7795 ///
7796 /// Using MorphNodeTo is faster than creating a new node and swapping it in
7797 /// with ReplaceAllUsesWith both because it often avoids allocating a new
7798 /// node, and because it doesn't require CSE recalculation for any of
7799 /// the node's users.
7800 ///
7801 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
7802 /// As a consequence, it isn't appropriate to use from within the DAG combiner
7803 /// or the legalizer, which maintain worklists that would need to be updated
7804 /// when deleting things.
7805 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
7806                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
7807   // If an identical node already exists, use it.
7808   void *IP = nullptr;
7809   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
7810     FoldingSetNodeID ID;
7811     AddNodeIDNode(ID, Opc, VTs, Ops);
7812     if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
7813       return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
7814   }
7815
7816   if (!RemoveNodeFromCSEMaps(N))
7817     IP = nullptr;
7818
7819   // Start the morphing.
7820   N->NodeType = Opc;
7821   N->ValueList = VTs.VTs;
7822   N->NumValues = VTs.NumVTs;
7823
7824   // Clear the operands list, updating used nodes to remove this from their
7825   // use list. Keep track of any operands that become dead as a result.
7826   SmallPtrSet<SDNode*, 16> DeadNodeSet;
7827   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
7828     SDUse &Use = *I++;
7829     SDNode *Used = Use.getNode();
7830     Use.set(SDValue());
7831     if (Used->use_empty())
7832       DeadNodeSet.insert(Used);
7833   }
7834
7835   // For MachineNodes, clear out the memory reference information.
7836   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
7837     MN->clearMemRefs();
7838
7839   // Swap for an appropriately sized array from the recycler.
7840   removeOperands(N);
7841   createOperands(N, Ops);
7842
7843   // Delete any nodes that are still dead after adding the uses for the
7844   // new operands.
7845   if (!DeadNodeSet.empty()) {
7846     SmallVector<SDNode *, 16> DeadNodes;
7847     for (SDNode *N : DeadNodeSet)
7848       if (N->use_empty())
7849         DeadNodes.push_back(N);
7850     RemoveDeadNodes(DeadNodes);
7851   }
7852
7853   if (IP)
7854     CSEMap.InsertNode(N, IP);   // Memoize the new node.
7855   return N;
7856 }
7857
7858 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
7859   unsigned OrigOpc = Node->getOpcode();
7860   unsigned NewOpc;
7861   switch (OrigOpc) {
7862   default:
7863     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
7864 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
7865   case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
7866 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
7867   case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
7868 #include "llvm/IR/ConstrainedOps.def"
7869   }
7870
7871   assert(Node->getNumValues() == 2 && "Unexpected number of results!");
7872
7873   // We're taking this node out of the chain, so we need to re-link things.
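  // E.g. (illustrative): a STRICT_FADD with operands (Chain, X, Y) and result
  // types (f64, Other) becomes a plain FADD over (X, Y) with a single f64
  // result; users of the old chain result are rewired to the input chain.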
7874 SDValue InputChain = Node->getOperand(0); 7875 SDValue OutputChain = SDValue(Node, 1); 7876 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 7877 7878 SmallVector<SDValue, 3> Ops; 7879 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) 7880 Ops.push_back(Node->getOperand(i)); 7881 7882 SDVTList VTs = getVTList(Node->getValueType(0)); 7883 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops); 7884 7885 // MorphNodeTo can operate in two ways: if an existing node with the 7886 // specified operands exists, it can just return it. Otherwise, it 7887 // updates the node in place to have the requested operands. 7888 if (Res == Node) { 7889 // If we updated the node in place, reset the node ID. To the isel, 7890 // this should be just like a newly allocated machine node. 7891 Res->setNodeId(-1); 7892 } else { 7893 ReplaceAllUsesWith(Node, Res); 7894 RemoveDeadNode(Node); 7895 } 7896 7897 return Res; 7898 } 7899 7900 /// getMachineNode - These are used for target selectors to create a new node 7901 /// with specified return type(s), MachineInstr opcode, and operands. 7902 /// 7903 /// Note that getMachineNode returns the resultant node. If there is already a 7904 /// node of the specified opcode and operands, it returns that node instead of 7905 /// the current one. 7906 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7907 EVT VT) { 7908 SDVTList VTs = getVTList(VT); 7909 return getMachineNode(Opcode, dl, VTs, None); 7910 } 7911 7912 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7913 EVT VT, SDValue Op1) { 7914 SDVTList VTs = getVTList(VT); 7915 SDValue Ops[] = { Op1 }; 7916 return getMachineNode(Opcode, dl, VTs, Ops); 7917 } 7918 7919 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7920 EVT VT, SDValue Op1, SDValue Op2) { 7921 SDVTList VTs = getVTList(VT); 7922 SDValue Ops[] = { Op1, Op2 }; 7923 return getMachineNode(Opcode, dl, VTs, Ops); 7924 } 7925 7926 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7927 EVT VT, SDValue Op1, SDValue Op2, 7928 SDValue Op3) { 7929 SDVTList VTs = getVTList(VT); 7930 SDValue Ops[] = { Op1, Op2, Op3 }; 7931 return getMachineNode(Opcode, dl, VTs, Ops); 7932 } 7933 7934 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7935 EVT VT, ArrayRef<SDValue> Ops) { 7936 SDVTList VTs = getVTList(VT); 7937 return getMachineNode(Opcode, dl, VTs, Ops); 7938 } 7939 7940 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7941 EVT VT1, EVT VT2, SDValue Op1, 7942 SDValue Op2) { 7943 SDVTList VTs = getVTList(VT1, VT2); 7944 SDValue Ops[] = { Op1, Op2 }; 7945 return getMachineNode(Opcode, dl, VTs, Ops); 7946 } 7947 7948 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7949 EVT VT1, EVT VT2, SDValue Op1, 7950 SDValue Op2, SDValue Op3) { 7951 SDVTList VTs = getVTList(VT1, VT2); 7952 SDValue Ops[] = { Op1, Op2, Op3 }; 7953 return getMachineNode(Opcode, dl, VTs, Ops); 7954 } 7955 7956 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7957 EVT VT1, EVT VT2, 7958 ArrayRef<SDValue> Ops) { 7959 SDVTList VTs = getVTList(VT1, VT2); 7960 return getMachineNode(Opcode, dl, VTs, Ops); 7961 } 7962 7963 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7964 EVT VT1, EVT VT2, EVT VT3, 7965 SDValue Op1, SDValue Op2) { 7966 SDVTList VTs = getVTList(VT1, VT2, VT3); 7967 SDValue Ops[] = { Op1, Op2 }; 7968 return 
getMachineNode(Opcode, dl, VTs, Ops); 7969 } 7970 7971 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7972 EVT VT1, EVT VT2, EVT VT3, 7973 SDValue Op1, SDValue Op2, 7974 SDValue Op3) { 7975 SDVTList VTs = getVTList(VT1, VT2, VT3); 7976 SDValue Ops[] = { Op1, Op2, Op3 }; 7977 return getMachineNode(Opcode, dl, VTs, Ops); 7978 } 7979 7980 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7981 EVT VT1, EVT VT2, EVT VT3, 7982 ArrayRef<SDValue> Ops) { 7983 SDVTList VTs = getVTList(VT1, VT2, VT3); 7984 return getMachineNode(Opcode, dl, VTs, Ops); 7985 } 7986 7987 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7988 ArrayRef<EVT> ResultTys, 7989 ArrayRef<SDValue> Ops) { 7990 SDVTList VTs = getVTList(ResultTys); 7991 return getMachineNode(Opcode, dl, VTs, Ops); 7992 } 7993 7994 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 7995 SDVTList VTs, 7996 ArrayRef<SDValue> Ops) { 7997 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 7998 MachineSDNode *N; 7999 void *IP = nullptr; 8000 8001 if (DoCSE) { 8002 FoldingSetNodeID ID; 8003 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 8004 IP = nullptr; 8005 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 8006 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 8007 } 8008 } 8009 8010 // Allocate a new MachineSDNode. 8011 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 8012 createOperands(N, Ops); 8013 8014 if (DoCSE) 8015 CSEMap.InsertNode(N, IP); 8016 8017 InsertNode(N); 8018 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this); 8019 return N; 8020 } 8021 8022 /// getTargetExtractSubreg - A convenience function for creating 8023 /// TargetOpcode::EXTRACT_SUBREG nodes. 8024 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8025 SDValue Operand) { 8026 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8027 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 8028 VT, Operand, SRIdxVal); 8029 return SDValue(Subreg, 0); 8030 } 8031 8032 /// getTargetInsertSubreg - A convenience function for creating 8033 /// TargetOpcode::INSERT_SUBREG nodes. 8034 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8035 SDValue Operand, SDValue Subreg) { 8036 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8037 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 8038 VT, Operand, Subreg, SRIdxVal); 8039 return SDValue(Result, 0); 8040 } 8041 8042 /// getNodeIfExists - Get the specified node if it's already available, or 8043 /// else return NULL. 8044 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 8045 ArrayRef<SDValue> Ops, 8046 const SDNodeFlags Flags) { 8047 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 8048 FoldingSetNodeID ID; 8049 AddNodeIDNode(ID, Opcode, VTList, Ops); 8050 void *IP = nullptr; 8051 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 8052 E->intersectFlagsWith(Flags); 8053 return E; 8054 } 8055 } 8056 return nullptr; 8057 } 8058 8059 /// getDbgValue - Creates a SDDbgValue node. 
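/// Usage sketch (illustrative only; Var, Expr, DL and Order would come from
/// the source-level llvm.dbg.value being lowered):
///   SDDbgValue *DV = DAG.getDbgValue(Var, Expr, N.getNode(), N.getResNo(),
///                                    /*IsIndirect=*/false, DL, Order);
///   DAG.AddDbgValue(DV, N.getNode(), /*isParameter=*/false);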
8060 /// 8061 /// SDNode 8062 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 8063 SDNode *N, unsigned R, bool IsIndirect, 8064 const DebugLoc &DL, unsigned O) { 8065 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8066 "Expected inlined-at fields to agree"); 8067 return new (DbgInfo->getAlloc()) 8068 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 8069 } 8070 8071 /// Constant 8072 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 8073 DIExpression *Expr, 8074 const Value *C, 8075 const DebugLoc &DL, unsigned O) { 8076 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8077 "Expected inlined-at fields to agree"); 8078 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 8079 } 8080 8081 /// FrameIndex 8082 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 8083 DIExpression *Expr, unsigned FI, 8084 bool IsIndirect, 8085 const DebugLoc &DL, 8086 unsigned O) { 8087 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8088 "Expected inlined-at fields to agree"); 8089 return new (DbgInfo->getAlloc()) 8090 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX); 8091 } 8092 8093 /// VReg 8094 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, 8095 DIExpression *Expr, 8096 unsigned VReg, bool IsIndirect, 8097 const DebugLoc &DL, unsigned O) { 8098 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8099 "Expected inlined-at fields to agree"); 8100 return new (DbgInfo->getAlloc()) 8101 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG); 8102 } 8103 8104 void SelectionDAG::transferDbgValues(SDValue From, SDValue To, 8105 unsigned OffsetInBits, unsigned SizeInBits, 8106 bool InvalidateDbg) { 8107 SDNode *FromNode = From.getNode(); 8108 SDNode *ToNode = To.getNode(); 8109 assert(FromNode && ToNode && "Can't modify dbg values"); 8110 8111 // PR35338 8112 // TODO: assert(From != To && "Redundant dbg value transfer"); 8113 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); 8114 if (From == To || FromNode == ToNode) 8115 return; 8116 8117 if (!FromNode->getHasDebugValue()) 8118 return; 8119 8120 SmallVector<SDDbgValue *, 2> ClonedDVs; 8121 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { 8122 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated()) 8123 continue; 8124 8125 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); 8126 8127 // Just transfer the dbg value attached to From. 8128 if (Dbg->getResNo() != From.getResNo()) 8129 continue; 8130 8131 DIVariable *Var = Dbg->getVariable(); 8132 auto *Expr = Dbg->getExpression(); 8133 // If a fragment is requested, update the expression. 8134 if (SizeInBits) { 8135 // When splitting a larger (e.g., sign-extended) value whose 8136 // lower bits are described with an SDDbgValue, do not attempt 8137 // to transfer the SDDbgValue to the upper bits. 8138 if (auto FI = Expr->getFragmentInfo()) 8139 if (OffsetInBits + SizeInBits > FI->SizeInBits) 8140 continue; 8141 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, 8142 SizeInBits); 8143 if (!Fragment) 8144 continue; 8145 Expr = *Fragment; 8146 } 8147 // Clone the SDDbgValue and move it to To. 
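    // E.g. when an i64 value is split into two i32 halves, each half receives
    // a clone whose DIExpression gained a DW_OP_LLVM_fragment covering bits
    // [0,32) or [32,64) of the variable (illustrative sketch).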
8148 SDDbgValue *Clone = getDbgValue( 8149 Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(), 8150 std::max(ToNode->getIROrder(), Dbg->getOrder())); 8151 ClonedDVs.push_back(Clone); 8152 8153 if (InvalidateDbg) { 8154 // Invalidate value and indicate the SDDbgValue should not be emitted. 8155 Dbg->setIsInvalidated(); 8156 Dbg->setIsEmitted(); 8157 } 8158 } 8159 8160 for (SDDbgValue *Dbg : ClonedDVs) 8161 AddDbgValue(Dbg, ToNode, false); 8162 } 8163 8164 void SelectionDAG::salvageDebugInfo(SDNode &N) { 8165 if (!N.getHasDebugValue()) 8166 return; 8167 8168 SmallVector<SDDbgValue *, 2> ClonedDVs; 8169 for (auto DV : GetDbgValues(&N)) { 8170 if (DV->isInvalidated()) 8171 continue; 8172 switch (N.getOpcode()) { 8173 default: 8174 break; 8175 case ISD::ADD: 8176 SDValue N0 = N.getOperand(0); 8177 SDValue N1 = N.getOperand(1); 8178 if (!isConstantIntBuildVectorOrConstantInt(N0) && 8179 isConstantIntBuildVectorOrConstantInt(N1)) { 8180 uint64_t Offset = N.getConstantOperandVal(1); 8181 // Rewrite an ADD constant node into a DIExpression. Since we are 8182 // performing arithmetic to compute the variable's *value* in the 8183 // DIExpression, we need to mark the expression with a 8184 // DW_OP_stack_value. 8185 auto *DIExpr = DV->getExpression(); 8186 DIExpr = 8187 DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset); 8188 SDDbgValue *Clone = 8189 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(), 8190 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder()); 8191 ClonedDVs.push_back(Clone); 8192 DV->setIsInvalidated(); 8193 DV->setIsEmitted(); 8194 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; 8195 N0.getNode()->dumprFull(this); 8196 dbgs() << " into " << *DIExpr << '\n'); 8197 } 8198 } 8199 } 8200 8201 for (SDDbgValue *Dbg : ClonedDVs) 8202 AddDbgValue(Dbg, Dbg->getSDNode(), false); 8203 } 8204 8205 /// Creates a SDDbgLabel node. 8206 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label, 8207 const DebugLoc &DL, unsigned O) { 8208 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && 8209 "Expected inlined-at fields to agree"); 8210 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); 8211 } 8212 8213 namespace { 8214 8215 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 8216 /// pointed to by a use iterator is deleted, increment the use iterator 8217 /// so that it doesn't dangle. 8218 /// 8219 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 8220 SDNode::use_iterator &UI; 8221 SDNode::use_iterator &UE; 8222 8223 void NodeDeleted(SDNode *N, SDNode *E) override { 8224 // Increment the iterator as needed. 8225 while (UI != UE && N == *UI) 8226 ++UI; 8227 } 8228 8229 public: 8230 RAUWUpdateListener(SelectionDAG &d, 8231 SDNode::use_iterator &ui, 8232 SDNode::use_iterator &ue) 8233 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 8234 }; 8235 8236 } // end anonymous namespace 8237 8238 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8239 /// This can cause recursive merging of nodes in the DAG. 8240 /// 8241 /// This version assumes From has a single result value. 
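/// For example (sketch): after proving (add X, 0) == X, a combine could call
///   DAG.ReplaceAllUsesWith(SDValue(AddNode, 0), X);
/// so that every user of the add reads X directly.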
8242 ///
8243 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
8244   SDNode *From = FromN.getNode();
8245   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
8246          "Cannot replace with this method!");
8247   assert(From != To.getNode() && "Cannot replace uses of a value with itself");
8248
8249   // Preserve Debug Values
8250   transferDbgValues(FromN, To);
8251
8252   // Iterate over all the existing uses of From. New uses will be added
8253   // to the beginning of the use list, which we avoid visiting.
8254   // This specifically avoids visiting uses of From that arise while the
8255   // replacement is happening, because any such uses would be the result
8256   // of CSE: if an existing node looks like From after one of its operands
8257   // is replaced by To, we don't want to replace all of its users with To
8258   // too. See PR3018 for more info.
8259   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8260   RAUWUpdateListener Listener(*this, UI, UE);
8261   while (UI != UE) {
8262     SDNode *User = *UI;
8263
8264     // This node is about to morph, remove its old self from the CSE maps.
8265     RemoveNodeFromCSEMaps(User);
8266
8267     // A user can appear in a use list multiple times, and when this
8268     // happens the uses are usually next to each other in the list.
8269     // To help reduce the number of CSE recomputations, process all
8270     // the uses of this user that we can find this way.
8271     do {
8272       SDUse &Use = UI.getUse();
8273       ++UI;
8274       Use.set(To);
8275       if (To->isDivergent() != From->isDivergent())
8276         updateDivergence(User);
8277     } while (UI != UE && *UI == User);
8278     // Now that we have modified User, add it back to the CSE maps. If it
8279     // already exists there, recursively merge the results together.
8280     AddModifiedNodeToCSEMaps(User);
8281   }
8282
8283   // If we just RAUW'd the root, take note.
8284   if (FromN == getRoot())
8285     setRoot(To);
8286 }
8287
8288 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8289 /// This can cause recursive merging of nodes in the DAG.
8290 ///
8291 /// This version assumes that for each value of From, there is a
8292 /// corresponding value in To in the same position with the same type.
8293 ///
8294 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
8295 #ifndef NDEBUG
8296   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8297     assert((!From->hasAnyUseOfValue(i) ||
8298             From->getValueType(i) == To->getValueType(i)) &&
8299            "Cannot use this version of ReplaceAllUsesWith!");
8300 #endif
8301
8302   // Handle the trivial case.
8303   if (From == To)
8304     return;
8305
8306   // Preserve Debug Info. Only do this if there's a use.
8307   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8308     if (From->hasAnyUseOfValue(i)) {
8309       assert((i < To->getNumValues()) && "Invalid To location");
8310       transferDbgValues(SDValue(From, i), SDValue(To, i));
8311     }
8312
8313   // Iterate over just the existing users of From. See the comments in
8314   // the ReplaceAllUsesWith above.
8315   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8316   RAUWUpdateListener Listener(*this, UI, UE);
8317   while (UI != UE) {
8318     SDNode *User = *UI;
8319
8320     // This node is about to morph, remove its old self from the CSE maps.
8321     RemoveNodeFromCSEMaps(User);
8322
8323     // A user can appear in a use list multiple times, and when this
8324     // happens the uses are usually next to each other in the list.
8325     // To help reduce the number of CSE recomputations, process all
8326     // the uses of this user that we can find this way.
8327     do {
8328       SDUse &Use = UI.getUse();
8329       ++UI;
8330       Use.setNode(To);
8331       if (To->isDivergent() != From->isDivergent())
8332         updateDivergence(User);
8333     } while (UI != UE && *UI == User);
8334
8335     // Now that we have modified User, add it back to the CSE maps. If it
8336     // already exists there, recursively merge the results together.
8337     AddModifiedNodeToCSEMaps(User);
8338   }
8339
8340   // If we just RAUW'd the root, take note.
8341   if (From == getRoot().getNode())
8342     setRoot(SDValue(To, getRoot().getResNo()));
8343 }
8344
8345 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8346 /// This can cause recursive merging of nodes in the DAG.
8347 ///
8348 /// This version can replace From with any result values. To must match the
8349 /// number and types of values returned by From.
8350 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
8351   if (From->getNumValues() == 1)   // Handle the simple case efficiently.
8352     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
8353
8354   // Preserve Debug Info.
8355   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8356     transferDbgValues(SDValue(From, i), To[i]);
8357
8358   // Iterate over just the existing users of From. See the comments in
8359   // the ReplaceAllUsesWith above.
8360   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8361   RAUWUpdateListener Listener(*this, UI, UE);
8362   while (UI != UE) {
8363     SDNode *User = *UI;
8364
8365     // This node is about to morph, remove its old self from the CSE maps.
8366     RemoveNodeFromCSEMaps(User);
8367
8368     // A user can appear in a use list multiple times, and when this happens the
8369     // uses are usually next to each other in the list. To help reduce the
8370     // number of CSE and divergence recomputations, process all the uses of this
8371     // user that we can find this way.
8372     bool To_IsDivergent = false;
8373     do {
8374       SDUse &Use = UI.getUse();
8375       const SDValue &ToOp = To[Use.getResNo()];
8376       ++UI;
8377       Use.set(ToOp);
8378       To_IsDivergent |= ToOp->isDivergent();
8379     } while (UI != UE && *UI == User);
8380
8381     if (To_IsDivergent != From->isDivergent())
8382       updateDivergence(User);
8383
8384     // Now that we have modified User, add it back to the CSE maps. If it
8385     // already exists there, recursively merge the results together.
8386     AddModifiedNodeToCSEMaps(User);
8387   }
8388
8389   // If we just RAUW'd the root, take note.
8390   if (From == getRoot().getNode())
8391     setRoot(SDValue(To[getRoot().getResNo()]));
8392 }
8393
8394 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
8395 /// uses of other values produced by From.getNode() alone. This can cause
8396 /// recursive merging of nodes in the DAG, as for ReplaceAllUsesWith.
8397 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
8398   // Handle the really simple, really trivial case efficiently.
8399   if (From == To) return;
8400
8401   // Handle the simple, trivial case efficiently.
8402   if (From.getNode()->getNumValues() == 1) {
8403     ReplaceAllUsesWith(From, To);
8404     return;
8405   }
8406
8407   // Preserve Debug Info.
8408   transferDbgValues(From, To);
8409
8410   // Iterate over just the existing users of From. See the comments in
8411   // the ReplaceAllUsesWith above.
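  // (The RAUWUpdateListener below keeps UI/UE from dangling when a recursive
  // CSE merge triggered by AddModifiedNodeToCSEMaps deletes the node the
  // iterators currently reference.)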
8412 SDNode::use_iterator UI = From.getNode()->use_begin(), 8413 UE = From.getNode()->use_end(); 8414 RAUWUpdateListener Listener(*this, UI, UE); 8415 while (UI != UE) { 8416 SDNode *User = *UI; 8417 bool UserRemovedFromCSEMaps = false; 8418 8419 // A user can appear in a use list multiple times, and when this 8420 // happens the uses are usually next to each other in the list. 8421 // To help reduce the number of CSE recomputations, process all 8422 // the uses of this user that we can find this way. 8423 do { 8424 SDUse &Use = UI.getUse(); 8425 8426 // Skip uses of different values from the same node. 8427 if (Use.getResNo() != From.getResNo()) { 8428 ++UI; 8429 continue; 8430 } 8431 8432 // If this node hasn't been modified yet, it's still in the CSE maps, 8433 // so remove its old self from the CSE maps. 8434 if (!UserRemovedFromCSEMaps) { 8435 RemoveNodeFromCSEMaps(User); 8436 UserRemovedFromCSEMaps = true; 8437 } 8438 8439 ++UI; 8440 Use.set(To); 8441 if (To->isDivergent() != From->isDivergent()) 8442 updateDivergence(User); 8443 } while (UI != UE && *UI == User); 8444 // We are iterating over all uses of the From node, so if a use 8445 // doesn't use the specific value, no changes are made. 8446 if (!UserRemovedFromCSEMaps) 8447 continue; 8448 8449 // Now that we have modified User, add it back to the CSE maps. If it 8450 // already exists there, recursively merge the results together. 8451 AddModifiedNodeToCSEMaps(User); 8452 } 8453 8454 // If we just RAUW'd the root, take note. 8455 if (From == getRoot()) 8456 setRoot(To); 8457 } 8458 8459 namespace { 8460 8461 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith 8462 /// to record information about a use. 8463 struct UseMemo { 8464 SDNode *User; 8465 unsigned Index; 8466 SDUse *Use; 8467 }; 8468 8469 /// operator< - Sort Memos by User. 
8470 bool operator<(const UseMemo &L, const UseMemo &R) {
8471   return (intptr_t)L.User < (intptr_t)R.User;
8472 }
8473
8474 } // end anonymous namespace
8475
8476 void SelectionDAG::updateDivergence(SDNode *N)
8477 {
8478   if (TLI->isSDNodeAlwaysUniform(N))
8479     return;
8480   bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
8481   for (auto &Op : N->ops()) {
8482     if (Op.Val.getValueType() != MVT::Other)
8483       IsDivergent |= Op.getNode()->isDivergent();
8484   }
8485   if (N->SDNodeBits.IsDivergent != IsDivergent) {
8486     N->SDNodeBits.IsDivergent = IsDivergent;
8487     for (auto U : N->uses()) {
8488       updateDivergence(U);
8489     }
8490   }
8491 }
8492
8493 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
8494   DenseMap<SDNode *, unsigned> Degree;
8495   Order.reserve(AllNodes.size());
8496   for (auto &N : allnodes()) {
8497     unsigned NOps = N.getNumOperands();
8498     Degree[&N] = NOps;
8499     if (0 == NOps)
8500       Order.push_back(&N);
8501   }
8502   for (size_t I = 0; I != Order.size(); ++I) {
8503     SDNode *N = Order[I];
8504     for (auto U : N->uses()) {
8505       unsigned &UnsortedOps = Degree[U];
8506       if (0 == --UnsortedOps)
8507         Order.push_back(U);
8508     }
8509   }
8510 }
8511
8512 #ifndef NDEBUG
8513 void SelectionDAG::VerifyDAGDiverence() {
8514   std::vector<SDNode *> TopoOrder;
8515   CreateTopologicalOrder(TopoOrder);
8516   const TargetLowering &TLI = getTargetLoweringInfo();
8517   DenseMap<const SDNode *, bool> DivergenceMap;
8518   for (auto &N : allnodes()) {
8519     DivergenceMap[&N] = false;
8520   }
8521   for (auto N : TopoOrder) {
8522     bool IsDivergent = DivergenceMap[N];
8523     bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
8524     for (auto &Op : N->ops()) {
8525       if (Op.Val.getValueType() != MVT::Other)
8526         IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
8527     }
8528     if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) {
8529       DivergenceMap[N] = true;
8530     }
8531   }
8532   for (auto &N : allnodes()) {
8533     (void)N;
8534     assert(DivergenceMap[&N] == N.isDivergent() &&
8535            "Divergence bit inconsistency detected\n");
8536   }
8537 }
8538 #endif
8539
8540 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
8541 /// uses of other values produced by From.getNode() alone. The same value
8542 /// may appear in both the From and To list. This can cause recursive
8543 /// merging of nodes in the DAG, as for ReplaceAllUsesWith.
8544 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
8545                                               const SDValue *To,
8546                                               unsigned Num) {
8547   // Handle the simple, trivial case efficiently.
8548   if (Num == 1)
8549     return ReplaceAllUsesOfValueWith(*From, *To);
8550
8551   transferDbgValues(*From, *To);
8552
8553   // Read up all the uses and make records of them. This helps to handle
8554   // new uses that are introduced during the
8555   // replacement process.
8556   SmallVector<UseMemo, 4> Uses;
8557   for (unsigned i = 0; i != Num; ++i) {
8558     unsigned FromResNo = From[i].getResNo();
8559     SDNode *FromNode = From[i].getNode();
8560     for (SDNode::use_iterator UI = FromNode->use_begin(),
8561          E = FromNode->use_end(); UI != E; ++UI) {
8562       SDUse &Use = UI.getUse();
8563       if (Use.getResNo() == FromResNo) {
8564         UseMemo Memo = { *UI, i, &Use };
8565         Uses.push_back(Memo);
8566       }
8567     }
8568   }
8569
8570   // Sort the uses, so that all the uses from a given User are together.
8571   llvm::sort(Uses);
8572
8573   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8574        UseIndex != UseIndexEnd; ) {
8575     // We know that this user uses some value of From.
If it is the right
8576     // value, update it.
8577     SDNode *User = Uses[UseIndex].User;
8578
8579     // This node is about to morph, remove its old self from the CSE maps.
8580     RemoveNodeFromCSEMaps(User);
8581
8582     // The Uses array is sorted, so all the uses for a given User
8583     // are next to each other in the list.
8584     // To help reduce the number of CSE recomputations, process all
8585     // the uses of this user that we can find this way.
8586     do {
8587       unsigned i = Uses[UseIndex].Index;
8588       SDUse &Use = *Uses[UseIndex].Use;
8589       ++UseIndex;
8590
8591       Use.set(To[i]);
8592     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8593
8594     // Now that we have modified User, add it back to the CSE maps. If it
8595     // already exists there, recursively merge the results together.
8596     AddModifiedNodeToCSEMaps(User);
8597   }
8598 }
8599
8600 /// AssignTopologicalOrder - Assign a unique node id to each node in the DAG
8601 /// based on their topological order. Returns the total number of nodes; the
8602 /// assigned ids run from 0 up to that total minus one.
8603 unsigned SelectionDAG::AssignTopologicalOrder() {
8604   unsigned DAGSize = 0;
8605
8606   // SortedPos tracks the progress of the algorithm. Nodes before it are
8607   // sorted, nodes after it are unsorted. When the algorithm completes
8608   // it is at the end of the list.
8609   allnodes_iterator SortedPos = allnodes_begin();
8610
8611   // Visit all the nodes. Move nodes with no operands to the front of
8612   // the list immediately. Annotate nodes that do have operands with their
8613   // operand count. Before we do this, the Node Id fields of the nodes
8614   // may contain arbitrary values. After, the Node Id fields for nodes
8615   // before SortedPos will contain the topological sort index, and the
8616   // Node Id fields for nodes at SortedPos and after will contain the
8617   // count of outstanding operands.
8618   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
8619     SDNode *N = &*I++;
8620     checkForCycles(N, this);
8621     unsigned Degree = N->getNumOperands();
8622     if (Degree == 0) {
8623       // A node with no operands: add it to the result array immediately.
8624       N->setNodeId(DAGSize++);
8625       allnodes_iterator Q(N);
8626       if (Q != SortedPos)
8627         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
8628       assert(SortedPos != AllNodes.end() && "Overran node list");
8629       ++SortedPos;
8630     } else {
8631       // Temporarily use the Node Id as scratch space for the degree count.
8632       N->setNodeId(Degree);
8633     }
8634   }
8635
8636   // Visit all the nodes. As we iterate, move nodes into sorted order,
8637   // such that by the time the end is reached all nodes will be sorted.
8638   for (SDNode &Node : allnodes()) {
8639     SDNode *N = &Node;
8640     checkForCycles(N, this);
8641     // N is in sorted position, so all its uses have one less operand
8642     // that needs to be sorted.
8643     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
8644          UI != UE; ++UI) {
8645       SDNode *P = *UI;
8646       unsigned Degree = P->getNodeId();
8647       assert(Degree != 0 && "Invalid node degree");
8648       --Degree;
8649       if (Degree == 0) {
8650         // All of P's operands are sorted, so P may be sorted now.
8651         P->setNodeId(DAGSize++);
8652         if (P->getIterator() != SortedPos)
8653           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
8654         assert(SortedPos != AllNodes.end() && "Overran node list");
8655         ++SortedPos;
8656       } else {
8657         // Update P's outstanding operand count.
8658         P->setNodeId(Degree);
8659       }
8660     }
8661     if (Node.getIterator() == SortedPos) {
8662 #ifndef NDEBUG
8663       allnodes_iterator I(N);
8664       SDNode *S = &*++I;
8665       dbgs() << "Overran sorted position:\n";
8666       S->dumprFull(this); dbgs() << "\n";
8667       dbgs() << "Checking if this is due to cycles\n";
8668       checkForCycles(this, true);
8669 #endif
8670       llvm_unreachable(nullptr);
8671     }
8672   }
8673
8674   assert(SortedPos == AllNodes.end() &&
8675          "Topological sort incomplete!");
8676   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
8677          "First node in topological sort is not the entry token!");
8678   assert(AllNodes.front().getNodeId() == 0 &&
8679          "First node in topological sort has non-zero id!");
8680   assert(AllNodes.front().getNumOperands() == 0 &&
8681          "First node in topological sort has operands!");
8682   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
8683          "Last node in topological sort has unexpected id!");
8684   assert(AllNodes.back().use_empty() &&
8685          "Last node in topological sort has users!");
8686   assert(DAGSize == allnodes_size() && "Node count mismatch!");
8687   return DAGSize;
8688 }
8689
8690 /// AddDbgValue - Add a dbg_value record. If SD is non-null, that means the
8691 /// value is produced by SD.
8692 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
8693   if (SD) {
8694     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
8695     SD->setHasDebugValue(true);
8696   }
8697   DbgInfo->add(DB, SD, isParameter);
8698 }
8699
8700 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
8701   DbgInfo->add(DB);
8702 }
8703
8704 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
8705                                                    SDValue NewMemOp) {
8706   assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
8707   // The new memory operation must have the same position as the old load in
8708   // terms of memory dependency. Create a TokenFactor for the old load and new
8709   // memory operation and update uses of the old load's output chain to use that
8710   // TokenFactor.
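  // Sketch of the rewrite (chain values only):
  //   before:  users ---> (OldLoad:1)
  //   after:   users ---> TokenFactor((OldLoad:1), (NewMemOp:1))
  // so existing consumers order against both the old load and the new op.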
8711 SDValue OldChain = SDValue(OldLoad, 1); 8712 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 8713 if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1)) 8714 return NewChain; 8715 8716 SDValue TokenFactor = 8717 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 8718 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 8719 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 8720 return TokenFactor; 8721 } 8722 8723 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op, 8724 Function **OutFunction) { 8725 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol"); 8726 8727 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 8728 auto *Module = MF->getFunction().getParent(); 8729 auto *Function = Module->getFunction(Symbol); 8730 8731 if (OutFunction != nullptr) 8732 *OutFunction = Function; 8733 8734 if (Function != nullptr) { 8735 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace()); 8736 return getGlobalAddress(Function, SDLoc(Op), PtrTy); 8737 } 8738 8739 std::string ErrorStr; 8740 raw_string_ostream ErrorFormatter(ErrorStr); 8741 8742 ErrorFormatter << "Undefined external symbol "; 8743 ErrorFormatter << '"' << Symbol << '"'; 8744 ErrorFormatter.flush(); 8745 8746 report_fatal_error(ErrorStr); 8747 } 8748 8749 //===----------------------------------------------------------------------===// 8750 // SDNode Class 8751 //===----------------------------------------------------------------------===// 8752 8753 bool llvm::isNullConstant(SDValue V) { 8754 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8755 return Const != nullptr && Const->isNullValue(); 8756 } 8757 8758 bool llvm::isNullFPConstant(SDValue V) { 8759 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 8760 return Const != nullptr && Const->isZero() && !Const->isNegative(); 8761 } 8762 8763 bool llvm::isAllOnesConstant(SDValue V) { 8764 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8765 return Const != nullptr && Const->isAllOnesValue(); 8766 } 8767 8768 bool llvm::isOneConstant(SDValue V) { 8769 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8770 return Const != nullptr && Const->isOne(); 8771 } 8772 8773 SDValue llvm::peekThroughBitcasts(SDValue V) { 8774 while (V.getOpcode() == ISD::BITCAST) 8775 V = V.getOperand(0); 8776 return V; 8777 } 8778 8779 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { 8780 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) 8781 V = V.getOperand(0); 8782 return V; 8783 } 8784 8785 SDValue llvm::peekThroughExtractSubvectors(SDValue V) { 8786 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) 8787 V = V.getOperand(0); 8788 return V; 8789 } 8790 8791 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) { 8792 if (V.getOpcode() != ISD::XOR) 8793 return false; 8794 V = peekThroughBitcasts(V.getOperand(1)); 8795 unsigned NumBits = V.getScalarValueSizeInBits(); 8796 ConstantSDNode *C = 8797 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true); 8798 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits); 8799 } 8800 8801 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs, 8802 bool AllowTruncation) { 8803 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8804 return CN; 8805 8806 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8807 BitVector UndefElements; 8808 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 8809 8810 // BuildVectors can truncate their operands. 
Ignore that case here unless
8811     // AllowTruncation is set.
8812     if (CN && (UndefElements.none() || AllowUndefs)) {
8813       EVT CVT = CN->getValueType(0);
8814       EVT NSVT = N.getValueType().getScalarType();
8815       assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
8816       if (AllowTruncation || (CVT == NSVT))
8817         return CN;
8818     }
8819   }
8820
8821   return nullptr;
8822 }
8823
8824 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
8825                                           bool AllowUndefs,
8826                                           bool AllowTruncation) {
8827   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
8828     return CN;
8829
8830   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8831     BitVector UndefElements;
8832     ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
8833
8834     // BuildVectors can truncate their operands. Ignore that case here unless
8835     // AllowTruncation is set.
8836     if (CN && (UndefElements.none() || AllowUndefs)) {
8837       EVT CVT = CN->getValueType(0);
8838       EVT NSVT = N.getValueType().getScalarType();
8839       assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
8840       if (AllowTruncation || (CVT == NSVT))
8841         return CN;
8842     }
8843   }
8844
8845   return nullptr;
8846 }
8847
8848 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
8849   if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
8850     return CN;
8851
8852   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8853     BitVector UndefElements;
8854     ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
8855     if (CN && (UndefElements.none() || AllowUndefs))
8856       return CN;
8857   }
8858
8859   return nullptr;
8860 }
8861
8862 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
8863                                               const APInt &DemandedElts,
8864                                               bool AllowUndefs) {
8865   if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
8866     return CN;
8867
8868   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8869     BitVector UndefElements;
8870     ConstantFPSDNode *CN =
8871         BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
8872     if (CN && (UndefElements.none() || AllowUndefs))
8873       return CN;
8874   }
8875
8876   return nullptr;
8877 }
8878
8879 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
8880   // TODO: may want to use peekThroughBitcasts() here.
8881   ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
8882   return C && C->isNullValue();
8883 }
8884
8885 bool llvm::isOneOrOneSplat(SDValue N) {
8886   // TODO: may want to use peekThroughBitcasts() here.
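  // The width check below rejects implicitly truncated splats: e.g. a v8i16
  // BUILD_VECTOR whose operands are i32 constant 1s is not reported as a
  // "one" splat, because the constant's type is wider than the element type.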
8887 unsigned BitWidth = N.getScalarValueSizeInBits(); 8888 ConstantSDNode *C = isConstOrConstSplat(N); 8889 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth; 8890 } 8891 8892 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) { 8893 N = peekThroughBitcasts(N); 8894 unsigned BitWidth = N.getScalarValueSizeInBits(); 8895 ConstantSDNode *C = isConstOrConstSplat(N); 8896 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth; 8897 } 8898 8899 HandleSDNode::~HandleSDNode() { 8900 DropOperands(); 8901 } 8902 8903 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 8904 const DebugLoc &DL, 8905 const GlobalValue *GA, EVT VT, 8906 int64_t o, unsigned TF) 8907 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 8908 TheGlobal = GA; 8909 } 8910 8911 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 8912 EVT VT, unsigned SrcAS, 8913 unsigned DestAS) 8914 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 8915 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 8916 8917 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 8918 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 8919 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 8920 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 8921 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 8922 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 8923 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 8924 8925 // We check here that the size of the memory operand fits within the size of 8926 // the MMO. This is because the MMO might indicate only a possible address 8927 // range instead of specifying the affected memory addresses precisely. 8928 // TODO: Make MachineMemOperands aware of scalable vectors. 8929 assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() && 8930 "Size mismatch!"); 8931 } 8932 8933 /// Profile - Gather unique data for the node. 8934 /// 8935 void SDNode::Profile(FoldingSetNodeID &ID) const { 8936 AddNodeIDNode(ID, this); 8937 } 8938 8939 namespace { 8940 8941 struct EVTArray { 8942 std::vector<EVT> VTs; 8943 8944 EVTArray() { 8945 VTs.reserve(MVT::LAST_VALUETYPE); 8946 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 8947 VTs.push_back(MVT((MVT::SimpleValueType)i)); 8948 } 8949 }; 8950 8951 } // end anonymous namespace 8952 8953 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 8954 static ManagedStatic<EVTArray> SimpleVTArray; 8955 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 8956 8957 /// getValueTypeList - Return a pointer to the specified value type. 8958 /// 8959 const EVT *SDNode::getValueTypeList(EVT VT) { 8960 if (VT.isExtended()) { 8961 sys::SmartScopedLock<true> Lock(*VTMutex); 8962 return &(*EVTs->insert(VT).first); 8963 } else { 8964 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 8965 "Value type out of range!"); 8966 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 8967 } 8968 } 8969 8970 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 8971 /// indicated value. This method ignores uses of other values defined by this 8972 /// operation. 
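/// For example, Load->hasNUsesOfValue(1, 1) is true exactly when the load's
/// chain result (result #1) has a single use, regardless of how many users
/// the loaded value (result #0) has.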
8973 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
8974   assert(Value < getNumValues() && "Bad value!");
8975
8976   // TODO: Only iterate over uses of a given value of the node
8977   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
8978     if (UI.getUse().getResNo() == Value) {
8979       if (NUses == 0)
8980         return false;
8981       --NUses;
8982     }
8983   }
8984
8985   // Found exactly the right number of uses?
8986   return NUses == 0;
8987 }
8988
8989 /// hasAnyUseOfValue - Return true if there is any use of the indicated
8990 /// value. This method ignores uses of other values defined by this operation.
8991 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
8992   assert(Value < getNumValues() && "Bad value!");
8993
8994   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
8995     if (UI.getUse().getResNo() == Value)
8996       return true;
8997
8998   return false;
8999 }
9000
9001 /// isOnlyUserOf - Return true if this node is the only use of N.
9002 bool SDNode::isOnlyUserOf(const SDNode *N) const {
9003   bool Seen = false;
9004   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
9005     SDNode *User = *I;
9006     if (User == this)
9007       Seen = true;
9008     else
9009       return false;
9010   }
9011
9012   return Seen;
9013 }
9014
9015 /// Return true if the only users of N are contained in Nodes.
9016 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
9017   bool Seen = false;
9018   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
9019     SDNode *User = *I;
9020     if (llvm::any_of(Nodes,
9021                      [&User](const SDNode *Node) { return User == Node; }))
9022       Seen = true;
9023     else
9024       return false;
9025   }
9026
9027   return Seen;
9028 }
9029
9030 /// isOperandOf - Return true if this node is an operand of N.
9031 bool SDValue::isOperandOf(const SDNode *N) const {
9032   return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; });
9033 }
9034
9035 bool SDNode::isOperandOf(const SDNode *N) const {
9036   return any_of(N->op_values(),
9037                 [this](SDValue Op) { return this == Op.getNode(); });
9038 }
9039
9040 /// reachesChainWithoutSideEffects - Return true if this operand (which must
9041 /// be a chain) reaches the specified operand without crossing any
9042 /// side-effecting instructions on any chain path. In practice, this looks
9043 /// through token factors and non-volatile loads. In order to remain efficient,
9044 /// this only looks a couple of nodes in; it does not do an exhaustive search.
9045 ///
9046 /// Note that we only need to examine chains when we're searching for
9047 /// side-effects; SelectionDAG requires that all side-effects are represented
9048 /// by chains, even if another operand would force a specific ordering. This
9049 /// constraint is necessary to allow transformations like splitting loads.
9050 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
9051                                              unsigned Depth) const {
9052   if (*this == Dest) return true;
9053
9054   // Don't search too deeply, we just want to be able to see through
9055   // TokenFactor's etc.
9056   if (Depth == 0) return false;
9057
9058   // If this is a token factor, all inputs to the TF happen in parallel.
9059   if (getOpcode() == ISD::TokenFactor) {
9060     // First, try a shallow search.
9061     if (is_contained((*this)->ops(), Dest)) {
9062       // We found the chain we want as an operand of this TokenFactor.
9063 // Essentially, we reach the chain without side-effects if we could 9064 // serialize the TokenFactor into a simple chain of operations with 9065 // Dest as the last operation. This is automatically true if the 9066 // chain has one use: there are no other ordering constraints. 9067 // If the chain has more than one use, we give up: some other 9068 // use of Dest might force a side-effect between Dest and the current 9069 // node. 9070 if (Dest.hasOneUse()) 9071 return true; 9072 } 9073 // Next, try a deep search: check whether every operand of the TokenFactor 9074 // reaches Dest. 9075 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 9076 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 9077 }); 9078 } 9079 9080 // Loads don't have side effects, look through them. 9081 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 9082 if (Ld->isUnordered()) 9083 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 9084 } 9085 return false; 9086 } 9087 9088 bool SDNode::hasPredecessor(const SDNode *N) const { 9089 SmallPtrSet<const SDNode *, 32> Visited; 9090 SmallVector<const SDNode *, 16> Worklist; 9091 Worklist.push_back(this); 9092 return hasPredecessorHelper(N, Visited, Worklist); 9093 } 9094 9095 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 9096 this->Flags.intersectWith(Flags); 9097 } 9098 9099 SDValue 9100 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, 9101 ArrayRef<ISD::NodeType> CandidateBinOps, 9102 bool AllowPartials) { 9103 // The pattern must end in an extract from index 0. 9104 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || 9105 !isNullConstant(Extract->getOperand(1))) 9106 return SDValue(); 9107 9108 // Match against one of the candidate binary ops. 9109 SDValue Op = Extract->getOperand(0); 9110 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) { 9111 return Op.getOpcode() == unsigned(BinOp); 9112 })) 9113 return SDValue(); 9114 9115 // Floating-point reductions may require relaxed constraints on the final step 9116 // of the reduction because they may reorder intermediate operations. 9117 unsigned CandidateBinOp = Op.getOpcode(); 9118 if (Op.getValueType().isFloatingPoint()) { 9119 SDNodeFlags Flags = Op->getFlags(); 9120 switch (CandidateBinOp) { 9121 case ISD::FADD: 9122 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation()) 9123 return SDValue(); 9124 break; 9125 default: 9126 llvm_unreachable("Unhandled FP opcode for binop reduction"); 9127 } 9128 } 9129 9130 // Matching failed - attempt to see if we did enough stages that a partial 9131 // reduction from a subvector is possible. 9132 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) { 9133 if (!AllowPartials || !Op) 9134 return SDValue(); 9135 EVT OpVT = Op.getValueType(); 9136 EVT OpSVT = OpVT.getScalarType(); 9137 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts); 9138 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0)) 9139 return SDValue(); 9140 BinOp = (ISD::NodeType)CandidateBinOp; 9141 return getNode( 9142 ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op, 9143 getConstant(0, SDLoc(Op), TLI->getVectorIdxTy(getDataLayout()))); 9144 }; 9145 9146 // At each stage, we're looking for something that looks like: 9147 // %s = shufflevector <8 x i32> %op, <8 x i32> undef, 9148 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, 9149 // i32 undef, i32 undef, i32 undef, i32 undef> 9150 // %a = binop <8 x i32> %op, %s 9151 // Where the mask changes according to the stage. E.g. 
for a 3-stage pyramid, 9152 // we expect something like: 9153 // <4,5,6,7,u,u,u,u> 9154 // <2,3,u,u,u,u,u,u> 9155 // <1,u,u,u,u,u,u,u> 9156 // While a partial reduction match would be: 9157 // <2,3,u,u,u,u,u,u> 9158 // <1,u,u,u,u,u,u,u> 9159 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements()); 9160 SDValue PrevOp; 9161 for (unsigned i = 0; i < Stages; ++i) { 9162 unsigned MaskEnd = (1 << i); 9163 9164 if (Op.getOpcode() != CandidateBinOp) 9165 return PartialReduction(PrevOp, MaskEnd); 9166 9167 SDValue Op0 = Op.getOperand(0); 9168 SDValue Op1 = Op.getOperand(1); 9169 9170 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0); 9171 if (Shuffle) { 9172 Op = Op1; 9173 } else { 9174 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1); 9175 Op = Op0; 9176 } 9177 9178 // The first operand of the shuffle should be the same as the other operand 9179 // of the binop. 9180 if (!Shuffle || Shuffle->getOperand(0) != Op) 9181 return PartialReduction(PrevOp, MaskEnd); 9182 9183 // Verify the shuffle has the expected (at this stage of the pyramid) mask. 9184 for (int Index = 0; Index < (int)MaskEnd; ++Index) 9185 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index)) 9186 return PartialReduction(PrevOp, MaskEnd); 9187 9188 PrevOp = Op; 9189 } 9190 9191 BinOp = (ISD::NodeType)CandidateBinOp; 9192 return Op; 9193 } 9194 9195 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 9196 assert(N->getNumValues() == 1 && 9197 "Can't unroll a vector with multiple results!"); 9198 9199 EVT VT = N->getValueType(0); 9200 unsigned NE = VT.getVectorNumElements(); 9201 EVT EltVT = VT.getVectorElementType(); 9202 SDLoc dl(N); 9203 9204 SmallVector<SDValue, 8> Scalars; 9205 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 9206 9207 // If ResNE is 0, fully unroll the vector op. 9208 if (ResNE == 0) 9209 ResNE = NE; 9210 else if (NE > ResNE) 9211 NE = ResNE; 9212 9213 unsigned i; 9214 for (i= 0; i != NE; ++i) { 9215 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 9216 SDValue Operand = N->getOperand(j); 9217 EVT OperandVT = Operand.getValueType(); 9218 if (OperandVT.isVector()) { 9219 // A vector operand; extract a single element. 9220 EVT OperandEltVT = OperandVT.getVectorElementType(); 9221 Operands[j] = 9222 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand, 9223 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout()))); 9224 } else { 9225 // A scalar operand; just use it as is. 
9226 Operands[j] = Operand; 9227 } 9228 } 9229 9230 switch (N->getOpcode()) { 9231 default: { 9232 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 9233 N->getFlags())); 9234 break; 9235 } 9236 case ISD::VSELECT: 9237 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 9238 break; 9239 case ISD::SHL: 9240 case ISD::SRA: 9241 case ISD::SRL: 9242 case ISD::ROTL: 9243 case ISD::ROTR: 9244 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 9245 getShiftAmountOperand(Operands[0].getValueType(), 9246 Operands[1]))); 9247 break; 9248 case ISD::SIGN_EXTEND_INREG: { 9249 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 9250 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 9251 Operands[0], 9252 getValueType(ExtVT))); 9253 } 9254 } 9255 } 9256 9257 for (; i < ResNE; ++i) 9258 Scalars.push_back(getUNDEF(EltVT)); 9259 9260 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 9261 return getBuildVector(VecVT, dl, Scalars); 9262 } 9263 9264 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( 9265 SDNode *N, unsigned ResNE) { 9266 unsigned Opcode = N->getOpcode(); 9267 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO || 9268 Opcode == ISD::USUBO || Opcode == ISD::SSUBO || 9269 Opcode == ISD::UMULO || Opcode == ISD::SMULO) && 9270 "Expected an overflow opcode"); 9271 9272 EVT ResVT = N->getValueType(0); 9273 EVT OvVT = N->getValueType(1); 9274 EVT ResEltVT = ResVT.getVectorElementType(); 9275 EVT OvEltVT = OvVT.getVectorElementType(); 9276 SDLoc dl(N); 9277 9278 // If ResNE is 0, fully unroll the vector op. 9279 unsigned NE = ResVT.getVectorNumElements(); 9280 if (ResNE == 0) 9281 ResNE = NE; 9282 else if (NE > ResNE) 9283 NE = ResNE; 9284 9285 SmallVector<SDValue, 8> LHSScalars; 9286 SmallVector<SDValue, 8> RHSScalars; 9287 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); 9288 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); 9289 9290 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); 9291 SDVTList VTs = getVTList(ResEltVT, SVT); 9292 SmallVector<SDValue, 8> ResScalars; 9293 SmallVector<SDValue, 8> OvScalars; 9294 for (unsigned i = 0; i < NE; ++i) { 9295 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); 9296 SDValue Ov = 9297 getSelect(dl, OvEltVT, Res.getValue(1), 9298 getBoolConstant(true, dl, OvEltVT, ResVT), 9299 getConstant(0, dl, OvEltVT)); 9300 9301 ResScalars.push_back(Res); 9302 OvScalars.push_back(Ov); 9303 } 9304 9305 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); 9306 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); 9307 9308 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE); 9309 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE); 9310 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars), 9311 getBuildVector(NewOvVT, dl, OvScalars)); 9312 } 9313 9314 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 9315 LoadSDNode *Base, 9316 unsigned Bytes, 9317 int Dist) const { 9318 if (LD->isVolatile() || Base->isVolatile()) 9319 return false; 9320 // TODO: probably too restrictive for atomics, revisit 9321 if (!LD->isSimple()) 9322 return false; 9323 if (LD->isIndexed() || Base->isIndexed()) 9324 return false; 9325 if (LD->getChain() != Base->getChain()) 9326 return false; 9327 EVT VT = LD->getValueType(0); 9328 if (VT.getSizeInBits() / 8 != Bytes) 9329 return false; 9330 9331 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); 9332 auto LocDecomp = BaseIndexOffset::match(LD, *this); 
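  // equalBaseIndex succeeds when both addresses decompose to the same base
  // and index, producing LD's constant byte offset from Base. E.g. with
  // Bytes == 4, Dist == 1 accepts the load 4 bytes past Base and Dist == -1
  // the load 4 bytes before it.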
9333 9334 int64_t Offset = 0; 9335 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset)) 9336 return (Dist * Bytes == Offset); 9337 return false; 9338 } 9339 9340 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if 9341 /// it cannot be inferred. 9342 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { 9343 // If this is a GlobalAddress + cst, return the alignment. 9344 const GlobalValue *GV = nullptr; 9345 int64_t GVOffset = 0; 9346 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { 9347 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); 9348 KnownBits Known(PtrWidth); 9349 llvm::computeKnownBits(GV, Known, getDataLayout()); 9350 unsigned AlignBits = Known.countMinTrailingZeros(); 9351 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; 9352 if (Align) 9353 return MinAlign(Align, GVOffset); 9354 } 9355 9356 // If this is a direct reference to a stack slot, use information about the 9357 // stack slot's alignment. 9358 int FrameIdx = INT_MIN; 9359 int64_t FrameOffset = 0; 9360 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) { 9361 FrameIdx = FI->getIndex(); 9362 } else if (isBaseWithConstantOffset(Ptr) && 9363 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 9364 // Handle FI+Cst 9365 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 9366 FrameOffset = Ptr.getConstantOperandVal(1); 9367 } 9368 9369 if (FrameIdx != INT_MIN) { 9370 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 9371 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx), 9372 FrameOffset); 9373 return FIInfoAlign; 9374 } 9375 9376 return 0; 9377 } 9378 9379 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type 9380 /// which is split (or expanded) into two not necessarily identical pieces. 9381 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const { 9382 // Currently all types are split in half. 9383 EVT LoVT, HiVT; 9384 if (!VT.isVector()) 9385 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); 9386 else 9387 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext()); 9388 9389 return std::make_pair(LoVT, HiVT); 9390 } 9391 9392 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the 9393 /// low/high part. 9394 std::pair<SDValue, SDValue> 9395 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, 9396 const EVT &HiVT) { 9397 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <= 9398 N.getValueType().getVectorNumElements() && 9399 "More vector elements requested than available!"); 9400 SDValue Lo, Hi; 9401 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, 9402 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout()))); 9403 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, 9404 getConstant(LoVT.getVectorNumElements(), DL, 9405 TLI->getVectorIdxTy(getDataLayout()))); 9406 return std::make_pair(Lo, Hi); 9407 } 9408 9409 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR. 
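/// E.g. a v3f32 value becomes the low three lanes of a v4f32 whose remaining
/// lane is undef.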
/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
  EVT VT = N.getValueType();
  EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                NextPowerOf2(VT.getVectorNumElements()));
  return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
                 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}
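// A minimal usage sketch (illustrative only): scalarizing the first two
// lanes of an assumed v4f32 value `Vec` with ExtractVectorElements.
//
//   SmallVector<SDValue, 4> Elts;
//   DAG.ExtractVectorElements(Vec, Elts, /*Start=*/0, /*Count=*/2);
//   // Elts now holds two f32 EXTRACT_VECTOR_ELT nodes, lanes 0 and 1.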
bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  assert(getNumOperands() == DemandedElts.getBitWidth() &&
         "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}
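// A minimal usage sketch (illustrative only) of the splat query above. For
// an assumed v4i32 BUILD_VECTOR of {-1, -1, -1, -1}, the halving loop in
// isConstantSplat keeps matching halves down to the 8-bit pattern 0xFF, so
// SplatBitSize comes back as 8 unless MinSplatBits forbids it. `BV` is an
// assumed BuildVectorSDNode*.
//
//   APInt SplatValue, SplatUndef;
//   unsigned SplatBitSize;
//   bool HasAnyUndefs;
//   if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
//                           HasAnyUndefs, /*MinSplatBits=*/0))
//     ; // SplatValue == 0xFF, SplatBitSize == 8, HasAnyUndefs == false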
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip Chain. It does not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}

SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}
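// A minimal usage sketch (illustrative only): merging many chains. With an
// assumed SmallVector<SDValue, 8> `Chains` of MVT::Other values (e.g. from
// expanded memory operations), getTokenFactor folds the excess into nested
// TokenFactor nodes so the SDNode operand-count limit is never exceeded.
//
//   SDValue Chain = DAG.getTokenFactor(DL, Chains);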
#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}
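// A minimal usage sketch (illustrative only): forcing the cycle check from a
// local debugging patch, even without EXPENSIVE_CHECKS. `DAG` is an assumed
// SelectionDAG reference; on a cycle this dumps the offending node and
// aborts, and the check is compiled out entirely in NDEBUG builds.
//
//   checkForCycles(&DAG, /*force=*/true);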