//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
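
// For example, isValueValidForType(MVT::f32, APFloat(1.0)) is true because
// 1.0 converts to single precision exactly, while a value such as 1.0e40
// overflows f32's exponent range, sets losesInfo, and is rejected.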
//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have
    // non-0 elements. We have to be a bit careful here, as the type of the
    // constant may not be the same as the type of the vector elements due to
    // type legalization (the elements are promoted to a legal type for the
    // target and a vector of a type may be legal when the base element type
    // is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}
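
// Example of the element-size handling in isBuildVectorAllOnes above: after
// type legalization a v4i8 all-ones value may be represented as
// (BUILD_VECTOR i16 0x00FF, ...). countTrailingOnes(0x00FF) == 8 still covers
// the full 8-bit element width, so the vector is correctly treated as all
// ones even though the operand type is wider than the element type.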
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
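
// Worked example, using the CondCode bit layout implied above (E = bit 0,
// G = bit 1, L = bit 2): SETLT has only L set of the L/G pair, so swapping
// the operands moves it into the G position, producing SETGT; SETEQ and
// SETNE carry neither bit and map to themselves.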
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  bool IsInteger = Type.isInteger();
  unsigned Operation = Op;
  if (IsInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if it is an unsigned operation. Return zero if the operation does not
/// depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
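
// For example, ORing SETGT with SETLT sets both the G and L bits, giving
// SETNE; ANDing SETGT with SETGE leaves only the G bit, giving SETGT.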
//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent
/// them solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID
/// data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID
/// data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}
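
// These Profile routines are the basis of node CSE: two getNode calls with
// the same opcode, VT list, and operands produce identical FoldingSetNodeIDs,
// so the second lookup in the CSEMap finds the node created by the first.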
//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true;  // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;  // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true;  // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a
  // reference to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}
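
// Note the division of labor: DeleteNode requires that the node is already
// unused, while RemoveDeadNode also reclaims any operands that become dead
// once this node is gone.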
void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}
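
// As an illustration of the BUILD_VECTOR checks above: a (BUILD_VECTOR v4i16)
// node must have exactly four operands, and each operand must be an i16 or,
// if the operands were promoted, some wider integer type, with every operand
// matching the type of the first.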
/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that
  // are not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an
/// identical node already exists, in which case transfer all its users to the
/// existing node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}
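
// For example, if (ADD t1, t2) is mutated in place into (ADD t1, t3) and an
// identical (ADD t1, t3) node already exists, AddModifiedNodeToCSEMaps
// redirects every user of the mutated node to the existing node and then
// deletes the mutated copy.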
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}
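
// For example, getEVTAlignment(MVT::iPTR) returns the ABI alignment of an
// i8* in address space 0 (commonly the pointer size), while
// getEVTAlignment(MVT::i32) returns the target's ABI alignment for i32
// (4 on most targets).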
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis * Divergence,
                        ProfileSummaryInfo *PSIin,
                        BlockFrequencyInfo *BFIin) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
  PSI = PSIin;
  BFI = BFIin;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

bool SelectionDAG::shouldOptForSize() const {
  return MF->getFunction().hasOptSize() ||
      llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}
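
// The SDLoc-taking overload below exists because Constant/ConstantFP nodes
// are shared across source locations: when a cached constant is re-found from
// a different location, its DebugLoc is dropped so the debugger does not
// attribute every use of the constant to the first line that created it.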
SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType().getScalarType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}
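
// For example, for an i32 operand Op, getZeroExtendInReg(Op, DL, MVT::i8)
// produces (AND Op, 0xff), i.e. it clears every bit above the low 8.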
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}
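
// The ConstantInt-taking overload below also handles element types that are
// illegal for the target. For example, splatting 0x0000000100000002 into
// v2i64 on a 32-bit target that must expand i64 produces a v4i32
// BUILD_VECTOR of the two 32-bit halves (reversed on big-endian targets)
// that is then bitcast back to v2i64.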
SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V =
        getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isScalableVector())
    Result = getSplatVector(VT, DL, Result);
  else if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
                                             const SDLoc &DL, bool LegalTypes) {
  EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
  return getConstant(Val, DL, ShiftVT);
}

SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}
SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0,
  // and we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
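
// Like the other leaf constructors above, getFrameIndex is CSE'd through the
// FoldingSet: a second call with the same index, type, and target-ness
// returns the previously created FrameIndexSDNode.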
SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = shouldOptForSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
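
// Note that when Alignment is 0 above, the entry's alignment is derived from
// the constant's IR type: the ABI alignment when optimizing for size,
// otherwise the (usually no smaller) preferred alignment.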
SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
                           ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}
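
// For example, with 4 elements per input, commuting
// (shuffle A, B, <0, 5, 2, 7>) yields (shuffle B, A, <4, 1, 6, 3>): indices
// below NElts gain NElts and indices at or above NElts lose it.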
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  if (TLI->hasVectorBlend()) {
    // If shuffling a splat, try to blend the splat instead. We do this here so
    // that even when this arises during lowering we don't have to re-handle it.
    auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      if (!Splat)
        return;

      for (int i = 0; i < NElts; ++i) {
        if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
          continue;

        // If this input comes from undef, mark it as such.
        if (UndefElements[MaskVec[i] - Offset]) {
          MaskVec[i] = -1;
          continue;
        }

        // If we can blend a non-undef lane, use that instead.
        if (!UndefElements[i])
          MaskVec[i] = i + Offset;
      }
    };
    if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
      BlendSplat(N1BV, 0);
    if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
      BlendSplat(N2BV, NElts);
  }

  // Canonicalize all indices into lhs, -> shuffle lhs, undef
  // Canonicalize all indices into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If this is an identity shuffle, return the LHS operand.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
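  // For instance (illustrative): shuffling the splat <x,x,x,x> with any mask,
  // say <3,1,2,0>, still yields <x,x,x,x>, so when the splat has no undef
  // lanes we can return the input vector unchanged below.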
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts. We check that these don't change the number
    // (and size) of elements and just change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
          V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We only have a splat which can skip shuffles if there is a splatted
      // value and no undef lanes rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements matches or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the OperandAllocator is
  // released.
1744 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1745 llvm::copy(MaskVec, MaskAlloc); 1746 1747 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1748 dl.getDebugLoc(), MaskAlloc); 1749 createOperands(N, Ops); 1750 1751 CSEMap.InsertNode(N, IP); 1752 InsertNode(N); 1753 SDValue V = SDValue(N, 0); 1754 NewSDValueDbgMsg(V, "Creating new node: ", this); 1755 return V; 1756 } 1757 1758 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1759 EVT VT = SV.getValueType(0); 1760 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1761 ShuffleVectorSDNode::commuteMask(MaskVec); 1762 1763 SDValue Op0 = SV.getOperand(0); 1764 SDValue Op1 = SV.getOperand(1); 1765 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1766 } 1767 1768 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1769 FoldingSetNodeID ID; 1770 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1771 ID.AddInteger(RegNo); 1772 void *IP = nullptr; 1773 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1774 return SDValue(E, 0); 1775 1776 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1777 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 1778 CSEMap.InsertNode(N, IP); 1779 InsertNode(N); 1780 return SDValue(N, 0); 1781 } 1782 1783 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1784 FoldingSetNodeID ID; 1785 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1786 ID.AddPointer(RegMask); 1787 void *IP = nullptr; 1788 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1789 return SDValue(E, 0); 1790 1791 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1792 CSEMap.InsertNode(N, IP); 1793 InsertNode(N); 1794 return SDValue(N, 0); 1795 } 1796 1797 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1798 MCSymbol *Label) { 1799 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1800 } 1801 1802 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1803 SDValue Root, MCSymbol *Label) { 1804 FoldingSetNodeID ID; 1805 SDValue Ops[] = { Root }; 1806 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1807 ID.AddPointer(Label); 1808 void *IP = nullptr; 1809 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1810 return SDValue(E, 0); 1811 1812 auto *N = 1813 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label); 1814 createOperands(N, Ops); 1815 1816 CSEMap.InsertNode(N, IP); 1817 InsertNode(N); 1818 return SDValue(N, 0); 1819 } 1820 1821 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1822 int64_t Offset, bool isTarget, 1823 unsigned TargetFlags) { 1824 unsigned Opc = isTarget ? 
ISD::TargetBlockAddress : ISD::BlockAddress; 1825 1826 FoldingSetNodeID ID; 1827 AddNodeIDNode(ID, Opc, getVTList(VT), None); 1828 ID.AddPointer(BA); 1829 ID.AddInteger(Offset); 1830 ID.AddInteger(TargetFlags); 1831 void *IP = nullptr; 1832 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1833 return SDValue(E, 0); 1834 1835 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); 1836 CSEMap.InsertNode(N, IP); 1837 InsertNode(N); 1838 return SDValue(N, 0); 1839 } 1840 1841 SDValue SelectionDAG::getSrcValue(const Value *V) { 1842 assert((!V || V->getType()->isPointerTy()) && 1843 "SrcValue is not a pointer?"); 1844 1845 FoldingSetNodeID ID; 1846 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None); 1847 ID.AddPointer(V); 1848 1849 void *IP = nullptr; 1850 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1851 return SDValue(E, 0); 1852 1853 auto *N = newSDNode<SrcValueSDNode>(V); 1854 CSEMap.InsertNode(N, IP); 1855 InsertNode(N); 1856 return SDValue(N, 0); 1857 } 1858 1859 SDValue SelectionDAG::getMDNode(const MDNode *MD) { 1860 FoldingSetNodeID ID; 1861 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None); 1862 ID.AddPointer(MD); 1863 1864 void *IP = nullptr; 1865 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1866 return SDValue(E, 0); 1867 1868 auto *N = newSDNode<MDNodeSDNode>(MD); 1869 CSEMap.InsertNode(N, IP); 1870 InsertNode(N); 1871 return SDValue(N, 0); 1872 } 1873 1874 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { 1875 if (VT == V.getValueType()) 1876 return V; 1877 1878 return getNode(ISD::BITCAST, SDLoc(V), VT, V); 1879 } 1880 1881 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, 1882 unsigned SrcAS, unsigned DestAS) { 1883 SDValue Ops[] = {Ptr}; 1884 FoldingSetNodeID ID; 1885 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); 1886 ID.AddInteger(SrcAS); 1887 ID.AddInteger(DestAS); 1888 1889 void *IP = nullptr; 1890 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 1891 return SDValue(E, 0); 1892 1893 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), 1894 VT, SrcAS, DestAS); 1895 createOperands(N, Ops); 1896 1897 CSEMap.InsertNode(N, IP); 1898 InsertNode(N); 1899 return SDValue(N, 0); 1900 } 1901 1902 /// getShiftAmountOperand - Return the specified value casted to 1903 /// the target's desired shift amount type. 
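/// For example, on a target whose shift amount type is i8 (such as x86, where
/// variable shifts take the count in CL), an i32 shift amount is truncated to
/// i8 here and an i1 amount is zero-extended; vector shift amounts are
/// returned unchanged.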
1904 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1905 EVT OpTy = Op.getValueType(); 1906 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1907 if (OpTy == ShTy || OpTy.isVector()) return Op; 1908 1909 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1910 } 1911 1912 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1913 SDLoc dl(Node); 1914 const TargetLowering &TLI = getTargetLoweringInfo(); 1915 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1916 EVT VT = Node->getValueType(0); 1917 SDValue Tmp1 = Node->getOperand(0); 1918 SDValue Tmp2 = Node->getOperand(1); 1919 const MaybeAlign MA(Node->getConstantOperandVal(3)); 1920 1921 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1922 Tmp2, MachinePointerInfo(V)); 1923 SDValue VAList = VAListLoad; 1924 1925 if (MA && *MA > TLI.getMinStackArgumentAlignment()) { 1926 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1927 getConstant(MA->value() - 1, dl, VAList.getValueType())); 1928 1929 VAList = 1930 getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1931 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType())); 1932 } 1933 1934 // Increment the pointer, VAList, to the next vaarg 1935 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1936 getConstant(getDataLayout().getTypeAllocSize( 1937 VT.getTypeForEVT(*getContext())), 1938 dl, VAList.getValueType())); 1939 // Store the incremented VAList to the legalized pointer 1940 Tmp1 = 1941 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1942 // Load the actual argument out of the pointer VAList 1943 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1944 } 1945 1946 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1947 SDLoc dl(Node); 1948 const TargetLowering &TLI = getTargetLoweringInfo(); 1949 // This defaults to loading a pointer from the input and storing it to the 1950 // output, returning the chain. 
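  // Roughly, for a va_list that is a simple pointer this emits the equivalent
  // of "*(char **)Dest = *(char **)Src;", with the token chain threaded from
  // the load into the store.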
1951 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1952 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1953 SDValue Tmp1 = 1954 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1955 Node->getOperand(2), MachinePointerInfo(VS)); 1956 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1957 MachinePointerInfo(VD)); 1958 } 1959 1960 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1961 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1962 unsigned ByteSize = VT.getStoreSize(); 1963 Type *Ty = VT.getTypeForEVT(*getContext()); 1964 unsigned StackAlign = 1965 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1966 1967 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1968 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1969 } 1970 1971 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1972 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 1973 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1974 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1975 const DataLayout &DL = getDataLayout(); 1976 unsigned Align = 1977 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1978 1979 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1980 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 1981 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1982 } 1983 1984 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 1985 ISD::CondCode Cond, const SDLoc &dl) { 1986 EVT OpVT = N1.getValueType(); 1987 1988 // These setcc operations always fold. 1989 switch (Cond) { 1990 default: break; 1991 case ISD::SETFALSE: 1992 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); 1993 case ISD::SETTRUE: 1994 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); 1995 1996 case ISD::SETOEQ: 1997 case ISD::SETOGT: 1998 case ISD::SETOGE: 1999 case ISD::SETOLT: 2000 case ISD::SETOLE: 2001 case ISD::SETONE: 2002 case ISD::SETO: 2003 case ISD::SETUO: 2004 case ISD::SETUEQ: 2005 case ISD::SETUNE: 2006 assert(!OpVT.isInteger() && "Illegal setcc for integer!"); 2007 break; 2008 } 2009 2010 if (OpVT.isInteger()) { 2011 // For EQ and NE, we can always pick a value for the undef to make the 2012 // predicate pass or fail, so we can return undef. 2013 // Matches behavior in llvm::ConstantFoldCompareInstruction. 2014 // icmp eq/ne X, undef -> undef. 2015 if ((N1.isUndef() || N2.isUndef()) && 2016 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) 2017 return getUNDEF(VT); 2018 2019 // If both operands are undef, we can return undef for int comparison. 2020 // icmp undef, undef -> undef. 2021 if (N1.isUndef() && N2.isUndef()) 2022 return getUNDEF(VT); 2023 2024 // icmp X, X -> true/false 2025 // icmp X, undef -> true/false because undef could be X. 
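    // For instance, (setcc X, X, setule) folds to true while
    // (setcc X, X, setult) folds to false; isTrueWhenEqual(Cond) encodes
    // exactly that distinction.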
2026 if (N1 == N2) 2027 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT); 2028 } 2029 2030 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 2031 const APInt &C2 = N2C->getAPIntValue(); 2032 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 2033 const APInt &C1 = N1C->getAPIntValue(); 2034 2035 switch (Cond) { 2036 default: llvm_unreachable("Unknown integer setcc!"); 2037 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT); 2038 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT); 2039 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT); 2040 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT); 2041 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT); 2042 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT); 2043 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT); 2044 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT); 2045 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT); 2046 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT); 2047 } 2048 } 2049 } 2050 2051 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 2052 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 2053 2054 if (N1CFP && N2CFP) { 2055 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF()); 2056 switch (Cond) { 2057 default: break; 2058 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 2059 return getUNDEF(VT); 2060 LLVM_FALLTHROUGH; 2061 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, 2062 OpVT); 2063 case ISD::SETNE: if (R==APFloat::cmpUnordered) 2064 return getUNDEF(VT); 2065 LLVM_FALLTHROUGH; 2066 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2067 R==APFloat::cmpLessThan, dl, VT, 2068 OpVT); 2069 case ISD::SETLT: if (R==APFloat::cmpUnordered) 2070 return getUNDEF(VT); 2071 LLVM_FALLTHROUGH; 2072 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, 2073 OpVT); 2074 case ISD::SETGT: if (R==APFloat::cmpUnordered) 2075 return getUNDEF(VT); 2076 LLVM_FALLTHROUGH; 2077 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, 2078 VT, OpVT); 2079 case ISD::SETLE: if (R==APFloat::cmpUnordered) 2080 return getUNDEF(VT); 2081 LLVM_FALLTHROUGH; 2082 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || 2083 R==APFloat::cmpEqual, dl, VT, 2084 OpVT); 2085 case ISD::SETGE: if (R==APFloat::cmpUnordered) 2086 return getUNDEF(VT); 2087 LLVM_FALLTHROUGH; 2088 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2089 R==APFloat::cmpEqual, dl, VT, OpVT); 2090 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, 2091 OpVT); 2092 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, 2093 OpVT); 2094 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || 2095 R==APFloat::cmpEqual, dl, VT, 2096 OpVT); 2097 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, 2098 OpVT); 2099 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || 2100 R==APFloat::cmpLessThan, dl, VT, 2101 OpVT); 2102 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || 2103 R==APFloat::cmpUnordered, dl, VT, 2104 OpVT); 2105 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, 2106 VT, OpVT); 2107 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, 2108 OpVT); 2109 } 2110 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) { 2111 // 
Ensure that the constant occurs on the RHS.
    ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
    if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
      return SDValue();
    return getSetCC(dl, VT, N2, N1, SwappedCond);
  } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
             (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
    // If an operand is known to be a NaN (or undef that could be a NaN), we
    // can fold it.
    // Choosing NaN for the undef will always make unordered comparisons
    // succeed and ordered comparisons fail.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    switch (ISD::getUnorderedFlavor(Cond)) {
    default:
      llvm_unreachable("Unknown flavor!");
    case 0: // Known false.
      return getBoolConstant(false, dl, VT, OpVT);
    case 1: // Known true.
      return getBoolConstant(true, dl, VT, OpVT);
    case 2: // Undefined.
      return getUNDEF(VT);
    }
  }

  // Could not fold it.
  return SDValue();
}

/// See if the specified operand can be simplified with the knowledge that
/// only the bits specified by DemandedBits are used.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
  EVT VT = V.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return GetDemandedBits(V, DemandedBits, DemandedElts);
}

/// See if the specified operand can be simplified with the knowledge that
/// only the bits specified by DemandedBits are used in the elements specified
/// by DemandedElts.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
                                      const APInt &DemandedElts) {
  switch (V.getOpcode()) {
  default:
    break;
  case ISD::Constant: {
    auto *CV = cast<ConstantSDNode>(V.getNode());
    assert(CV && "Const value should be ConstSDNode.");
    const APInt &CVal = CV->getAPIntValue();
    APInt NewVal = CVal & DemandedBits;
    if (NewVal != CVal)
      return getConstant(NewVal, SDLoc(V), V.getValueType());
    break;
  }
  case ISD::OR:
  case ISD::XOR:
  case ISD::SIGN_EXTEND_INREG:
    return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
                                                *this, 0);
  case ISD::SRL:
    // Only look at single-use SRLs.
    if (!V.getNode()->hasOneUse())
      break;
    if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
      // See if we can recursively simplify the LHS.
      unsigned Amt = RHSC->getZExtValue();

      // Watch out for shift count overflow though.
      if (Amt >= DemandedBits.getBitWidth())
        break;
      APInt SrcDemandedBits = DemandedBits << Amt;
      if (SDValue SimplifyLHS =
              GetDemandedBits(V.getOperand(0), SrcDemandedBits))
        return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
                       V.getOperand(1));
    }
    break;
  case ISD::AND: {
    // X & -1 -> X (ignoring bits which aren't demanded).
    // Also handle the case where masked out bits in X are known to be zero.
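    // For example, with DemandedBits == 0x0F and a mask of 0xFF, every
    // demanded bit passes through the AND unchanged, so we can return the
    // LHS operand directly.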
2196 if (ConstantSDNode *RHSC = isConstOrConstSplat(V.getOperand(1))) { 2197 const APInt &AndVal = RHSC->getAPIntValue(); 2198 if (DemandedBits.isSubsetOf(AndVal) || 2199 DemandedBits.isSubsetOf(computeKnownBits(V.getOperand(0)).Zero | 2200 AndVal)) 2201 return V.getOperand(0); 2202 } 2203 break; 2204 } 2205 case ISD::ANY_EXTEND: { 2206 SDValue Src = V.getOperand(0); 2207 unsigned SrcBitWidth = Src.getScalarValueSizeInBits(); 2208 // Being conservative here - only peek through if we only demand bits in the 2209 // non-extended source (even though the extended bits are technically 2210 // undef). 2211 if (DemandedBits.getActiveBits() > SrcBitWidth) 2212 break; 2213 APInt SrcDemandedBits = DemandedBits.trunc(SrcBitWidth); 2214 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcDemandedBits)) 2215 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc); 2216 break; 2217 } 2218 } 2219 return SDValue(); 2220 } 2221 2222 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2223 /// use this predicate to simplify operations downstream. 2224 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2225 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2226 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2227 } 2228 2229 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2230 /// this predicate to simplify operations downstream. Mask is known to be zero 2231 /// for bits that V cannot have. 2232 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2233 unsigned Depth) const { 2234 EVT VT = V.getValueType(); 2235 APInt DemandedElts = VT.isVector() 2236 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2237 : APInt(1, 1); 2238 return MaskedValueIsZero(V, Mask, DemandedElts, Depth); 2239 } 2240 2241 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in 2242 /// DemandedElts. We use this predicate to simplify operations downstream. 2243 /// Mask is known to be zero for bits that V cannot have. 2244 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2245 const APInt &DemandedElts, 2246 unsigned Depth) const { 2247 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero); 2248 } 2249 2250 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'. 2251 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask, 2252 unsigned Depth) const { 2253 return Mask.isSubsetOf(computeKnownBits(V, Depth).One); 2254 } 2255 2256 /// isSplatValue - Return true if the vector V has the same value 2257 /// across all DemandedElts. 2258 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, 2259 APInt &UndefElts) { 2260 if (!DemandedElts) 2261 return false; // No demanded elts, better to assume we don't know anything. 
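  // As a concrete example of the cases handled below: the node
  // BUILD_VECTOR(x, undef, x, x) is reported as a splat with
  // UndefElts == 0b0010, letting callers decide whether undef lanes matter.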
2262 2263 EVT VT = V.getValueType(); 2264 assert(VT.isVector() && "Vector type expected"); 2265 2266 unsigned NumElts = VT.getVectorNumElements(); 2267 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch"); 2268 UndefElts = APInt::getNullValue(NumElts); 2269 2270 switch (V.getOpcode()) { 2271 case ISD::BUILD_VECTOR: { 2272 SDValue Scl; 2273 for (unsigned i = 0; i != NumElts; ++i) { 2274 SDValue Op = V.getOperand(i); 2275 if (Op.isUndef()) { 2276 UndefElts.setBit(i); 2277 continue; 2278 } 2279 if (!DemandedElts[i]) 2280 continue; 2281 if (Scl && Scl != Op) 2282 return false; 2283 Scl = Op; 2284 } 2285 return true; 2286 } 2287 case ISD::VECTOR_SHUFFLE: { 2288 // Check if this is a shuffle node doing a splat. 2289 // TODO: Do we need to handle shuffle(splat, undef, mask)? 2290 int SplatIndex = -1; 2291 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); 2292 for (int i = 0; i != (int)NumElts; ++i) { 2293 int M = Mask[i]; 2294 if (M < 0) { 2295 UndefElts.setBit(i); 2296 continue; 2297 } 2298 if (!DemandedElts[i]) 2299 continue; 2300 if (0 <= SplatIndex && SplatIndex != M) 2301 return false; 2302 SplatIndex = M; 2303 } 2304 return true; 2305 } 2306 case ISD::EXTRACT_SUBVECTOR: { 2307 SDValue Src = V.getOperand(0); 2308 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(V.getOperand(1)); 2309 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2310 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2311 // Offset the demanded elts by the subvector index. 2312 uint64_t Idx = SubIdx->getZExtValue(); 2313 APInt UndefSrcElts; 2314 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2315 if (isSplatValue(Src, DemandedSrc, UndefSrcElts)) { 2316 UndefElts = UndefSrcElts.extractBits(NumElts, Idx); 2317 return true; 2318 } 2319 } 2320 break; 2321 } 2322 case ISD::ADD: 2323 case ISD::SUB: 2324 case ISD::AND: { 2325 APInt UndefLHS, UndefRHS; 2326 SDValue LHS = V.getOperand(0); 2327 SDValue RHS = V.getOperand(1); 2328 if (isSplatValue(LHS, DemandedElts, UndefLHS) && 2329 isSplatValue(RHS, DemandedElts, UndefRHS)) { 2330 UndefElts = UndefLHS | UndefRHS; 2331 return true; 2332 } 2333 break; 2334 } 2335 } 2336 2337 return false; 2338 } 2339 2340 /// Helper wrapper to main isSplatValue function. 2341 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) { 2342 EVT VT = V.getValueType(); 2343 assert(VT.isVector() && "Vector type expected"); 2344 unsigned NumElts = VT.getVectorNumElements(); 2345 2346 APInt UndefElts; 2347 APInt DemandedElts = APInt::getAllOnesValue(NumElts); 2348 return isSplatValue(V, DemandedElts, UndefElts) && 2349 (AllowUndefs || !UndefElts); 2350 } 2351 2352 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) { 2353 V = peekThroughExtractSubvectors(V); 2354 2355 EVT VT = V.getValueType(); 2356 unsigned Opcode = V.getOpcode(); 2357 switch (Opcode) { 2358 default: { 2359 APInt UndefElts; 2360 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); 2361 if (isSplatValue(V, DemandedElts, UndefElts)) { 2362 // Handle case where all demanded elements are UNDEF. 2363 if (DemandedElts.isSubsetOf(UndefElts)) { 2364 SplatIdx = 0; 2365 return getUNDEF(VT); 2366 } 2367 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes(); 2368 return V; 2369 } 2370 break; 2371 } 2372 case ISD::VECTOR_SHUFFLE: { 2373 // Check if this is a shuffle node doing a splat. 
2374 // TODO - remove this and rely purely on SelectionDAG::isSplatValue, 2375 // getTargetVShiftNode currently struggles without the splat source. 2376 auto *SVN = cast<ShuffleVectorSDNode>(V); 2377 if (!SVN->isSplat()) 2378 break; 2379 int Idx = SVN->getSplatIndex(); 2380 int NumElts = V.getValueType().getVectorNumElements(); 2381 SplatIdx = Idx % NumElts; 2382 return V.getOperand(Idx / NumElts); 2383 } 2384 } 2385 2386 return SDValue(); 2387 } 2388 2389 SDValue SelectionDAG::getSplatValue(SDValue V) { 2390 int SplatIdx; 2391 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) 2392 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), 2393 SrcVector.getValueType().getScalarType(), SrcVector, 2394 getIntPtrConstant(SplatIdx, SDLoc(V))); 2395 return SDValue(); 2396 } 2397 2398 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 2399 /// is less than the element bit-width of the shift node, return it. 2400 static const APInt *getValidShiftAmountConstant(SDValue V) { 2401 unsigned BitWidth = V.getScalarValueSizeInBits(); 2402 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 2403 // Shifting more than the bitwidth is not valid. 2404 const APInt &ShAmt = SA->getAPIntValue(); 2405 if (ShAmt.ult(BitWidth)) 2406 return &ShAmt; 2407 } 2408 return nullptr; 2409 } 2410 2411 /// If a SHL/SRA/SRL node has constant vector shift amounts that are all less 2412 /// than the element bit-width of the shift node, return the minimum value. 2413 static const APInt *getValidMinimumShiftAmountConstant(SDValue V) { 2414 unsigned BitWidth = V.getScalarValueSizeInBits(); 2415 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); 2416 if (!BV) 2417 return nullptr; 2418 const APInt *MinShAmt = nullptr; 2419 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2420 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); 2421 if (!SA) 2422 return nullptr; 2423 // Shifting more than the bitwidth is not valid. 2424 const APInt &ShAmt = SA->getAPIntValue(); 2425 if (ShAmt.uge(BitWidth)) 2426 return nullptr; 2427 if (MinShAmt && MinShAmt->ule(ShAmt)) 2428 continue; 2429 MinShAmt = &ShAmt; 2430 } 2431 return MinShAmt; 2432 } 2433 2434 /// Determine which bits of Op are known to be either zero or one and return 2435 /// them in Known. For vectors, the known bits are those that are shared by 2436 /// every vector element. 2437 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const { 2438 EVT VT = Op.getValueType(); 2439 APInt DemandedElts = VT.isVector() 2440 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2441 : APInt(1, 1); 2442 return computeKnownBits(Op, DemandedElts, Depth); 2443 } 2444 2445 /// Determine which bits of Op are known to be either zero or one and return 2446 /// them in Known. The DemandedElts argument allows us to only collect the known 2447 /// bits that are shared by the requested vector elements. 2448 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts, 2449 unsigned Depth) const { 2450 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2451 2452 KnownBits Known(BitWidth); // Don't know anything. 2453 2454 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2455 // We know all of the bits for a constant! 2456 Known.One = C->getAPIntValue(); 2457 Known.Zero = ~Known.One; 2458 return Known; 2459 } 2460 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { 2461 // We know all of the bits for a constant fp! 
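    // For example, the f32 constant 1.0 bitcasts to 0x3F800000, making all
    // 32 bits of the result known.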
2462 Known.One = C->getValueAPF().bitcastToAPInt(); 2463 Known.Zero = ~Known.One; 2464 return Known; 2465 } 2466 2467 if (Depth >= MaxRecursionDepth) 2468 return Known; // Limit search depth. 2469 2470 KnownBits Known2; 2471 unsigned NumElts = DemandedElts.getBitWidth(); 2472 assert((!Op.getValueType().isVector() || 2473 NumElts == Op.getValueType().getVectorNumElements()) && 2474 "Unexpected vector size"); 2475 2476 if (!DemandedElts) 2477 return Known; // No demanded elts, better to assume we don't know anything. 2478 2479 unsigned Opcode = Op.getOpcode(); 2480 switch (Opcode) { 2481 case ISD::BUILD_VECTOR: 2482 // Collect the known bits that are shared by every demanded vector element. 2483 Known.Zero.setAllBits(); Known.One.setAllBits(); 2484 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { 2485 if (!DemandedElts[i]) 2486 continue; 2487 2488 SDValue SrcOp = Op.getOperand(i); 2489 Known2 = computeKnownBits(SrcOp, Depth + 1); 2490 2491 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 2492 if (SrcOp.getValueSizeInBits() != BitWidth) { 2493 assert(SrcOp.getValueSizeInBits() > BitWidth && 2494 "Expected BUILD_VECTOR implicit truncation"); 2495 Known2 = Known2.trunc(BitWidth); 2496 } 2497 2498 // Known bits are the values that are shared by every demanded element. 2499 Known.One &= Known2.One; 2500 Known.Zero &= Known2.Zero; 2501 2502 // If we don't know any bits, early out. 2503 if (Known.isUnknown()) 2504 break; 2505 } 2506 break; 2507 case ISD::VECTOR_SHUFFLE: { 2508 // Collect the known bits that are shared by every vector element referenced 2509 // by the shuffle. 2510 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2511 Known.Zero.setAllBits(); Known.One.setAllBits(); 2512 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 2513 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 2514 for (unsigned i = 0; i != NumElts; ++i) { 2515 if (!DemandedElts[i]) 2516 continue; 2517 2518 int M = SVN->getMaskElt(i); 2519 if (M < 0) { 2520 // For UNDEF elements, we don't know anything about the common state of 2521 // the shuffle result. 2522 Known.resetAll(); 2523 DemandedLHS.clearAllBits(); 2524 DemandedRHS.clearAllBits(); 2525 break; 2526 } 2527 2528 if ((unsigned)M < NumElts) 2529 DemandedLHS.setBit((unsigned)M % NumElts); 2530 else 2531 DemandedRHS.setBit((unsigned)M % NumElts); 2532 } 2533 // Known bits are the values that are shared by every demanded element. 2534 if (!!DemandedLHS) { 2535 SDValue LHS = Op.getOperand(0); 2536 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1); 2537 Known.One &= Known2.One; 2538 Known.Zero &= Known2.Zero; 2539 } 2540 // If we don't know any bits, early out. 2541 if (Known.isUnknown()) 2542 break; 2543 if (!!DemandedRHS) { 2544 SDValue RHS = Op.getOperand(1); 2545 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1); 2546 Known.One &= Known2.One; 2547 Known.Zero &= Known2.Zero; 2548 } 2549 break; 2550 } 2551 case ISD::CONCAT_VECTORS: { 2552 // Split DemandedElts and test each of the demanded subvectors. 
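    // For example, concatenating two v4i32 inputs into a v8i32 with
    // DemandedElts == 0b00010110 demands 0b0110 from operand 0 and 0b0001
    // from operand 1.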
    Known.Zero.setAllBits(); Known.One.setAllBits();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; i != NumSubVectors; ++i) {
      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
      if (!!DemandedSub) {
        SDValue Sub = Op.getOperand(i);
        Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If we know the element index, demand any elements from the subvector
    // and the remainder from the src it's inserted into, otherwise demand
    // them all.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
      Known.One.setAllBits();
      Known.Zero.setAllBits();
      uint64_t Idx = SubIdx->getZExtValue();
      APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
      if (!!DemandedSubElts) {
        Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
        if (Known.isUnknown())
          break; // early-out.
      }
      APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
      APInt DemandedSrcElts = DemandedElts & ~SubMask;
      if (!!DemandedSrcElts) {
        Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    } else {
      Known = computeKnownBits(Sub, Depth + 1);
      if (Known.isUnknown())
        break; // early-out.
      Known2 = computeKnownBits(Src, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // If we know the element index, just demand those subvector elements,
    // otherwise demand them all.
    SDValue Src = Op.getOperand(0);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
    if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
      // Offset the demanded elts by the subvector index.
      uint64_t Idx = SubIdx->getZExtValue();
      DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
    }
    Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
    break;
  }
  case ISD::SCALAR_TO_VECTOR: {
    // We know as much about scalar_to_vector as we know about its source,
    // which becomes the first element of an otherwise unknown vector.
    if (DemandedElts != 1)
      break;

    SDValue N0 = Op.getOperand(0);
    Known = computeKnownBits(N0, Depth + 1);
    if (N0.getValueSizeInBits() != BitWidth)
      Known = Known.trunc(BitWidth);

    break;
  }
  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    EVT SubVT = N0.getValueType();
    unsigned SubBitWidth = SubVT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
      break;

    // Fast handling of 'identity' bitcasts.
2644 if (BitWidth == SubBitWidth) { 2645 Known = computeKnownBits(N0, DemandedElts, Depth + 1); 2646 break; 2647 } 2648 2649 bool IsLE = getDataLayout().isLittleEndian(); 2650 2651 // Bitcast 'small element' vector to 'large element' scalar/vector. 2652 if ((BitWidth % SubBitWidth) == 0) { 2653 assert(N0.getValueType().isVector() && "Expected bitcast from vector"); 2654 2655 // Collect known bits for the (larger) output by collecting the known 2656 // bits from each set of sub elements and shift these into place. 2657 // We need to separately call computeKnownBits for each set of 2658 // sub elements as the knownbits for each is likely to be different. 2659 unsigned SubScale = BitWidth / SubBitWidth; 2660 APInt SubDemandedElts(NumElts * SubScale, 0); 2661 for (unsigned i = 0; i != NumElts; ++i) 2662 if (DemandedElts[i]) 2663 SubDemandedElts.setBit(i * SubScale); 2664 2665 for (unsigned i = 0; i != SubScale; ++i) { 2666 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i), 2667 Depth + 1); 2668 unsigned Shifts = IsLE ? i : SubScale - 1 - i; 2669 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts); 2670 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts); 2671 } 2672 } 2673 2674 // Bitcast 'large element' scalar/vector to 'small element' vector. 2675 if ((SubBitWidth % BitWidth) == 0) { 2676 assert(Op.getValueType().isVector() && "Expected bitcast to vector"); 2677 2678 // Collect known bits for the (smaller) output by collecting the known 2679 // bits from the overlapping larger input elements and extracting the 2680 // sub sections we actually care about. 2681 unsigned SubScale = SubBitWidth / BitWidth; 2682 APInt SubDemandedElts(NumElts / SubScale, 0); 2683 for (unsigned i = 0; i != NumElts; ++i) 2684 if (DemandedElts[i]) 2685 SubDemandedElts.setBit(i / SubScale); 2686 2687 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1); 2688 2689 Known.Zero.setAllBits(); Known.One.setAllBits(); 2690 for (unsigned i = 0; i != NumElts; ++i) 2691 if (DemandedElts[i]) { 2692 unsigned Shifts = IsLE ? i : NumElts - 1 - i; 2693 unsigned Offset = (Shifts % SubScale) * BitWidth; 2694 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth); 2695 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth); 2696 // If we don't know any bits, early out. 2697 if (Known.isUnknown()) 2698 break; 2699 } 2700 } 2701 break; 2702 } 2703 case ISD::AND: 2704 // If either the LHS or the RHS are Zero, the result is zero. 2705 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2706 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2707 2708 // Output known-1 bits are only known if set in both the LHS & RHS. 2709 Known.One &= Known2.One; 2710 // Output known-0 are known to be clear if zero in either the LHS | RHS. 2711 Known.Zero |= Known2.Zero; 2712 break; 2713 case ISD::OR: 2714 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2715 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2716 2717 // Output known-0 bits are only known if clear in both the LHS & RHS. 2718 Known.Zero &= Known2.Zero; 2719 // Output known-1 are known to be set if set in either the LHS | RHS. 2720 Known.One |= Known2.One; 2721 break; 2722 case ISD::XOR: { 2723 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2724 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2725 2726 // Output known-0 bits are known if clear or set in both the LHS & RHS. 
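    // For example, if the LHS is known to be 0110 and the RHS is known to
    // match 1??0, the result matches 1??0: bit 3 is known one (0 ^ 1), bit 0
    // is known zero (0 ^ 0), and the two middle bits remain unknown.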
2727 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One); 2728 // Output known-1 are known to be set if set in only one of the LHS, RHS. 2729 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero); 2730 Known.Zero = KnownZeroOut; 2731 break; 2732 } 2733 case ISD::MUL: { 2734 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2735 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2736 2737 // If low bits are zero in either operand, output low known-0 bits. 2738 // Also compute a conservative estimate for high known-0 bits. 2739 // More trickiness is possible, but this is sufficient for the 2740 // interesting case of alignment computation. 2741 unsigned TrailZ = Known.countMinTrailingZeros() + 2742 Known2.countMinTrailingZeros(); 2743 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 2744 Known2.countMinLeadingZeros(), 2745 BitWidth) - BitWidth; 2746 2747 Known.resetAll(); 2748 Known.Zero.setLowBits(std::min(TrailZ, BitWidth)); 2749 Known.Zero.setHighBits(std::min(LeadZ, BitWidth)); 2750 break; 2751 } 2752 case ISD::UDIV: { 2753 // For the purposes of computing leading zeros we can conservatively 2754 // treat a udiv as a logical right shift by the power of 2 known to 2755 // be less than the denominator. 2756 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2757 unsigned LeadZ = Known2.countMinLeadingZeros(); 2758 2759 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2760 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2761 if (RHSMaxLeadingZeros != BitWidth) 2762 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2763 2764 Known.Zero.setHighBits(LeadZ); 2765 break; 2766 } 2767 case ISD::SELECT: 2768 case ISD::VSELECT: 2769 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2770 // If we don't know any bits, early out. 2771 if (Known.isUnknown()) 2772 break; 2773 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1); 2774 2775 // Only known if known in both the LHS and RHS. 2776 Known.One &= Known2.One; 2777 Known.Zero &= Known2.Zero; 2778 break; 2779 case ISD::SELECT_CC: 2780 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1); 2781 // If we don't know any bits, early out. 2782 if (Known.isUnknown()) 2783 break; 2784 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2785 2786 // Only known if known in both the LHS and RHS. 2787 Known.One &= Known2.One; 2788 Known.Zero &= Known2.Zero; 2789 break; 2790 case ISD::SMULO: 2791 case ISD::UMULO: 2792 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 2793 if (Op.getResNo() != 1) 2794 break; 2795 // The boolean result conforms to getBooleanContents. 2796 // If we know the result of a setcc has the top bits zero, use this info. 2797 // We know that we have an integer-based boolean since these operations 2798 // are only available for integer. 2799 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2800 TargetLowering::ZeroOrOneBooleanContent && 2801 BitWidth > 1) 2802 Known.Zero.setBitsFrom(1); 2803 break; 2804 case ISD::SETCC: 2805 case ISD::STRICT_FSETCC: 2806 case ISD::STRICT_FSETCCS: { 2807 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 2808 // If we know the result of a setcc has the top bits zero, use this info. 
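    // For instance, under ZeroOrOneBooleanContent an i32 setcc result is
    // always 0 or 1, so bits 1..31 are known zero.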
2809 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == 2810 TargetLowering::ZeroOrOneBooleanContent && 2811 BitWidth > 1) 2812 Known.Zero.setBitsFrom(1); 2813 break; 2814 } 2815 case ISD::SHL: 2816 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2817 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2818 unsigned Shift = ShAmt->getZExtValue(); 2819 Known.Zero <<= Shift; 2820 Known.One <<= Shift; 2821 // Low bits are known zero. 2822 Known.Zero.setLowBits(Shift); 2823 } 2824 break; 2825 case ISD::SRL: 2826 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2827 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2828 unsigned Shift = ShAmt->getZExtValue(); 2829 Known.Zero.lshrInPlace(Shift); 2830 Known.One.lshrInPlace(Shift); 2831 // High bits are known zero. 2832 Known.Zero.setHighBits(Shift); 2833 } else if (const APInt *ShMinAmt = getValidMinimumShiftAmountConstant(Op)) { 2834 // Minimum shift high bits are known zero. 2835 Known.Zero.setHighBits(ShMinAmt->getZExtValue()); 2836 } 2837 break; 2838 case ISD::SRA: 2839 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2840 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2841 unsigned Shift = ShAmt->getZExtValue(); 2842 // Sign extend known zero/one bit (else is unknown). 2843 Known.Zero.ashrInPlace(Shift); 2844 Known.One.ashrInPlace(Shift); 2845 } 2846 break; 2847 case ISD::FSHL: 2848 case ISD::FSHR: 2849 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) { 2850 unsigned Amt = C->getAPIntValue().urem(BitWidth); 2851 2852 // For fshl, 0-shift returns the 1st arg. 2853 // For fshr, 0-shift returns the 2nd arg. 2854 if (Amt == 0) { 2855 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1), 2856 DemandedElts, Depth + 1); 2857 break; 2858 } 2859 2860 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 2861 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 2862 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2863 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2864 if (Opcode == ISD::FSHL) { 2865 Known.One <<= Amt; 2866 Known.Zero <<= Amt; 2867 Known2.One.lshrInPlace(BitWidth - Amt); 2868 Known2.Zero.lshrInPlace(BitWidth - Amt); 2869 } else { 2870 Known.One <<= BitWidth - Amt; 2871 Known.Zero <<= BitWidth - Amt; 2872 Known2.One.lshrInPlace(Amt); 2873 Known2.Zero.lshrInPlace(Amt); 2874 } 2875 Known.One |= Known2.One; 2876 Known.Zero |= Known2.Zero; 2877 } 2878 break; 2879 case ISD::SIGN_EXTEND_INREG: { 2880 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2881 unsigned EBits = EVT.getScalarSizeInBits(); 2882 2883 // Sign extension. Compute the demanded bits in the result that are not 2884 // present in the input. 2885 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits); 2886 2887 APInt InSignMask = APInt::getSignMask(EBits); 2888 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits); 2889 2890 // If the sign extended bits are demanded, we know that the sign 2891 // bit is demanded. 2892 InSignMask = InSignMask.zext(BitWidth); 2893 if (NewBits.getBoolValue()) 2894 InputDemandedBits |= InSignMask; 2895 2896 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2897 Known.One &= InputDemandedBits; 2898 Known.Zero &= InputDemandedBits; 2899 2900 // If the sign bit of the input is known set or clear, then we know the 2901 // top bits of the result. 
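    // For example, for a sign_extend_inreg from i8 inside an i32: if bit 7 is
    // known zero, bits 8..31 become known zero; if bit 7 is known one, they
    // become known one; otherwise they stay unknown.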
    if (Known.Zero.intersects(InSignMask)) {       // Input sign bit known clear
      Known.Zero |= NewBits;
      Known.One &= ~NewBits;
    } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set
      Known.One |= NewBits;
      Known.Zero &= ~NewBits;
    } else {                                       // Input sign bit unknown
      Known.Zero &= ~NewBits;
      Known.One &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleTZ = Known2.countMaxTrailingZeros();
    unsigned LowBits = Log2_32(PossibleTZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleLZ = Known2.countMaxLeadingZeros();
    unsigned LowBits = Log2_32(PossibleLZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTPOP: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we know some of the bits are zero, they can't be one.
    unsigned PossibleOnes = Known2.countMaxPopulation();
    Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
    if (ISD::isNON_EXTLoad(LD) && Cst) {
      // Determine any common known bits from the loaded constant pool value.
      Type *CstTy = Cst->getType();
      if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
        // If it's a vector splat, then we can (quickly) reuse the scalar path.
        // NOTE: We assume all elements match and none are UNDEF.
        if (CstTy->isVectorTy()) {
          if (const Constant *Splat = Cst->getSplatValue()) {
            Cst = Splat;
            CstTy = Cst->getType();
          }
        }
        // TODO - do we need to handle different bitwidths?
        if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
          // Iterate across all vector elements finding common known bits.
          Known.One.setAllBits();
          Known.Zero.setAllBits();
          for (unsigned i = 0; i != NumElts; ++i) {
            if (!DemandedElts[i])
              continue;
            if (Constant *Elt = Cst->getAggregateElement(i)) {
              if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                const APInt &Value = CInt->getValue();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
              if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                APInt Value = CFP->getValueAPF().bitcastToAPInt();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
            }
            Known.One.clearAllBits();
            Known.Zero.clearAllBits();
            break;
          }
        } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
          if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
            const APInt &Value = CInt->getValue();
            Known.One = Value;
            Known.Zero = ~Value;
          } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
            APInt Value = CFP->getValueAPF().bitcastToAPInt();
            Known.One = Value;
            Known.Zero = ~Value;
          }
        }
      }
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
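      // For example, an i8 zextload producing an i32 value has MemBits == 8,
      // so bits 8..31 of the result are known zero.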
2994 EVT VT = LD->getMemoryVT(); 2995 unsigned MemBits = VT.getScalarSizeInBits(); 2996 Known.Zero.setBitsFrom(MemBits); 2997 } else if (const MDNode *Ranges = LD->getRanges()) { 2998 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 2999 computeKnownBitsFromRangeMetadata(*Ranges, Known); 3000 } 3001 break; 3002 } 3003 case ISD::ZERO_EXTEND_VECTOR_INREG: { 3004 EVT InVT = Op.getOperand(0).getValueType(); 3005 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3006 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3007 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */); 3008 break; 3009 } 3010 case ISD::ZERO_EXTEND: { 3011 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3012 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */); 3013 break; 3014 } 3015 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3016 EVT InVT = Op.getOperand(0).getValueType(); 3017 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 3018 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 3019 // If the sign bit is known to be zero or one, then sext will extend 3020 // it to the top bits, else it will just zext. 3021 Known = Known.sext(BitWidth); 3022 break; 3023 } 3024 case ISD::SIGN_EXTEND: { 3025 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3026 // If the sign bit is known to be zero or one, then sext will extend 3027 // it to the top bits, else it will just zext. 3028 Known = Known.sext(BitWidth); 3029 break; 3030 } 3031 case ISD::ANY_EXTEND: { 3032 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3033 Known = Known.zext(BitWidth, false /* ExtendedBitsAreKnownZero */); 3034 break; 3035 } 3036 case ISD::TRUNCATE: { 3037 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3038 Known = Known.trunc(BitWidth); 3039 break; 3040 } 3041 case ISD::AssertZext: { 3042 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 3043 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 3044 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3045 Known.Zero |= (~InMask); 3046 Known.One &= (~Known.Zero); 3047 break; 3048 } 3049 case ISD::FGETSIGN: 3050 // All bits are zero except the low bit. 3051 Known.Zero.setBitsFrom(1); 3052 break; 3053 case ISD::USUBO: 3054 case ISD::SSUBO: 3055 if (Op.getResNo() == 1) { 3056 // If we know the result of a setcc has the top bits zero, use this info. 3057 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3058 TargetLowering::ZeroOrOneBooleanContent && 3059 BitWidth > 1) 3060 Known.Zero.setBitsFrom(1); 3061 break; 3062 } 3063 LLVM_FALLTHROUGH; 3064 case ISD::SUB: 3065 case ISD::SUBC: { 3066 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3067 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3068 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false, 3069 Known, Known2); 3070 break; 3071 } 3072 case ISD::UADDO: 3073 case ISD::SADDO: 3074 case ISD::ADDCARRY: 3075 if (Op.getResNo() == 1) { 3076 // If we know the result of a setcc has the top bits zero, use this info. 
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::ADD:
  case ISD::ADDC:
  case ISD::ADDE: {
    assert(Op.getResNo() == 0 &&
           "We only compute known bits for the sum here.");

    // With ADDE and ADDCARRY, a carry bit may be added in.
    KnownBits Carry(1);
    if (Opcode == ISD::ADDE)
      // Can't track carry from glue, set carry to unknown.
      Carry.resetAll();
    else if (Opcode == ISD::ADDCARRY)
      // TODO: Compute known bits for the carry operand. Not sure if it is
      // worth the trouble (how often will we find a known carry bit?), and I
      // haven't tested this very much yet, but something like this might
      // work:
      //   Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
      //   Carry = Carry.zextOrTrunc(1, false);
      Carry.resetAll();
    else
      Carry.setAllZero();

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
    break;
  }
  case ISD::SREM:
    if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero,
        // then the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero,
        // then the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;
        assert((Known.Zero & Known.One) == 0 &&
               "Bits known to be one AND zero?");
      }
    }
    break;
  case ISD::UREM: {
    if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

        // The upper bits are all zero, the lower ones are unchanged.
        Known.Zero = Known2.Zero | ~LowBits;
        Known.One = Known2.One & LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
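    // For example, if either i32 operand is known to fit in its low 16 bits,
    // the remainder fits there too, so the top 16 bits are known zero.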
3149 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3150 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3151 
3152 uint32_t Leaders =
3153 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
3154 Known.resetAll();
3155 Known.Zero.setHighBits(Leaders);
3156 break;
3157 }
3158 case ISD::EXTRACT_ELEMENT: {
3159 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3160 const unsigned Index = Op.getConstantOperandVal(1);
3161 const unsigned EltBitWidth = Op.getValueSizeInBits();
3162 
3163 // Remove the low part of the known bits mask.
3164 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3165 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3166 
3167 // Remove the high part of the known bits mask.
3168 Known = Known.trunc(EltBitWidth);
3169 break;
3170 }
3171 case ISD::EXTRACT_VECTOR_ELT: {
3172 SDValue InVec = Op.getOperand(0);
3173 SDValue EltNo = Op.getOperand(1);
3174 EVT VecVT = InVec.getValueType();
3175 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3176 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3177 // If BitWidth > EltBitWidth the value is any-extended, so we do not know
3178 // anything about the extended bits.
3179 if (BitWidth > EltBitWidth)
3180 Known = Known.trunc(EltBitWidth);
3181 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3182 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
3183 // If we know the element index, just demand that vector element.
3184 unsigned Idx = ConstEltNo->getZExtValue();
3185 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
3186 Known = computeKnownBits(InVec, DemandedElt, Depth + 1);
3187 } else {
3188 // Unknown element index, so ignore DemandedElts and demand them all.
3189 Known = computeKnownBits(InVec, Depth + 1);
3190 }
3191 if (BitWidth > EltBitWidth)
3192 Known = Known.zext(BitWidth, false /* => any extend */);
3193 break;
3194 }
3195 case ISD::INSERT_VECTOR_ELT: {
3196 SDValue InVec = Op.getOperand(0);
3197 SDValue InVal = Op.getOperand(1);
3198 SDValue EltNo = Op.getOperand(2);
3199 
3200 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3201 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3202 // If we know the element index, split the demand between the
3203 // source vector and the inserted element.
3204 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
3205 unsigned EltIdx = CEltNo->getZExtValue();
3206 
3207 // If we demand the inserted element then add its common known bits.
3208 if (DemandedElts[EltIdx]) {
3209 Known2 = computeKnownBits(InVal, Depth + 1);
3210 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3211 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3212 }
3213 
3214 // If we demand the source vector then add its common known bits, ensuring
3215 // that we don't demand the inserted element.
3216 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
3217 if (!!VectorElts) {
3218 Known2 = computeKnownBits(InVec, VectorElts, Depth + 1);
3219 Known.One &= Known2.One;
3220 Known.Zero &= Known2.Zero;
3221 }
3222 } else {
3223 // Unknown element index, so ignore DemandedElts and demand them all.
3224 Known = computeKnownBits(InVec, Depth + 1);
3225 Known2 = computeKnownBits(InVal, Depth + 1);
3226 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3227 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3228 }
3229 break;
3230 }
3231 case ISD::BITREVERSE: {
3232 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3233 Known.Zero = Known2.Zero.reverseBits();
3234 Known.One = Known2.One.reverseBits();
3235 break;
3236 }
3237 case ISD::BSWAP: {
3238 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3239 Known.Zero = Known2.Zero.byteSwap();
3240 Known.One = Known2.One.byteSwap();
3241 break;
3242 }
3243 case ISD::ABS: {
3244 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3245 
3246 // If the source's MSB is zero then we know the rest of the bits already.
3247 if (Known2.isNonNegative()) {
3248 Known.Zero = Known2.Zero;
3249 Known.One = Known2.One;
3250 break;
3251 }
3252 
3253 // We only know that the absolute value's MSB will be zero iff there is
3254 // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
3255 Known2.One.clearSignBit();
3256 if (Known2.One.getBoolValue()) {
3257 Known.Zero = APInt::getSignMask(BitWidth);
3258 break;
3259 }
3260 break;
3261 }
3262 case ISD::UMIN: {
3263 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3264 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3265 
3266 // UMIN - we know that the result will have at least the maximum of the
3267 // inputs' known leading zero bits.
3268 unsigned LeadZero = Known.countMinLeadingZeros();
3269 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
3270 
3271 Known.Zero &= Known2.Zero;
3272 Known.One &= Known2.One;
3273 Known.Zero.setHighBits(LeadZero);
3274 break;
3275 }
3276 case ISD::UMAX: {
3277 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3278 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3279 
3280 // UMAX - we know that the result will have at least the maximum of the
3281 // inputs' known leading one bits.
3282 unsigned LeadOne = Known.countMinLeadingOnes();
3283 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
3284 
3285 Known.Zero &= Known2.Zero;
3286 Known.One &= Known2.One;
3287 Known.One.setHighBits(LeadOne);
3288 break;
3289 }
3290 case ISD::SMIN:
3291 case ISD::SMAX: {
3292 // If we have a clamp pattern, we know that the number of sign bits will be
3293 // the minimum of the clamp min/max range.
3294 bool IsMax = (Opcode == ISD::SMAX);
3295 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3296 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3297 if (Op.getOperand(0).getOpcode() == (IsMax ?
ISD::SMIN : ISD::SMAX))
3298 CstHigh =
3299 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3300 if (CstLow && CstHigh) {
3301 if (!IsMax)
3302 std::swap(CstLow, CstHigh);
3303 
3304 const APInt &ValueLow = CstLow->getAPIntValue();
3305 const APInt &ValueHigh = CstHigh->getAPIntValue();
3306 if (ValueLow.sle(ValueHigh)) {
3307 unsigned LowSignBits = ValueLow.getNumSignBits();
3308 unsigned HighSignBits = ValueHigh.getNumSignBits();
3309 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3310 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3311 Known.One.setHighBits(MinSignBits);
3312 break;
3313 }
3314 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3315 Known.Zero.setHighBits(MinSignBits);
3316 break;
3317 }
3318 }
3319 }
3320 
3321 // Fallback - just get the shared known bits of the operands.
3322 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3323 if (Known.isUnknown()) break; // Early-out
3324 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3325 Known.Zero &= Known2.Zero;
3326 Known.One &= Known2.One;
3327 break;
3328 }
3329 case ISD::FrameIndex:
3330 case ISD::TargetFrameIndex:
3331 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
3332 break;
3333 
3334 default:
3335 if (Opcode < ISD::BUILTIN_OP_END)
3336 break;
3337 LLVM_FALLTHROUGH;
3338 case ISD::INTRINSIC_WO_CHAIN:
3339 case ISD::INTRINSIC_W_CHAIN:
3340 case ISD::INTRINSIC_VOID:
3341 // Allow the target to implement this method for its nodes.
3342 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3343 break;
3344 }
3345 
3346 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3347 return Known;
3348 }
3349 
3350 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3351 SDValue N1) const {
3352 // X + 0 never overflows.
3353 if (isNullConstant(N1))
3354 return OFK_Never;
3355 
3356 KnownBits N1Known = computeKnownBits(N1);
3357 if (N1Known.Zero.getBoolValue()) {
3358 KnownBits N0Known = computeKnownBits(N0);
3359 
3360 bool overflow;
3361 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3362 if (!overflow)
3363 return OFK_Never;
3364 }
3365 
3366 // mulhi + 1 never overflows.
3367 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3368 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3369 return OFK_Never;
3370 
3371 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3372 KnownBits N0Known = computeKnownBits(N0);
3373 
3374 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3375 return OFK_Never;
3376 }
3377 
3378 return OFK_Sometime;
3379 }
3380 
3381 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3382 EVT OpVT = Val.getValueType();
3383 unsigned BitWidth = OpVT.getScalarSizeInBits();
3384 
3385 // Is the constant a known power of 2?
3386 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3387 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3388 
3389 // A left-shift of a constant one will have exactly one bit set because
3390 // shifting the bit off the end is undefined.
3391 if (Val.getOpcode() == ISD::SHL) {
3392 auto *C = isConstOrConstSplat(Val.getOperand(0));
3393 if (C && C->getAPIntValue() == 1)
3394 return true;
3395 }
3396 
3397 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3398 // one bit set.
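// Illustrative example (hypothetical i8 values): (srl i8 0x80, %amt) can
// only produce 0x80, 0x40, ..., or 0x01; exactly one bit survives the
// shift, so every possible result is a power of two.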
3399 if (Val.getOpcode() == ISD::SRL) { 3400 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3401 if (C && C->getAPIntValue().isSignMask()) 3402 return true; 3403 } 3404 3405 // Are all operands of a build vector constant powers of two? 3406 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3407 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3408 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3409 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3410 return false; 3411 })) 3412 return true; 3413 3414 // More could be done here, though the above checks are enough 3415 // to handle some common cases. 3416 3417 // Fall back to computeKnownBits to catch other known cases. 3418 KnownBits Known = computeKnownBits(Val); 3419 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3420 } 3421 3422 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3423 EVT VT = Op.getValueType(); 3424 APInt DemandedElts = VT.isVector() 3425 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 3426 : APInt(1, 1); 3427 return ComputeNumSignBits(Op, DemandedElts, Depth); 3428 } 3429 3430 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3431 unsigned Depth) const { 3432 EVT VT = Op.getValueType(); 3433 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3434 unsigned VTBits = VT.getScalarSizeInBits(); 3435 unsigned NumElts = DemandedElts.getBitWidth(); 3436 unsigned Tmp, Tmp2; 3437 unsigned FirstAnswer = 1; 3438 3439 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3440 const APInt &Val = C->getAPIntValue(); 3441 return Val.getNumSignBits(); 3442 } 3443 3444 if (Depth >= MaxRecursionDepth) 3445 return 1; // Limit search depth. 3446 3447 if (!DemandedElts) 3448 return 1; // No demanded elts, better to assume we don't know anything. 3449 3450 unsigned Opcode = Op.getOpcode(); 3451 switch (Opcode) { 3452 default: break; 3453 case ISD::AssertSext: 3454 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3455 return VTBits-Tmp+1; 3456 case ISD::AssertZext: 3457 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3458 return VTBits-Tmp; 3459 3460 case ISD::BUILD_VECTOR: 3461 Tmp = VTBits; 3462 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3463 if (!DemandedElts[i]) 3464 continue; 3465 3466 SDValue SrcOp = Op.getOperand(i); 3467 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 3468 3469 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3470 if (SrcOp.getValueSizeInBits() != VTBits) { 3471 assert(SrcOp.getValueSizeInBits() > VTBits && 3472 "Expected BUILD_VECTOR implicit truncation"); 3473 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3474 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3475 } 3476 Tmp = std::min(Tmp, Tmp2); 3477 } 3478 return Tmp; 3479 3480 case ISD::VECTOR_SHUFFLE: { 3481 // Collect the minimum number of sign bits that are shared by every vector 3482 // element referenced by the shuffle. 3483 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3484 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3485 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3486 for (unsigned i = 0; i != NumElts; ++i) { 3487 int M = SVN->getMaskElt(i); 3488 if (!DemandedElts[i]) 3489 continue; 3490 // For UNDEF elements, we don't know anything about the common state of 3491 // the shuffle result. 
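// A worked case with hypothetical values: for a <4 x i32> shuffle with
// mask <0, 5, -1, 7>, lane 2 is undef, so if lane 2 is demanded we must
// conservatively return 1; otherwise we demand lane 0 of the LHS and
// lanes 1 and 3 of the RHS below.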
3492 if (M < 0) 3493 return 1; 3494 if ((unsigned)M < NumElts) 3495 DemandedLHS.setBit((unsigned)M % NumElts); 3496 else 3497 DemandedRHS.setBit((unsigned)M % NumElts); 3498 } 3499 Tmp = std::numeric_limits<unsigned>::max(); 3500 if (!!DemandedLHS) 3501 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); 3502 if (!!DemandedRHS) { 3503 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); 3504 Tmp = std::min(Tmp, Tmp2); 3505 } 3506 // If we don't know anything, early out and try computeKnownBits fall-back. 3507 if (Tmp == 1) 3508 break; 3509 assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); 3510 return Tmp; 3511 } 3512 3513 case ISD::BITCAST: { 3514 SDValue N0 = Op.getOperand(0); 3515 EVT SrcVT = N0.getValueType(); 3516 unsigned SrcBits = SrcVT.getScalarSizeInBits(); 3517 3518 // Ignore bitcasts from unsupported types.. 3519 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) 3520 break; 3521 3522 // Fast handling of 'identity' bitcasts. 3523 if (VTBits == SrcBits) 3524 return ComputeNumSignBits(N0, DemandedElts, Depth + 1); 3525 3526 bool IsLE = getDataLayout().isLittleEndian(); 3527 3528 // Bitcast 'large element' scalar/vector to 'small element' vector. 3529 if ((SrcBits % VTBits) == 0) { 3530 assert(VT.isVector() && "Expected bitcast to vector"); 3531 3532 unsigned Scale = SrcBits / VTBits; 3533 APInt SrcDemandedElts(NumElts / Scale, 0); 3534 for (unsigned i = 0; i != NumElts; ++i) 3535 if (DemandedElts[i]) 3536 SrcDemandedElts.setBit(i / Scale); 3537 3538 // Fast case - sign splat can be simply split across the small elements. 3539 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1); 3540 if (Tmp == SrcBits) 3541 return VTBits; 3542 3543 // Slow case - determine how far the sign extends into each sub-element. 3544 Tmp2 = VTBits; 3545 for (unsigned i = 0; i != NumElts; ++i) 3546 if (DemandedElts[i]) { 3547 unsigned SubOffset = i % Scale; 3548 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); 3549 SubOffset = SubOffset * VTBits; 3550 if (Tmp <= SubOffset) 3551 return 1; 3552 Tmp2 = std::min(Tmp2, Tmp - SubOffset); 3553 } 3554 return Tmp2; 3555 } 3556 break; 3557 } 3558 3559 case ISD::SIGN_EXTEND: 3560 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); 3561 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; 3562 case ISD::SIGN_EXTEND_INREG: 3563 // Max of the input and what this extends. 3564 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); 3565 Tmp = VTBits-Tmp+1; 3566 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3567 return std::max(Tmp, Tmp2); 3568 case ISD::SIGN_EXTEND_VECTOR_INREG: { 3569 SDValue Src = Op.getOperand(0); 3570 EVT SrcVT = Src.getValueType(); 3571 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements()); 3572 Tmp = VTBits - SrcVT.getScalarSizeInBits(); 3573 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; 3574 } 3575 3576 case ISD::SRA: 3577 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3578 // SRA X, C -> adds C sign bits. 3579 if (ConstantSDNode *C = 3580 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) { 3581 APInt ShiftVal = C->getAPIntValue(); 3582 ShiftVal += Tmp; 3583 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); 3584 } 3585 return Tmp; 3586 case ISD::SHL: 3587 if (ConstantSDNode *C = 3588 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) { 3589 // shl destroys sign bits. 
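// E.g. (hypothetical) an i16 value with 10 known sign bits shifted left
// by 4 keeps 10 - 4 = 6 sign bits; a shift amount of 10 or more would
// shift them all out, so we fall through to the generic handling.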
3590 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3591 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 3592 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 3593 return Tmp - C->getZExtValue(); 3594 } 3595 break; 3596 case ISD::AND: 3597 case ISD::OR: 3598 case ISD::XOR: // NOT is handled here. 3599 // Logical binary ops preserve the number of sign bits at the worst. 3600 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3601 if (Tmp != 1) { 3602 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3603 FirstAnswer = std::min(Tmp, Tmp2); 3604 // We computed what we know about the sign bits as our first 3605 // answer. Now proceed to the generic code that uses 3606 // computeKnownBits, and pick whichever answer is better. 3607 } 3608 break; 3609 3610 case ISD::SELECT: 3611 case ISD::VSELECT: 3612 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3613 if (Tmp == 1) return 1; // Early out. 3614 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3615 return std::min(Tmp, Tmp2); 3616 case ISD::SELECT_CC: 3617 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3618 if (Tmp == 1) return 1; // Early out. 3619 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3620 return std::min(Tmp, Tmp2); 3621 3622 case ISD::SMIN: 3623 case ISD::SMAX: { 3624 // If we have a clamp pattern, we know that the number of sign bits will be 3625 // the minimum of the clamp min/max range. 3626 bool IsMax = (Opcode == ISD::SMAX); 3627 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3628 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3629 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3630 CstHigh = 3631 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3632 if (CstLow && CstHigh) { 3633 if (!IsMax) 3634 std::swap(CstLow, CstHigh); 3635 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { 3636 Tmp = CstLow->getAPIntValue().getNumSignBits(); 3637 Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); 3638 return std::min(Tmp, Tmp2); 3639 } 3640 } 3641 3642 // Fallback - just get the minimum number of sign bits of the operands. 3643 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3644 if (Tmp == 1) 3645 return 1; // Early out. 3646 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3647 return std::min(Tmp, Tmp2); 3648 } 3649 case ISD::UMIN: 3650 case ISD::UMAX: 3651 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3652 if (Tmp == 1) 3653 return 1; // Early out. 3654 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3655 return std::min(Tmp, Tmp2); 3656 case ISD::SADDO: 3657 case ISD::UADDO: 3658 case ISD::SSUBO: 3659 case ISD::USUBO: 3660 case ISD::SMULO: 3661 case ISD::UMULO: 3662 if (Op.getResNo() != 1) 3663 break; 3664 // The boolean result conforms to getBooleanContents. Fall through. 3665 // If setcc returns 0/-1, all bits are sign bits. 3666 // We know that we have an integer-based boolean since these operations 3667 // are only available for integer. 3668 if (TLI->getBooleanContents(VT.isVector(), false) == 3669 TargetLowering::ZeroOrNegativeOneBooleanContent) 3670 return VTBits; 3671 break; 3672 case ISD::SETCC: 3673 case ISD::STRICT_FSETCC: 3674 case ISD::STRICT_FSETCCS: { 3675 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; 3676 // If setcc returns 0/-1, all bits are sign bits. 
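// E.g. under ZeroOrNegativeOneBooleanContent, an i32 SETCC result is
// either 0 or -1, so all 32 bits are copies of the sign bit.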
3677 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3678 TargetLowering::ZeroOrNegativeOneBooleanContent)
3679 return VTBits;
3680 break;
3681 }
3682 case ISD::ROTL:
3683 case ISD::ROTR:
3684 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3685 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3686 
3687 // Handle rotate right by N like a rotate left by VTBits-N.
3688 if (Opcode == ISD::ROTR)
3689 RotAmt = (VTBits - RotAmt) % VTBits;
3690 
3691 // If we aren't rotating out all of the known-in sign bits, return the
3692 // number that are left. This handles rotl(sext(x), 1) for example.
3693 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3694 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3695 }
3696 break;
3697 case ISD::ADD:
3698 case ISD::ADDC:
3699 // Add can have at most one carry bit. Thus we know that the output
3700 // is, at worst, one more bit than the inputs.
3701 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3702 if (Tmp == 1) return 1; // Early out.
3703 
3704 // Special case decrementing a value (ADD X, -1):
3705 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3706 if (CRHS->isAllOnesValue()) {
3707 KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1);
3708 
3709 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3710 // sign bits set.
3711 if ((Known.Zero | 1).isAllOnesValue())
3712 return VTBits;
3713 
3714 // If we are subtracting one from a positive number, there is no carry
3715 // out of the result.
3716 if (Known.isNonNegative())
3717 return Tmp;
3718 }
3719 
3720 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3721 if (Tmp2 == 1) return 1;
3722 return std::min(Tmp, Tmp2)-1;
3723 
3724 case ISD::SUB:
3725 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3726 if (Tmp2 == 1) return 1;
3727 
3728 // Handle NEG.
3729 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3730 if (CLHS->isNullValue()) {
3731 KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1);
3732 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3733 // sign bits set.
3734 if ((Known.Zero | 1).isAllOnesValue())
3735 return VTBits;
3736 
3737 // If the input is known to be positive (the sign bit is known clear),
3738 // the output of the NEG has the same number of sign bits as the input.
3739 if (Known.isNonNegative())
3740 return Tmp2;
3741 
3742 // Otherwise, we treat this like a SUB.
3743 }
3744 
3745 // Sub can have at most one carry bit. Thus we know that the output
3746 // is, at worst, one more bit than the inputs.
3747 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3748 if (Tmp == 1) return 1; // Early out.
3749 return std::min(Tmp, Tmp2)-1;
3750 case ISD::MUL: {
3751 // The output of the Mul can be at most twice the valid bits in the inputs.
3752 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3753 if (SignBitsOp0 == 1)
3754 break;
3755 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3756 if (SignBitsOp1 == 1)
3757 break;
3758 unsigned OutValidBits =
3759 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
3760 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
3761 }
3762 case ISD::TRUNCATE: {
3763 // Check if the sign bits of the source go down as far as the truncated value.
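// Illustrative example: truncating an i32 with 25 known sign bits to i16
// discards the top 16 bits and leaves 25 - (32 - 16) = 9 sign bits; if no
// sign bits reach the kept low part, we learn nothing and fall through.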
3764 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3765 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3766 if (NumSrcSignBits > (NumSrcBits - VTBits))
3767 return NumSrcSignBits - (NumSrcBits - VTBits);
3768 break;
3769 }
3770 case ISD::EXTRACT_ELEMENT: {
3771 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3772 const int BitWidth = Op.getValueSizeInBits();
3773 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3774 
3775 // Get the reverse index (starting from 1); Op1 indexes elements from the
3776 // little end, while the sign starts at the big end.
3777 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3778 
3779 // If the sign portion ends in our element, the subtraction gives the
3780 // correct result; otherwise it yields a negative or greater-than-bitwidth result.
3781 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3782 }
3783 case ISD::INSERT_VECTOR_ELT: {
3784 SDValue InVec = Op.getOperand(0);
3785 SDValue InVal = Op.getOperand(1);
3786 SDValue EltNo = Op.getOperand(2);
3787 
3788 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3789 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3790 // If we know the element index, split the demand between the
3791 // source vector and the inserted element.
3792 unsigned EltIdx = CEltNo->getZExtValue();
3793 
3794 // If we demand the inserted element then get its sign bits.
3795 Tmp = std::numeric_limits<unsigned>::max();
3796 if (DemandedElts[EltIdx]) {
3797 // TODO - handle implicit truncation of inserted elements.
3798 if (InVal.getScalarValueSizeInBits() != VTBits)
3799 break;
3800 Tmp = ComputeNumSignBits(InVal, Depth + 1);
3801 }
3802 
3803 // If we demand the source vector then get its sign bits, and determine
3804 // the minimum.
3805 APInt VectorElts = DemandedElts;
3806 VectorElts.clearBit(EltIdx);
3807 if (!!VectorElts) {
3808 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3809 Tmp = std::min(Tmp, Tmp2);
3810 }
3811 } else {
3812 // Unknown element index, so ignore DemandedElts and demand them all.
3813 Tmp = ComputeNumSignBits(InVec, Depth + 1);
3814 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3815 Tmp = std::min(Tmp, Tmp2);
3816 }
3817 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3818 return Tmp;
3819 }
3820 case ISD::EXTRACT_VECTOR_ELT: {
3821 SDValue InVec = Op.getOperand(0);
3822 SDValue EltNo = Op.getOperand(1);
3823 EVT VecVT = InVec.getValueType();
3824 const unsigned BitWidth = Op.getValueSizeInBits();
3825 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3826 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3827 
3828 // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3829 // anything about sign bits. But if the sizes match we can derive knowledge
3830 // about sign bits from the vector operand.
3831 if (BitWidth != EltBitWidth)
3832 break;
3833 
3834 // If we know the element index, just demand that vector element, else for
3835 // an unknown element index, ignore DemandedElts and demand them all.
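// E.g. (hypothetical) extracting lane 2 of a <4 x i32> vector passes
// DemandedSrcElts = 0b0100 below, so only the sign-bit information of
// that single source lane constrains the result.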
3836 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3837 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3838 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3839 DemandedSrcElts =
3840 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3841 
3842 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3843 }
3844 case ISD::EXTRACT_SUBVECTOR: {
3845 // If we know the element index, just demand that subvector's elements,
3846 // otherwise demand them all.
3847 SDValue Src = Op.getOperand(0);
3848 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3849 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3850 APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
3851 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3852 // Offset the demanded elts by the subvector index.
3853 uint64_t Idx = SubIdx->getZExtValue();
3854 DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3855 }
3856 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3857 }
3858 case ISD::CONCAT_VECTORS: {
3859 // Determine the minimum number of sign bits across all demanded
3860 // elts of the input vectors. Early out if the result is already 1.
3861 Tmp = std::numeric_limits<unsigned>::max();
3862 EVT SubVectorVT = Op.getOperand(0).getValueType();
3863 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3864 unsigned NumSubVectors = Op.getNumOperands();
3865 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3866 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3867 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3868 if (!DemandedSub)
3869 continue;
3870 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3871 Tmp = std::min(Tmp, Tmp2);
3872 }
3873 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3874 return Tmp;
3875 }
3876 case ISD::INSERT_SUBVECTOR: {
3877 // If we know the element index, demand any elements from the subvector and
3878 // the remainder from the src it's inserted into, otherwise demand them all.
3879 SDValue Src = Op.getOperand(0);
3880 SDValue Sub = Op.getOperand(1);
3881 auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
3882 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3883 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
3884 Tmp = std::numeric_limits<unsigned>::max();
3885 uint64_t Idx = SubIdx->getZExtValue();
3886 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3887 if (!!DemandedSubElts) {
3888 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
3889 if (Tmp == 1) return 1; // early-out
3890 }
3891 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
3892 APInt DemandedSrcElts = DemandedElts & ~SubMask;
3893 if (!!DemandedSrcElts) {
3894 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3895 Tmp = std::min(Tmp, Tmp2);
3896 }
3897 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3898 return Tmp;
3899 }
3900 
3901 // Not able to determine the index, so just assume the worst case.
3902 Tmp = ComputeNumSignBits(Sub, Depth + 1);
3903 if (Tmp == 1) return 1; // early-out
3904 Tmp2 = ComputeNumSignBits(Src, Depth + 1);
3905 Tmp = std::min(Tmp, Tmp2);
3906 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3907 return Tmp;
3908 }
3909 }
3910 
3911 // If we are looking at the loaded value of the SDNode.
3912 if (Op.getResNo() == 0) {
3913 // Handle LOADX separately here. EXTLOAD case will fallthrough.
3914 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 3915 unsigned ExtType = LD->getExtensionType(); 3916 switch (ExtType) { 3917 default: break; 3918 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known. 3919 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3920 return VTBits - Tmp + 1; 3921 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known. 3922 Tmp = LD->getMemoryVT().getScalarSizeInBits(); 3923 return VTBits - Tmp; 3924 case ISD::NON_EXTLOAD: 3925 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) { 3926 // We only need to handle vectors - computeKnownBits should handle 3927 // scalar cases. 3928 Type *CstTy = Cst->getType(); 3929 if (CstTy->isVectorTy() && 3930 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) { 3931 Tmp = VTBits; 3932 for (unsigned i = 0; i != NumElts; ++i) { 3933 if (!DemandedElts[i]) 3934 continue; 3935 if (Constant *Elt = Cst->getAggregateElement(i)) { 3936 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { 3937 const APInt &Value = CInt->getValue(); 3938 Tmp = std::min(Tmp, Value.getNumSignBits()); 3939 continue; 3940 } 3941 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { 3942 APInt Value = CFP->getValueAPF().bitcastToAPInt(); 3943 Tmp = std::min(Tmp, Value.getNumSignBits()); 3944 continue; 3945 } 3946 } 3947 // Unknown type. Conservatively assume no bits match sign bit. 3948 return 1; 3949 } 3950 return Tmp; 3951 } 3952 } 3953 break; 3954 } 3955 } 3956 } 3957 3958 // Allow the target to implement this method for its nodes. 3959 if (Opcode >= ISD::BUILTIN_OP_END || 3960 Opcode == ISD::INTRINSIC_WO_CHAIN || 3961 Opcode == ISD::INTRINSIC_W_CHAIN || 3962 Opcode == ISD::INTRINSIC_VOID) { 3963 unsigned NumBits = 3964 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); 3965 if (NumBits > 1) 3966 FirstAnswer = std::max(FirstAnswer, NumBits); 3967 } 3968 3969 // Finally, if we can prove that the top bits of the result are 0's or 1's, 3970 // use this information. 3971 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); 3972 3973 APInt Mask; 3974 if (Known.isNonNegative()) { // sign bit is 0 3975 Mask = Known.Zero; 3976 } else if (Known.isNegative()) { // sign bit is 1; 3977 Mask = Known.One; 3978 } else { 3979 // Nothing known. 3980 return FirstAnswer; 3981 } 3982 3983 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 3984 // the number of identical bits in the top of the input value. 3985 Mask = ~Mask; 3986 Mask <<= Mask.getBitWidth()-VTBits; 3987 // Return # leading zeros. We use 'min' here in case Val was zero before 3988 // shifting. We don't want to return '64' as for an i32 "0". 3989 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros())); 3990 } 3991 3992 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { 3993 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || 3994 !isa<ConstantSDNode>(Op.getOperand(1))) 3995 return false; 3996 3997 if (Op.getOpcode() == ISD::OR && 3998 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) 3999 return false; 4000 4001 return true; 4002 } 4003 4004 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const { 4005 // If we're told that NaNs won't happen, assume they won't. 4006 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) 4007 return true; 4008 4009 if (Depth >= MaxRecursionDepth) 4010 return false; // Limit search depth. 4011 4012 // TODO: Handle vectors. 4013 // If the value is a constant, we can obviously see if it is a NaN or not. 
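// E.g. the constant 1.0 is trivially never any kind of NaN, while a
// quiet-NaN constant is still acceptable when the caller only asked
// about signaling NaNs (SNaN == true).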
4014 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4015 return !C->getValueAPF().isNaN() ||
4016 (SNaN && !C->getValueAPF().isSignaling());
4017 }
4018 
4019 unsigned Opcode = Op.getOpcode();
4020 switch (Opcode) {
4021 case ISD::FADD:
4022 case ISD::FSUB:
4023 case ISD::FMUL:
4024 case ISD::FDIV:
4025 case ISD::FREM:
4026 case ISD::FSIN:
4027 case ISD::FCOS: {
4028 if (SNaN)
4029 return true;
4030 // TODO: Need isKnownNeverInfinity
4031 return false;
4032 }
4033 case ISD::FCANONICALIZE:
4034 case ISD::FEXP:
4035 case ISD::FEXP2:
4036 case ISD::FTRUNC:
4037 case ISD::FFLOOR:
4038 case ISD::FCEIL:
4039 case ISD::FROUND:
4040 case ISD::FRINT:
4041 case ISD::FNEARBYINT: {
4042 if (SNaN)
4043 return true;
4044 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4045 }
4046 case ISD::FABS:
4047 case ISD::FNEG:
4048 case ISD::FCOPYSIGN: {
4049 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4050 }
4051 case ISD::SELECT:
4052 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4053 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4054 case ISD::FP_EXTEND:
4055 case ISD::FP_ROUND: {
4056 if (SNaN)
4057 return true;
4058 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4059 }
4060 case ISD::SINT_TO_FP:
4061 case ISD::UINT_TO_FP:
4062 return true;
4063 case ISD::FMA:
4064 case ISD::FMAD: {
4065 if (SNaN)
4066 return true;
4067 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4068 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4069 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4070 }
4071 case ISD::FSQRT: // Needs the operand to be known non-negative.
4072 case ISD::FLOG:
4073 case ISD::FLOG2:
4074 case ISD::FLOG10:
4075 case ISD::FPOWI:
4076 case ISD::FPOW: {
4077 if (SNaN)
4078 return true;
4079 // TODO: Refine based on the operand.
4080 return false;
4081 }
4082 case ISD::FMINNUM:
4083 case ISD::FMAXNUM: {
4084 // Only one needs to be known not-NaN, since it will be returned if the
4085 // other ends up being one.
4086 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4087 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4088 }
4089 case ISD::FMINNUM_IEEE:
4090 case ISD::FMAXNUM_IEEE: {
4091 if (SNaN)
4092 return true;
4093 // This can return a NaN if either operand is an sNaN, or if both operands
4094 // are NaN.
4095 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4096 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4097 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4098 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4099 }
4100 case ISD::FMINIMUM:
4101 case ISD::FMAXIMUM: {
4102 // TODO: Does this quiet or return the original NaN as-is?
4103 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4104 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4105 }
4106 case ISD::EXTRACT_VECTOR_ELT: {
4107 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4108 }
4109 default:
4110 if (Opcode >= ISD::BUILTIN_OP_END ||
4111 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4112 Opcode == ISD::INTRINSIC_W_CHAIN ||
4113 Opcode == ISD::INTRINSIC_VOID) {
4114 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4115 }
4116 
4117 return false;
4118 }
4119 }
4120 
4121 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4122 assert(Op.getValueType().isFloatingPoint() &&
4123 "Floating point type expected");
4124 
4125 // If the value is a constant, we can obviously see if it is a zero or not.
4126 // TODO: Add BuildVector support.
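// Note that APFloat reports both +0.0 and -0.0 as zero, so a ConstantFP
// -0.0 is correctly rejected here as well.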
4127 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4128 return !C->isZero();
4129 return false;
4130 }
4131 
4132 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4133 assert(!Op.getValueType().isFloatingPoint() &&
4134 "Floating point types unsupported - use isKnownNeverZeroFloat");
4135 
4136 // If the value is a constant, we can obviously see if it is a zero or not.
4137 if (ISD::matchUnaryPredicate(
4138 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4139 return true;
4140 
4141 // TODO: Recognize more cases here.
4142 switch (Op.getOpcode()) {
4143 default: break;
4144 case ISD::OR:
4145 if (isKnownNeverZero(Op.getOperand(1)) ||
4146 isKnownNeverZero(Op.getOperand(0)))
4147 return true;
4148 break;
4149 }
4150 
4151 return false;
4152 }
4153 
4154 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4155 // Check the obvious case.
4156 if (A == B) return true;
4157 
4158 // Check for negative and positive zero.
4159 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4160 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4161 if (CA->isZero() && CB->isZero()) return true;
4162 
4163 // Otherwise they may not be equal.
4164 return false;
4165 }
4166 
4167 // FIXME: unify with llvm::haveNoCommonBitsSet.
4168 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4169 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4170 assert(A.getValueType() == B.getValueType() &&
4171 "Values must have the same type");
4172 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4173 }
4174 
4175 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4176 ArrayRef<SDValue> Ops,
4177 SelectionDAG &DAG) {
4178 int NumOps = Ops.size();
4179 assert(NumOps != 0 && "Can't build an empty vector!");
4180 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4181 "Incorrect element count in BUILD_VECTOR!");
4182 
4183 // BUILD_VECTOR of UNDEFs is UNDEF.
4184 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4185 return DAG.getUNDEF(VT);
4186 
4187 // A BUILD_VECTOR of sequential extracts from the same vector and type is an identity.
4188 SDValue IdentitySrc;
4189 bool IsIdentity = true;
4190 for (int i = 0; i != NumOps; ++i) {
4191 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4192 Ops[i].getOperand(0).getValueType() != VT ||
4193 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4194 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4195 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4196 IsIdentity = false;
4197 break;
4198 }
4199 IdentitySrc = Ops[i].getOperand(0);
4200 }
4201 if (IsIdentity)
4202 return IdentitySrc;
4203 
4204 return SDValue();
4205 }
4206 
4207 /// Try to simplify vector concatenation to an input value, undef, or build
4208 /// vector.
4209 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4210 ArrayRef<SDValue> Ops,
4211 SelectionDAG &DAG) {
4212 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4213 assert(llvm::all_of(Ops,
4214 [Ops](SDValue Op) {
4215 return Ops[0].getValueType() == Op.getValueType();
4216 }) &&
4217 "Concatenation of vectors with inconsistent value types!");
4218 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
4219 VT.getVectorNumElements() &&
4220 "Incorrect element count in vector concatenation!");
4221 
4222 if (Ops.size() == 1)
4223 return Ops[0];
4224 
4225 // Concat of UNDEFs is UNDEF.
4226 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4227 return DAG.getUNDEF(VT);
4228 
4229 // Scan the operands and look for extract operations from a single source
4230 // that correspond to insertion at the same location via this concatenation:
4231 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4232 SDValue IdentitySrc;
4233 bool IsIdentity = true;
4234 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4235 SDValue Op = Ops[i];
4236 unsigned IdentityIndex = i * Op.getValueType().getVectorNumElements();
4237 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4238 Op.getOperand(0).getValueType() != VT ||
4239 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4240 !isa<ConstantSDNode>(Op.getOperand(1)) ||
4241 Op.getConstantOperandVal(1) != IdentityIndex) {
4242 IsIdentity = false;
4243 break;
4244 }
4245 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4246 "Unexpected identity source vector for concat of extracts");
4247 IdentitySrc = Op.getOperand(0);
4248 }
4249 if (IsIdentity) {
4250 assert(IdentitySrc && "Failed to set source vector of extracts");
4251 return IdentitySrc;
4252 }
4253 
4254 // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
4255 // simplified to one big BUILD_VECTOR.
4256 // FIXME: Add support for SCALAR_TO_VECTOR as well.
4257 EVT SVT = VT.getScalarType();
4258 SmallVector<SDValue, 16> Elts;
4259 for (SDValue Op : Ops) {
4260 EVT OpVT = Op.getValueType();
4261 if (Op.isUndef())
4262 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4263 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4264 Elts.append(Op->op_begin(), Op->op_end());
4265 else
4266 return SDValue();
4267 }
4268 
4269 // BUILD_VECTOR requires all inputs to be of the same type; find the
4270 // maximum type and extend them all.
4271 for (SDValue Op : Elts)
4272 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4273 
4274 if (SVT.bitsGT(VT.getScalarType()))
4275 for (SDValue &Op : Elts)
4276 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4277 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4278 : DAG.getSExtOrTrunc(Op, DL, SVT);
4279 
4280 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4281 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4282 return V;
4283 }
4284 
4285 /// Gets or creates the specified node.
4286 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4287 FoldingSetNodeID ID;
4288 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4289 void *IP = nullptr;
4290 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4291 return SDValue(E, 0);
4292 
4293 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4294 getVTList(VT));
4295 CSEMap.InsertNode(N, IP);
4296 
4297 InsertNode(N);
4298 SDValue V = SDValue(N, 0);
4299 NewSDValueDbgMsg(V, "Creating new node: ", this);
4300 return V;
4301 }
4302 
4303 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4304 SDValue Operand, const SDNodeFlags Flags) {
4305 // Constant fold unary operations with an integer constant operand. Even an
4306 // opaque constant will be folded, because the folding of unary operations
4307 // doesn't create new constants with different values. Nevertheless, the
4308 // opaque flag is preserved during folding to prevent future folding with
4309 // other constants.
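// Illustrative example (hypothetical call): getNode(ISD::ZERO_EXTEND, DL,
// MVT::i32, getConstant(0xFF, DL, MVT::i8)) folds straight to the i32
// constant 0xFF below instead of materializing a ZERO_EXTEND node.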
4310 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 4311 const APInt &Val = C->getAPIntValue(); 4312 switch (Opcode) { 4313 default: break; 4314 case ISD::SIGN_EXTEND: 4315 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 4316 C->isTargetOpcode(), C->isOpaque()); 4317 case ISD::TRUNCATE: 4318 if (C->isOpaque()) 4319 break; 4320 LLVM_FALLTHROUGH; 4321 case ISD::ANY_EXTEND: 4322 case ISD::ZERO_EXTEND: 4323 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 4324 C->isTargetOpcode(), C->isOpaque()); 4325 case ISD::UINT_TO_FP: 4326 case ISD::SINT_TO_FP: { 4327 APFloat apf(EVTToAPFloatSemantics(VT), 4328 APInt::getNullValue(VT.getSizeInBits())); 4329 (void)apf.convertFromAPInt(Val, 4330 Opcode==ISD::SINT_TO_FP, 4331 APFloat::rmNearestTiesToEven); 4332 return getConstantFP(apf, DL, VT); 4333 } 4334 case ISD::BITCAST: 4335 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 4336 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 4337 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 4338 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 4339 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 4340 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 4341 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 4342 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 4343 break; 4344 case ISD::ABS: 4345 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 4346 C->isOpaque()); 4347 case ISD::BITREVERSE: 4348 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 4349 C->isOpaque()); 4350 case ISD::BSWAP: 4351 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 4352 C->isOpaque()); 4353 case ISD::CTPOP: 4354 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 4355 C->isOpaque()); 4356 case ISD::CTLZ: 4357 case ISD::CTLZ_ZERO_UNDEF: 4358 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 4359 C->isOpaque()); 4360 case ISD::CTTZ: 4361 case ISD::CTTZ_ZERO_UNDEF: 4362 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 4363 C->isOpaque()); 4364 case ISD::FP16_TO_FP: { 4365 bool Ignored; 4366 APFloat FPV(APFloat::IEEEhalf(), 4367 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 4368 4369 // This can return overflow, underflow, or inexact; we don't care. 4370 // FIXME need to be more flexible about rounding mode. 4371 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4372 APFloat::rmNearestTiesToEven, &Ignored); 4373 return getConstantFP(FPV, DL, VT); 4374 } 4375 } 4376 } 4377 4378 // Constant fold unary operations with a floating point constant operand. 
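// E.g. (hypothetical) FNEG of ConstantFP +2.0 folds to -2.0 via
// APFloat::changeSign below, and FCEIL of 1.5 folds to 2.0 because
// roundToIntegral reports opInexact, which is accepted.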
4379 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4380 APFloat V = C->getValueAPF(); // make copy 4381 switch (Opcode) { 4382 case ISD::FNEG: 4383 V.changeSign(); 4384 return getConstantFP(V, DL, VT); 4385 case ISD::FABS: 4386 V.clearSign(); 4387 return getConstantFP(V, DL, VT); 4388 case ISD::FCEIL: { 4389 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4390 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4391 return getConstantFP(V, DL, VT); 4392 break; 4393 } 4394 case ISD::FTRUNC: { 4395 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4396 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4397 return getConstantFP(V, DL, VT); 4398 break; 4399 } 4400 case ISD::FFLOOR: { 4401 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4402 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4403 return getConstantFP(V, DL, VT); 4404 break; 4405 } 4406 case ISD::FP_EXTEND: { 4407 bool ignored; 4408 // This can return overflow, underflow, or inexact; we don't care. 4409 // FIXME need to be more flexible about rounding mode. 4410 (void)V.convert(EVTToAPFloatSemantics(VT), 4411 APFloat::rmNearestTiesToEven, &ignored); 4412 return getConstantFP(V, DL, VT); 4413 } 4414 case ISD::FP_TO_SINT: 4415 case ISD::FP_TO_UINT: { 4416 bool ignored; 4417 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4418 // FIXME need to be more flexible about rounding mode. 4419 APFloat::opStatus s = 4420 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4421 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4422 break; 4423 return getConstant(IntVal, DL, VT); 4424 } 4425 case ISD::BITCAST: 4426 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4427 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4428 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4429 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4430 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4431 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4432 break; 4433 case ISD::FP_TO_FP16: { 4434 bool Ignored; 4435 // This can return overflow, underflow, or inexact; we don't care. 4436 // FIXME need to be more flexible about rounding mode. 4437 (void)V.convert(APFloat::IEEEhalf(), 4438 APFloat::rmNearestTiesToEven, &Ignored); 4439 return getConstant(V.bitcastToAPInt(), DL, VT); 4440 } 4441 } 4442 } 4443 4444 // Constant fold unary operations with a vector integer or float operand. 4445 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4446 if (BV->isConstant()) { 4447 switch (Opcode) { 4448 default: 4449 // FIXME: Entirely reasonable to perform folding of other unary 4450 // operations here as the need arises. 
4451 break;
4452 case ISD::FNEG:
4453 case ISD::FABS:
4454 case ISD::FCEIL:
4455 case ISD::FTRUNC:
4456 case ISD::FFLOOR:
4457 case ISD::FP_EXTEND:
4458 case ISD::FP_TO_SINT:
4459 case ISD::FP_TO_UINT:
4460 case ISD::TRUNCATE:
4461 case ISD::ANY_EXTEND:
4462 case ISD::ZERO_EXTEND:
4463 case ISD::SIGN_EXTEND:
4464 case ISD::UINT_TO_FP:
4465 case ISD::SINT_TO_FP:
4466 case ISD::ABS:
4467 case ISD::BITREVERSE:
4468 case ISD::BSWAP:
4469 case ISD::CTLZ:
4470 case ISD::CTLZ_ZERO_UNDEF:
4471 case ISD::CTTZ:
4472 case ISD::CTTZ_ZERO_UNDEF:
4473 case ISD::CTPOP: {
4474 SDValue Ops = { Operand };
4475 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4476 return Fold;
4477 }
4478 }
4479 }
4480 }
4481 
4482 unsigned OpOpcode = Operand.getNode()->getOpcode();
4483 switch (Opcode) {
4484 case ISD::TokenFactor:
4485 case ISD::MERGE_VALUES:
4486 case ISD::CONCAT_VECTORS:
4487 return Operand; // Factor, merge or concat of one node? No need.
4488 case ISD::BUILD_VECTOR: {
4489 // Attempt to simplify BUILD_VECTOR.
4490 SDValue Ops[] = {Operand};
4491 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4492 return V;
4493 break;
4494 }
4495 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4496 case ISD::FP_EXTEND:
4497 assert(VT.isFloatingPoint() &&
4498 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4499 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4500 assert((!VT.isVector() ||
4501 VT.getVectorNumElements() ==
4502 Operand.getValueType().getVectorNumElements()) &&
4503 "Vector element count mismatch!");
4504 assert(Operand.getValueType().bitsLT(VT) &&
4505 "Invalid fpext node, dst < src!");
4506 if (Operand.isUndef())
4507 return getUNDEF(VT);
4508 break;
4509 case ISD::FP_TO_SINT:
4510 case ISD::FP_TO_UINT:
4511 if (Operand.isUndef())
4512 return getUNDEF(VT);
4513 break;
4514 case ISD::SINT_TO_FP:
4515 case ISD::UINT_TO_FP:
4516 // [us]itofp(undef) = 0, because the result value is bounded.
4517 if (Operand.isUndef())
4518 return getConstantFP(0.0, DL, VT);
4519 break;
4520 case ISD::SIGN_EXTEND:
4521 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4522 "Invalid SIGN_EXTEND!");
4523 assert(VT.isVector() == Operand.getValueType().isVector() &&
4524 "SIGN_EXTEND result type should be vector iff the operand "
4525 "type is vector!");
4526 if (Operand.getValueType() == VT) return Operand; // noop extension
4527 assert((!VT.isVector() ||
4528 VT.getVectorNumElements() ==
4529 Operand.getValueType().getVectorNumElements()) &&
4530 "Vector element count mismatch!");
4531 assert(Operand.getValueType().bitsLT(VT) &&
4532 "Invalid sext node, dst < src!");
4533 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4534 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4535 else if (OpOpcode == ISD::UNDEF)
4536 // sext(undef) = 0, because the top bits will all be the same.
4537 return getConstant(0, DL, VT);
4538 break;
4539 case ISD::ZERO_EXTEND:
4540 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4541 "Invalid ZERO_EXTEND!");
4542 assert(VT.isVector() == Operand.getValueType().isVector() &&
4543 "ZERO_EXTEND result type should be vector iff the operand "
4544 "type is vector!");
4545 if (Operand.getValueType() == VT) return Operand; // noop extension
4546 assert((!VT.isVector() ||
4547 VT.getVectorNumElements() ==
4548 Operand.getValueType().getVectorNumElements()) &&
4549 "Vector element count mismatch!");
4550 assert(Operand.getValueType().bitsLT(VT) &&
4551 "Invalid zext node, dst < src!");
4552 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4553 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4554 else if (OpOpcode == ISD::UNDEF)
4555 // zext(undef) = 0, because the top bits will be zero.
4556 return getConstant(0, DL, VT);
4557 break;
4558 case ISD::ANY_EXTEND:
4559 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4560 "Invalid ANY_EXTEND!");
4561 assert(VT.isVector() == Operand.getValueType().isVector() &&
4562 "ANY_EXTEND result type should be vector iff the operand "
4563 "type is vector!");
4564 if (Operand.getValueType() == VT) return Operand; // noop extension
4565 assert((!VT.isVector() ||
4566 VT.getVectorNumElements() ==
4567 Operand.getValueType().getVectorNumElements()) &&
4568 "Vector element count mismatch!");
4569 assert(Operand.getValueType().bitsLT(VT) &&
4570 "Invalid anyext node, dst < src!");
4571 
4572 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4573 OpOpcode == ISD::ANY_EXTEND)
4574 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4575 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4576 else if (OpOpcode == ISD::UNDEF)
4577 return getUNDEF(VT);
4578 
4579 // (ext (trunc x)) -> x
4580 if (OpOpcode == ISD::TRUNCATE) {
4581 SDValue OpOp = Operand.getOperand(0);
4582 if (OpOp.getValueType() == VT) {
4583 transferDbgValues(Operand, OpOp);
4584 return OpOp;
4585 }
4586 }
4587 break;
4588 case ISD::TRUNCATE:
4589 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4590 "Invalid TRUNCATE!");
4591 assert(VT.isVector() == Operand.getValueType().isVector() &&
4592 "TRUNCATE result type should be vector iff the operand "
4593 "type is vector!");
4594 if (Operand.getValueType() == VT) return Operand; // noop truncate
4595 assert((!VT.isVector() ||
4596 VT.getVectorNumElements() ==
4597 Operand.getValueType().getVectorNumElements()) &&
4598 "Vector element count mismatch!");
4599 assert(Operand.getValueType().bitsGT(VT) &&
4600 "Invalid truncate node, src < dst!");
4601 if (OpOpcode == ISD::TRUNCATE)
4602 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4603 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4604 OpOpcode == ISD::ANY_EXTEND) {
4605 // If the source is smaller than the dest, we still need an extend.
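// E.g. (hypothetical types) truncating (zext i8 to i64) to i32 becomes
// (zext i8 to i32); truncating (zext i32 to i64) to i16 re-truncates the
// original i32 value; and truncating (zext i16 to i64) to i16 simply
// returns the original i16 value.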
4606 if (Operand.getOperand(0).getValueType().getScalarType() 4607 .bitsLT(VT.getScalarType())) 4608 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4609 if (Operand.getOperand(0).getValueType().bitsGT(VT)) 4610 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4611 return Operand.getOperand(0); 4612 } 4613 if (OpOpcode == ISD::UNDEF) 4614 return getUNDEF(VT); 4615 break; 4616 case ISD::ANY_EXTEND_VECTOR_INREG: 4617 case ISD::ZERO_EXTEND_VECTOR_INREG: 4618 case ISD::SIGN_EXTEND_VECTOR_INREG: 4619 assert(VT.isVector() && "This DAG node is restricted to vector types."); 4620 assert(Operand.getValueType().bitsLE(VT) && 4621 "The input must be the same size or smaller than the result."); 4622 assert(VT.getVectorNumElements() < 4623 Operand.getValueType().getVectorNumElements() && 4624 "The destination vector type must have fewer lanes than the input."); 4625 break; 4626 case ISD::ABS: 4627 assert(VT.isInteger() && VT == Operand.getValueType() && 4628 "Invalid ABS!"); 4629 if (OpOpcode == ISD::UNDEF) 4630 return getUNDEF(VT); 4631 break; 4632 case ISD::BSWAP: 4633 assert(VT.isInteger() && VT == Operand.getValueType() && 4634 "Invalid BSWAP!"); 4635 assert((VT.getScalarSizeInBits() % 16 == 0) && 4636 "BSWAP types must be a multiple of 16 bits!"); 4637 if (OpOpcode == ISD::UNDEF) 4638 return getUNDEF(VT); 4639 break; 4640 case ISD::BITREVERSE: 4641 assert(VT.isInteger() && VT == Operand.getValueType() && 4642 "Invalid BITREVERSE!"); 4643 if (OpOpcode == ISD::UNDEF) 4644 return getUNDEF(VT); 4645 break; 4646 case ISD::BITCAST: 4647 // Basic sanity checking. 4648 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 4649 "Cannot BITCAST between types of different sizes!"); 4650 if (VT == Operand.getValueType()) return Operand; // noop conversion. 4651 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4652 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4653 if (OpOpcode == ISD::UNDEF) 4654 return getUNDEF(VT); 4655 break; 4656 case ISD::SCALAR_TO_VECTOR: 4657 assert(VT.isVector() && !Operand.getValueType().isVector() && 4658 (VT.getVectorElementType() == Operand.getValueType() || 4659 (VT.getVectorElementType().isInteger() && 4660 Operand.getValueType().isInteger() && 4661 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4662 "Illegal SCALAR_TO_VECTOR node!"); 4663 if (OpOpcode == ISD::UNDEF) 4664 return getUNDEF(VT); 4665 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4666 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4667 isa<ConstantSDNode>(Operand.getOperand(1)) && 4668 Operand.getConstantOperandVal(1) == 0 && 4669 Operand.getOperand(0).getValueType() == VT) 4670 return Operand.getOperand(0); 4671 break; 4672 case ISD::FNEG: 4673 // Negation of an unknown bag of bits is still completely undefined. 
4674 if (OpOpcode == ISD::UNDEF) 4675 return getUNDEF(VT); 4676 4677 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 4678 if ((getTarget().Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros()) && 4679 OpOpcode == ISD::FSUB) 4680 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1), 4681 Operand.getOperand(0), Flags); 4682 if (OpOpcode == ISD::FNEG) // --X -> X 4683 return Operand.getOperand(0); 4684 break; 4685 case ISD::FABS: 4686 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4687 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4688 break; 4689 } 4690 4691 SDNode *N; 4692 SDVTList VTs = getVTList(VT); 4693 SDValue Ops[] = {Operand}; 4694 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4695 FoldingSetNodeID ID; 4696 AddNodeIDNode(ID, Opcode, VTs, Ops); 4697 void *IP = nullptr; 4698 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4699 E->intersectFlagsWith(Flags); 4700 return SDValue(E, 0); 4701 } 4702 4703 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4704 N->setFlags(Flags); 4705 createOperands(N, Ops); 4706 CSEMap.InsertNode(N, IP); 4707 } else { 4708 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4709 createOperands(N, Ops); 4710 } 4711 4712 InsertNode(N); 4713 SDValue V = SDValue(N, 0); 4714 NewSDValueDbgMsg(V, "Creating new node: ", this); 4715 return V; 4716 } 4717 4718 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 4719 const APInt &C2) { 4720 switch (Opcode) { 4721 case ISD::ADD: return std::make_pair(C1 + C2, true); 4722 case ISD::SUB: return std::make_pair(C1 - C2, true); 4723 case ISD::MUL: return std::make_pair(C1 * C2, true); 4724 case ISD::AND: return std::make_pair(C1 & C2, true); 4725 case ISD::OR: return std::make_pair(C1 | C2, true); 4726 case ISD::XOR: return std::make_pair(C1 ^ C2, true); 4727 case ISD::SHL: return std::make_pair(C1 << C2, true); 4728 case ISD::SRL: return std::make_pair(C1.lshr(C2), true); 4729 case ISD::SRA: return std::make_pair(C1.ashr(C2), true); 4730 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true); 4731 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true); 4732 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true); 4733 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true); 4734 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true); 4735 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? 
C1 : C2, true); 4736 case ISD::SADDSAT: return std::make_pair(C1.sadd_sat(C2), true); 4737 case ISD::UADDSAT: return std::make_pair(C1.uadd_sat(C2), true); 4738 case ISD::SSUBSAT: return std::make_pair(C1.ssub_sat(C2), true); 4739 case ISD::USUBSAT: return std::make_pair(C1.usub_sat(C2), true); 4740 case ISD::UDIV: 4741 if (!C2.getBoolValue()) 4742 break; 4743 return std::make_pair(C1.udiv(C2), true); 4744 case ISD::UREM: 4745 if (!C2.getBoolValue()) 4746 break; 4747 return std::make_pair(C1.urem(C2), true); 4748 case ISD::SDIV: 4749 if (!C2.getBoolValue()) 4750 break; 4751 return std::make_pair(C1.sdiv(C2), true); 4752 case ISD::SREM: 4753 if (!C2.getBoolValue()) 4754 break; 4755 return std::make_pair(C1.srem(C2), true); 4756 } 4757 return std::make_pair(APInt(1, 0), false); 4758 } 4759 4760 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4761 EVT VT, const ConstantSDNode *C1, 4762 const ConstantSDNode *C2) { 4763 if (C1->isOpaque() || C2->isOpaque()) 4764 return SDValue(); 4765 4766 std::pair<APInt, bool> Folded = FoldValue(Opcode, C1->getAPIntValue(), 4767 C2->getAPIntValue()); 4768 if (!Folded.second) 4769 return SDValue(); 4770 return getConstant(Folded.first, DL, VT); 4771 } 4772 4773 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4774 const GlobalAddressSDNode *GA, 4775 const SDNode *N2) { 4776 if (GA->getOpcode() != ISD::GlobalAddress) 4777 return SDValue(); 4778 if (!TLI->isOffsetFoldingLegal(GA)) 4779 return SDValue(); 4780 auto *C2 = dyn_cast<ConstantSDNode>(N2); 4781 if (!C2) 4782 return SDValue(); 4783 int64_t Offset = C2->getSExtValue(); 4784 switch (Opcode) { 4785 case ISD::ADD: break; 4786 case ISD::SUB: Offset = -uint64_t(Offset); break; 4787 default: return SDValue(); 4788 } 4789 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, 4790 GA->getOffset() + uint64_t(Offset)); 4791 } 4792 4793 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4794 switch (Opcode) { 4795 case ISD::SDIV: 4796 case ISD::UDIV: 4797 case ISD::SREM: 4798 case ISD::UREM: { 4799 // If a divisor is zero/undef or any element of a divisor vector is 4800 // zero/undef, the whole op is undef. 4801 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4802 SDValue Divisor = Ops[1]; 4803 if (Divisor.isUndef() || isNullConstant(Divisor)) 4804 return true; 4805 4806 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4807 llvm::any_of(Divisor->op_values(), 4808 [](SDValue V) { return V.isUndef() || 4809 isNullConstant(V); }); 4810 // TODO: Handle signed overflow. 4811 } 4812 // TODO: Handle oversized shifts. 4813 default: 4814 return false; 4815 } 4816 } 4817 4818 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4819 EVT VT, SDNode *N1, SDNode *N2) { 4820 // If the opcode is a target-specific ISD node, there's nothing we can 4821 // do here and the operand rules may not line up with the below, so 4822 // bail early. 4823 if (Opcode >= ISD::BUILTIN_OP_END) 4824 return SDValue(); 4825 4826 if (isUndef(Opcode, {SDValue(N1, 0), SDValue(N2, 0)})) 4827 return getUNDEF(VT); 4828 4829 // Handle the case of two scalars. 
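  // For illustration, two i32 constants fold here via FoldValue above, e.g.
  //   (mul 6, 7) -> 42
  //   (sub 3, 5) -> -2 (APInt arithmetic wraps)
  // Division by zero never reaches FoldValue: isUndef() above already turned
  // such nodes into UNDEF.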
4830   if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
4831     if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
4832       SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, C1, C2);
4833       assert((!Folded || !VT.isVector()) &&
4834              "Can't fold vector ops with scalar operands");
4835       return Folded;
4836     }
4837   }
4838
4839   // fold (add Sym, c) -> Sym+c
4840   if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
4841     return FoldSymbolOffset(Opcode, VT, GA, N2);
4842   if (TLI->isCommutativeBinOp(Opcode))
4843     if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
4844       return FoldSymbolOffset(Opcode, VT, GA, N1);
4845
4846   // For vectors, extract each constant element and fold them individually.
4847   // Either input may be an undef value.
4848   auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
4849   if (!BV1 && !N1->isUndef())
4850     return SDValue();
4851   auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
4852   if (!BV2 && !N2->isUndef())
4853     return SDValue();
4854   // If both operands are undef, that's handled the same way as scalars.
4855   if (!BV1 && !BV2)
4856     return SDValue();
4857
4858   assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
4859          "Vector binop with different number of elements in operands?");
4860
4861   EVT SVT = VT.getScalarType();
4862   EVT LegalSVT = SVT;
4863   if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4864     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4865     if (LegalSVT.bitsLT(SVT))
4866       return SDValue();
4867   }
4868   SmallVector<SDValue, 4> Outputs;
4869   unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
4870   for (unsigned I = 0; I != NumOps; ++I) {
4871     SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
4872     SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
4873     if (SVT.isInteger()) {
4874       if (V1->getValueType(0).bitsGT(SVT))
4875         V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4876       if (V2->getValueType(0).bitsGT(SVT))
4877         V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4878     }
4879
4880     if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4881       return SDValue();
4882
4883     // Fold one vector element.
4884     SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4885     if (LegalSVT != SVT)
4886       ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4887
4888     // Scalar folding only succeeded if the result is a constant or UNDEF.
4889     if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4890         ScalarResult.getOpcode() != ISD::ConstantFP)
4891       return SDValue();
4892     Outputs.push_back(ScalarResult);
4893   }
4894
4895   assert(VT.getVectorNumElements() == Outputs.size() &&
4896          "Vector size mismatch!");
4897
4898   // We may have a vector type but a scalar result. Create a splat.
4899   Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4900
4901   // Build a big vector out of the scalar elements we generated.
4902   return getBuildVector(VT, SDLoc(), Outputs);
4903 }
4904
4905 // TODO: Merge with FoldConstantArithmetic
4906 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
4907                                                    const SDLoc &DL, EVT VT,
4908                                                    ArrayRef<SDValue> Ops,
4909                                                    const SDNodeFlags Flags) {
4910   // If the opcode is a target-specific ISD node, there's nothing we can
4911   // do here and the operand rules may not line up with the below, so
4912   // bail early.
4913   if (Opcode >= ISD::BUILTIN_OP_END)
4914     return SDValue();
4915
4916   if (isUndef(Opcode, Ops))
4917     return getUNDEF(VT);
4918
4919   // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
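  // For example (illustrative): (add (build_vector 1, 2), (build_vector 10, 20))
  // is folded lane-by-lane into (build_vector 11, 22); an UNDEF lane folds
  // exactly as the corresponding scalar operation on undef would.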
4920   if (!VT.isVector())
4921     return SDValue();
4922
4923   unsigned NumElts = VT.getVectorNumElements();
4924
4925   auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
4926     return !Op.getValueType().isVector() ||
4927            Op.getValueType().getVectorNumElements() == NumElts;
4928   };
4929
4930   auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
4931     BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
4932     return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
4933            (BV && BV->isConstant());
4934   };
4935
4936   // All operands must be vector types with the same number of elements as
4937   // the result type and must be either UNDEF or a build vector of constant
4938   // or UNDEF scalars.
4939   if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
4940       !llvm::all_of(Ops, IsScalarOrSameVectorSize))
4941     return SDValue();
4942
4943   // If we are comparing vectors, then the result needs to be an i1 boolean
4944   // that is then sign-extended back to the legal result type.
4945   EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
4946
4947   // Find a legal integer scalar type for constant promotion and
4948   // ensure that its scalar size is at least as large as the source.
4949   EVT LegalSVT = VT.getScalarType();
4950   if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4951     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4952     if (LegalSVT.bitsLT(VT.getScalarType()))
4953       return SDValue();
4954   }
4955
4956   // Constant fold each scalar lane separately.
4957   SmallVector<SDValue, 4> ScalarResults;
4958   for (unsigned i = 0; i != NumElts; i++) {
4959     SmallVector<SDValue, 4> ScalarOps;
4960     for (SDValue Op : Ops) {
4961       EVT InSVT = Op.getValueType().getScalarType();
4962       BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
4963       if (!InBV) {
4964         // We've checked that this is UNDEF or a constant of some kind.
4965         if (Op.isUndef())
4966           ScalarOps.push_back(getUNDEF(InSVT));
4967         else
4968           ScalarOps.push_back(Op);
4969         continue;
4970       }
4971
4972       SDValue ScalarOp = InBV->getOperand(i);
4973       EVT ScalarVT = ScalarOp.getValueType();
4974
4975       // Build vector (integer) scalar operands may need implicit
4976       // truncation - do this before constant folding.
4977       if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
4978         ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
4979
4980       ScalarOps.push_back(ScalarOp);
4981     }
4982
4983     // Constant fold the scalar operands.
4984     SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
4985
4986     // Legalize the (integer) scalar constant if necessary.
4987     if (LegalSVT != SVT)
4988       ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4989
4990     // Scalar folding only succeeded if the result is a constant or UNDEF.
4991     if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4992         ScalarResult.getOpcode() != ISD::ConstantFP)
4993       return SDValue();
4994     ScalarResults.push_back(ScalarResult);
4995   }
4996
4997   SDValue V = getBuildVector(VT, DL, ScalarResults);
4998   NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
4999   return V;
5000 }
5001
5002 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
5003                                          EVT VT, SDValue N1, SDValue N2) {
5004   // TODO: We don't do any constant folding for strict FP opcodes here, but we
5005   //       should. That will require dealing with a potentially non-default
5006   //       rounding mode, checking the "opStatus" return value from the APFloat
5007   //       math calculations, and possibly other variations.
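  // For illustration, the non-strict folds below use APFloat directly with
  // round-to-nearest-even; a minimal standalone sketch of the FADD case:
  //   APFloat A(1.5f), B(2.25f);
  //   A.add(B, APFloat::rmNearestTiesToEven);  // A is now 3.75f
  // getConstantFP then wraps such a result in a ConstantFPSDNode.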
5008 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 5009 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 5010 if (N1CFP && N2CFP) { 5011 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF(); 5012 switch (Opcode) { 5013 case ISD::FADD: 5014 C1.add(C2, APFloat::rmNearestTiesToEven); 5015 return getConstantFP(C1, DL, VT); 5016 case ISD::FSUB: 5017 C1.subtract(C2, APFloat::rmNearestTiesToEven); 5018 return getConstantFP(C1, DL, VT); 5019 case ISD::FMUL: 5020 C1.multiply(C2, APFloat::rmNearestTiesToEven); 5021 return getConstantFP(C1, DL, VT); 5022 case ISD::FDIV: 5023 C1.divide(C2, APFloat::rmNearestTiesToEven); 5024 return getConstantFP(C1, DL, VT); 5025 case ISD::FREM: 5026 C1.mod(C2); 5027 return getConstantFP(C1, DL, VT); 5028 case ISD::FCOPYSIGN: 5029 C1.copySign(C2); 5030 return getConstantFP(C1, DL, VT); 5031 default: break; 5032 } 5033 } 5034 if (N1CFP && Opcode == ISD::FP_ROUND) { 5035 APFloat C1 = N1CFP->getValueAPF(); // make copy 5036 bool Unused; 5037 // This can return overflow, underflow, or inexact; we don't care. 5038 // FIXME need to be more flexible about rounding mode. 5039 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, 5040 &Unused); 5041 return getConstantFP(C1, DL, VT); 5042 } 5043 5044 switch (Opcode) { 5045 case ISD::FADD: 5046 case ISD::FSUB: 5047 case ISD::FMUL: 5048 case ISD::FDIV: 5049 case ISD::FREM: 5050 // If both operands are undef, the result is undef. If 1 operand is undef, 5051 // the result is NaN. This should match the behavior of the IR optimizer. 5052 if (N1.isUndef() && N2.isUndef()) 5053 return getUNDEF(VT); 5054 if (N1.isUndef() || N2.isUndef()) 5055 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); 5056 } 5057 return SDValue(); 5058 } 5059 5060 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5061 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 5062 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5063 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 5064 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5065 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5066 5067 // Canonicalize constant to RHS if commutative. 5068 if (TLI->isCommutativeBinOp(Opcode)) { 5069 if (N1C && !N2C) { 5070 std::swap(N1C, N2C); 5071 std::swap(N1, N2); 5072 } else if (N1CFP && !N2CFP) { 5073 std::swap(N1CFP, N2CFP); 5074 std::swap(N1, N2); 5075 } 5076 } 5077 5078 switch (Opcode) { 5079 default: break; 5080 case ISD::TokenFactor: 5081 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 5082 N2.getValueType() == MVT::Other && "Invalid token factor!"); 5083 // Fold trivial token factors. 5084 if (N1.getOpcode() == ISD::EntryToken) return N2; 5085 if (N2.getOpcode() == ISD::EntryToken) return N1; 5086 if (N1 == N2) return N1; 5087 break; 5088 case ISD::BUILD_VECTOR: { 5089 // Attempt to simplify BUILD_VECTOR. 5090 SDValue Ops[] = {N1, N2}; 5091 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5092 return V; 5093 break; 5094 } 5095 case ISD::CONCAT_VECTORS: { 5096 SDValue Ops[] = {N1, N2}; 5097 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5098 return V; 5099 break; 5100 } 5101 case ISD::AND: 5102 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5103 assert(N1.getValueType() == N2.getValueType() && 5104 N1.getValueType() == VT && "Binary operator types must match!"); 5105 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 5106 // worth handling here. 
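    // e.g. (and (i64 x), 0) -> 0 and (and (i64 x), -1) -> x, both of which
    // show up frequently once i64 values have been split into legal halves.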
5107     if (N2C && N2C->isNullValue())
5108       return N2;
5109     if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
5110       return N1;
5111     break;
5112   case ISD::OR:
5113   case ISD::XOR:
5114   case ISD::ADD:
5115   case ISD::SUB:
5116     assert(VT.isInteger() && "This operator does not apply to FP types!");
5117     assert(N1.getValueType() == N2.getValueType() &&
5118            N1.getValueType() == VT && "Binary operator types must match!");
5119     // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5120     // it's worth handling here.
5121     if (N2C && N2C->isNullValue())
5122       return N1;
5123     break;
5124   case ISD::UDIV:
5125   case ISD::UREM:
5126   case ISD::MULHU:
5127   case ISD::MULHS:
5128   case ISD::MUL:
5129   case ISD::SDIV:
5130   case ISD::SREM:
5131   case ISD::SMIN:
5132   case ISD::SMAX:
5133   case ISD::UMIN:
5134   case ISD::UMAX:
5135   case ISD::SADDSAT:
5136   case ISD::SSUBSAT:
5137   case ISD::UADDSAT:
5138   case ISD::USUBSAT:
5139     assert(VT.isInteger() && "This operator does not apply to FP types!");
5140     assert(N1.getValueType() == N2.getValueType() &&
5141            N1.getValueType() == VT && "Binary operator types must match!");
5142     break;
5143   case ISD::FADD:
5144   case ISD::FSUB:
5145   case ISD::FMUL:
5146   case ISD::FDIV:
5147   case ISD::FREM:
5148     assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5149     assert(N1.getValueType() == N2.getValueType() &&
5150            N1.getValueType() == VT && "Binary operator types must match!");
5151     if (SDValue V = simplifyFPBinop(Opcode, N1, N2))
5152       return V;
5153     break;
5154   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
5155     assert(N1.getValueType() == VT &&
5156            N1.getValueType().isFloatingPoint() &&
5157            N2.getValueType().isFloatingPoint() &&
5158            "Invalid FCOPYSIGN!");
5159     break;
5160   case ISD::SHL:
5161   case ISD::SRA:
5162   case ISD::SRL:
5163     if (SDValue V = simplifyShift(N1, N2))
5164       return V;
5165     LLVM_FALLTHROUGH;
5166   case ISD::ROTL:
5167   case ISD::ROTR:
5168     assert(VT == N1.getValueType() &&
5169            "Shift operators' return type must be the same as their first arg");
5170     assert(VT.isInteger() && N2.getValueType().isInteger() &&
5171            "Shifts only work on integers");
5172     assert((!VT.isVector() || VT == N2.getValueType()) &&
5173            "Vector shift amounts must have the same type as their first arg");
5174     // Verify that the shift amount VT is big enough to hold valid shift
5175     // amounts. This catches things like trying to shift an i1024 value by an
5176     // i8, which is easy to fall into in generic code that uses
5177     // TLI.getShiftAmountTy().
5178     assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
5179            "Invalid use of small shift amount with oversized value!");
5180
5181     // Always fold shifts of i1 values so the code generator doesn't need to
5182     // handle them. Since we know the size of the shift has to be less than the
5183     // size of the value, the shift/rotate count is guaranteed to be zero.
5184     if (VT == MVT::i1)
5185       return N1;
5186     if (N2C && N2C->isNullValue())
5187       return N1;
5188     break;
5189   case ISD::FP_ROUND:
5190     assert(VT.isFloatingPoint() &&
5191            N1.getValueType().isFloatingPoint() &&
5192            VT.bitsLE(N1.getValueType()) &&
5193            N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5194            "Invalid FP_ROUND!");
5195     if (N1.getValueType() == VT) return N1;  // noop conversion.
5196 break; 5197 case ISD::AssertSext: 5198 case ISD::AssertZext: { 5199 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5200 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5201 assert(VT.isInteger() && EVT.isInteger() && 5202 "Cannot *_EXTEND_INREG FP types"); 5203 assert(!EVT.isVector() && 5204 "AssertSExt/AssertZExt type should be the vector element type " 5205 "rather than the vector type!"); 5206 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!"); 5207 if (VT.getScalarType() == EVT) return N1; // noop assertion. 5208 break; 5209 } 5210 case ISD::SIGN_EXTEND_INREG: { 5211 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5212 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5213 assert(VT.isInteger() && EVT.isInteger() && 5214 "Cannot *_EXTEND_INREG FP types"); 5215 assert(EVT.isVector() == VT.isVector() && 5216 "SIGN_EXTEND_INREG type should be vector iff the operand " 5217 "type is vector!"); 5218 assert((!EVT.isVector() || 5219 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 5220 "Vector element counts must match in SIGN_EXTEND_INREG"); 5221 assert(EVT.bitsLE(VT) && "Not extending!"); 5222 if (EVT == VT) return N1; // Not actually extending 5223 5224 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 5225 unsigned FromBits = EVT.getScalarSizeInBits(); 5226 Val <<= Val.getBitWidth() - FromBits; 5227 Val.ashrInPlace(Val.getBitWidth() - FromBits); 5228 return getConstant(Val, DL, ConstantVT); 5229 }; 5230 5231 if (N1C) { 5232 const APInt &Val = N1C->getAPIntValue(); 5233 return SignExtendInReg(Val, VT); 5234 } 5235 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 5236 SmallVector<SDValue, 8> Ops; 5237 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 5238 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5239 SDValue Op = N1.getOperand(i); 5240 if (Op.isUndef()) { 5241 Ops.push_back(getUNDEF(OpVT)); 5242 continue; 5243 } 5244 ConstantSDNode *C = cast<ConstantSDNode>(Op); 5245 APInt Val = C->getAPIntValue(); 5246 Ops.push_back(SignExtendInReg(Val, OpVT)); 5247 } 5248 return getBuildVector(VT, DL, Ops); 5249 } 5250 break; 5251 } 5252 case ISD::EXTRACT_VECTOR_ELT: 5253 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() && 5254 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \ 5255 element type of the vector."); 5256 5257 // Extract from an undefined value or using an undefined index is undefined. 5258 if (N1.isUndef() || N2.isUndef()) 5259 return getUNDEF(VT); 5260 5261 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 5262 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) 5263 return getUNDEF(VT); 5264 5265 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 5266 // expanding copies of large vectors from registers. 5267 if (N2C && 5268 N1.getOpcode() == ISD::CONCAT_VECTORS && 5269 N1.getNumOperands() > 0) { 5270 unsigned Factor = 5271 N1.getOperand(0).getValueType().getVectorNumElements(); 5272 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 5273 N1.getOperand(N2C->getZExtValue() / Factor), 5274 getConstant(N2C->getZExtValue() % Factor, DL, 5275 N2.getValueType())); 5276 } 5277 5278 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 5279 // expanding large vector constants. 
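    // e.g. (extract_vector_elt (build_vector a, b, c, d), 2) -> c, with any
    // implicit promotion/truncation of the element made explicit below.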
5280     if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
5281       SDValue Elt = N1.getOperand(N2C->getZExtValue());
5282
5283       if (VT != Elt.getValueType())
5284         // If the vector element type is not legal, the BUILD_VECTOR operands
5285         // are promoted and implicitly truncated, and the result implicitly
5286         // extended. Make that explicit here.
5287         Elt = getAnyExtOrTrunc(Elt, DL, VT);
5288
5289       return Elt;
5290     }
5291
5292     // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5293     // operations are lowered to scalars.
5294     if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5295       // If the indices are the same, return the inserted element else
5296       // if the indices are known different, extract the element from
5297       // the original vector.
5298       SDValue N1Op2 = N1.getOperand(2);
5299       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5300
5301       if (N1Op2C && N2C) {
5302         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5303           if (VT == N1.getOperand(1).getValueType())
5304             return N1.getOperand(1);
5305           else
5306             return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5307         }
5308
5309         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5310       }
5311     }
5312
5313     // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5314     // when vector types are scalarized and v1iX is legal.
5315     // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX, Idx)
5316     if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5317         N1.getValueType().getVectorNumElements() == 1) {
5318       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5319                      N1.getOperand(1));
5320     }
5321     break;
5322   case ISD::EXTRACT_ELEMENT:
5323     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5324     assert(!N1.getValueType().isVector() && !VT.isVector() &&
5325            (N1.getValueType().isInteger() == VT.isInteger()) &&
5326            N1.getValueType() != VT &&
5327            "Wrong types for EXTRACT_ELEMENT!");
5328
5329     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5330     // 64-bit integers into 32-bit parts. Instead of building the extract of
5331     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5332     if (N1.getOpcode() == ISD::BUILD_PAIR)
5333       return N1.getOperand(N2C->getZExtValue());
5334
5335     // EXTRACT_ELEMENT of a constant int is also very common.
5336     if (N1C) {
5337       unsigned ElementSize = VT.getSizeInBits();
5338       unsigned Shift = ElementSize * N2C->getZExtValue();
5339       APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
5340       return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
5341     }
5342     break;
5343   case ISD::EXTRACT_SUBVECTOR:
5344     if (VT.isSimple() && N1.getValueType().isSimple()) {
5345       assert(VT.isVector() && N1.getValueType().isVector() &&
5346              "Extract subvector VTs must be vectors!");
5347       assert(VT.getVectorElementType() ==
5348              N1.getValueType().getVectorElementType() &&
5349              "Extract subvector VTs must have the same element type!");
5350       assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
5351              "Extract subvector must be from larger vector to smaller vector!");
5352
5353       if (N2C) {
5354         assert((VT.getVectorNumElements() + N2C->getZExtValue()
5355                 <= N1.getValueType().getVectorNumElements())
5356                && "Extract subvector overflow!");
5357       }
5358
5359       // Trivial extraction.
5360       if (VT.getSimpleVT() == N1.getSimpleValueType())
5361         return N1;
5362
5363       // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5364 if (N1.isUndef()) 5365 return getUNDEF(VT); 5366 5367 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of 5368 // the concat have the same type as the extract. 5369 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && 5370 N1.getNumOperands() > 0 && 5371 VT == N1.getOperand(0).getValueType()) { 5372 unsigned Factor = VT.getVectorNumElements(); 5373 return N1.getOperand(N2C->getZExtValue() / Factor); 5374 } 5375 5376 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created 5377 // during shuffle legalization. 5378 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && 5379 VT == N1.getOperand(1).getValueType()) 5380 return N1.getOperand(1); 5381 } 5382 break; 5383 } 5384 5385 // Perform trivial constant folding. 5386 if (SDValue SV = 5387 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode())) 5388 return SV; 5389 5390 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2)) 5391 return V; 5392 5393 // Canonicalize an UNDEF to the RHS, even over a constant. 5394 if (N1.isUndef()) { 5395 if (TLI->isCommutativeBinOp(Opcode)) { 5396 std::swap(N1, N2); 5397 } else { 5398 switch (Opcode) { 5399 case ISD::SIGN_EXTEND_INREG: 5400 case ISD::SUB: 5401 return getUNDEF(VT); // fold op(undef, arg2) -> undef 5402 case ISD::UDIV: 5403 case ISD::SDIV: 5404 case ISD::UREM: 5405 case ISD::SREM: 5406 case ISD::SSUBSAT: 5407 case ISD::USUBSAT: 5408 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 5409 } 5410 } 5411 } 5412 5413 // Fold a bunch of operators when the RHS is undef. 5414 if (N2.isUndef()) { 5415 switch (Opcode) { 5416 case ISD::XOR: 5417 if (N1.isUndef()) 5418 // Handle undef ^ undef -> 0 special case. This is a common 5419 // idiom (misuse). 5420 return getConstant(0, DL, VT); 5421 LLVM_FALLTHROUGH; 5422 case ISD::ADD: 5423 case ISD::SUB: 5424 case ISD::UDIV: 5425 case ISD::SDIV: 5426 case ISD::UREM: 5427 case ISD::SREM: 5428 return getUNDEF(VT); // fold op(arg1, undef) -> undef 5429 case ISD::MUL: 5430 case ISD::AND: 5431 case ISD::SSUBSAT: 5432 case ISD::USUBSAT: 5433 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 5434 case ISD::OR: 5435 case ISD::SADDSAT: 5436 case ISD::UADDSAT: 5437 return getAllOnesConstant(DL, VT); 5438 } 5439 } 5440 5441 // Memoize this node if possible. 5442 SDNode *N; 5443 SDVTList VTs = getVTList(VT); 5444 SDValue Ops[] = {N1, N2}; 5445 if (VT != MVT::Glue) { 5446 FoldingSetNodeID ID; 5447 AddNodeIDNode(ID, Opcode, VTs, Ops); 5448 void *IP = nullptr; 5449 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5450 E->intersectFlagsWith(Flags); 5451 return SDValue(E, 0); 5452 } 5453 5454 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5455 N->setFlags(Flags); 5456 createOperands(N, Ops); 5457 CSEMap.InsertNode(N, IP); 5458 } else { 5459 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5460 createOperands(N, Ops); 5461 } 5462 5463 InsertNode(N); 5464 SDValue V = SDValue(N, 0); 5465 NewSDValueDbgMsg(V, "Creating new node: ", this); 5466 return V; 5467 } 5468 5469 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5470 SDValue N1, SDValue N2, SDValue N3, 5471 const SDNodeFlags Flags) { 5472 // Perform various simplifications. 
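  // For example (illustrative): with all-constant operands, the FMA case
  // below folds through APFloat::fusedMultiplyAdd, so fma(2.0, 3.0, 1.5)
  // becomes the constant 7.5 with a single rounding step.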
5473 switch (Opcode) { 5474 case ISD::FMA: { 5475 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5476 assert(N1.getValueType() == VT && N2.getValueType() == VT && 5477 N3.getValueType() == VT && "FMA types must match!"); 5478 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5479 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5480 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 5481 if (N1CFP && N2CFP && N3CFP) { 5482 APFloat V1 = N1CFP->getValueAPF(); 5483 const APFloat &V2 = N2CFP->getValueAPF(); 5484 const APFloat &V3 = N3CFP->getValueAPF(); 5485 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 5486 return getConstantFP(V1, DL, VT); 5487 } 5488 break; 5489 } 5490 case ISD::BUILD_VECTOR: { 5491 // Attempt to simplify BUILD_VECTOR. 5492 SDValue Ops[] = {N1, N2, N3}; 5493 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5494 return V; 5495 break; 5496 } 5497 case ISD::CONCAT_VECTORS: { 5498 SDValue Ops[] = {N1, N2, N3}; 5499 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5500 return V; 5501 break; 5502 } 5503 case ISD::SETCC: { 5504 assert(VT.isInteger() && "SETCC result type must be an integer!"); 5505 assert(N1.getValueType() == N2.getValueType() && 5506 "SETCC operands must have the same type!"); 5507 assert(VT.isVector() == N1.getValueType().isVector() && 5508 "SETCC type should be vector iff the operand type is vector!"); 5509 assert((!VT.isVector() || 5510 VT.getVectorNumElements() == N1.getValueType().getVectorNumElements()) && 5511 "SETCC vector element counts must match!"); 5512 // Use FoldSetCC to simplify SETCC's. 5513 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 5514 return V; 5515 // Vector constant folding. 5516 SDValue Ops[] = {N1, N2, N3}; 5517 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 5518 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 5519 return V; 5520 } 5521 break; 5522 } 5523 case ISD::SELECT: 5524 case ISD::VSELECT: 5525 if (SDValue V = simplifySelect(N1, N2, N3)) 5526 return V; 5527 break; 5528 case ISD::VECTOR_SHUFFLE: 5529 llvm_unreachable("should use getVectorShuffle constructor!"); 5530 case ISD::INSERT_VECTOR_ELT: { 5531 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 5532 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF 5533 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 5534 return getUNDEF(VT); 5535 5536 // Undefined index can be assumed out-of-bounds, so that's UNDEF too. 5537 if (N3.isUndef()) 5538 return getUNDEF(VT); 5539 5540 // If the inserted element is an UNDEF, just use the input vector. 5541 if (N2.isUndef()) 5542 return N1; 5543 5544 break; 5545 } 5546 case ISD::INSERT_SUBVECTOR: { 5547 // Inserting undef into undef is still undef. 
5548     if (N1.isUndef() && N2.isUndef())
5549       return getUNDEF(VT);
5550     SDValue Index = N3;
5551     if (VT.isSimple() && N1.getValueType().isSimple()
5552         && N2.getValueType().isSimple()) {
5553       assert(VT.isVector() && N1.getValueType().isVector() &&
5554              N2.getValueType().isVector() &&
5555              "Insert subvector VTs must be vectors");
5556       assert(VT == N1.getValueType() &&
5557              "Dest and insert subvector source types must match!");
5558       assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
5559              "Insert subvector must be from smaller vector to larger vector!");
5560       if (isa<ConstantSDNode>(Index)) {
5561         assert((N2.getValueType().getVectorNumElements() +
5562                 cast<ConstantSDNode>(Index)->getZExtValue()
5563                 <= VT.getVectorNumElements())
5564                && "Insert subvector overflow!");
5565       }
5566
5567       // Trivial insertion.
5568       if (VT.getSimpleVT() == N2.getSimpleValueType())
5569         return N2;
5570
5571       // If this is an insert of an extracted vector into an undef vector, we
5572       // can just use the input to the extract.
5573       if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5574           N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
5575         return N2.getOperand(0);
5576     }
5577     break;
5578   }
5579   case ISD::BITCAST:
5580     // Fold bit_convert nodes from a type to themselves.
5581     if (N1.getValueType() == VT)
5582       return N1;
5583     break;
5584   }
5585
5586   // Memoize node if it doesn't produce a flag.
5587   SDNode *N;
5588   SDVTList VTs = getVTList(VT);
5589   SDValue Ops[] = {N1, N2, N3};
5590   if (VT != MVT::Glue) {
5591     FoldingSetNodeID ID;
5592     AddNodeIDNode(ID, Opcode, VTs, Ops);
5593     void *IP = nullptr;
5594     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5595       E->intersectFlagsWith(Flags);
5596       return SDValue(E, 0);
5597     }
5598
5599     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5600     N->setFlags(Flags);
5601     createOperands(N, Ops);
5602     CSEMap.InsertNode(N, IP);
5603   } else {
5604     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5605     createOperands(N, Ops);
5606   }
5607
5608   InsertNode(N);
5609   SDValue V = SDValue(N, 0);
5610   NewSDValueDbgMsg(V, "Creating new node: ", this);
5611   return V;
5612 }
5613
5614 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5615                               SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5616   SDValue Ops[] = { N1, N2, N3, N4 };
5617   return getNode(Opcode, DL, VT, Ops);
5618 }
5619
5620 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5621                               SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5622                               SDValue N5) {
5623   SDValue Ops[] = { N1, N2, N3, N4, N5 };
5624   return getNode(Opcode, DL, VT, Ops);
5625 }
5626
5627 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
5628 /// the incoming stack arguments to be loaded from the stack.
5629 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
5630   SmallVector<SDValue, 8> ArgChains;
5631
5632   // Include the original chain at the beginning of the list. When this is
5633   // used by target LowerCall hooks, this helps legalize find the
5634   // CALLSEQ_BEGIN node.
5635   ArgChains.push_back(Chain);
5636
5637   // Add a chain value for each stack argument.
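  // That is, every load from a fixed stack object (a negative frame index,
  // i.e. an incoming argument slot) contributes its chain result (value #1),
  // so the TokenFactor built below can order those loads ahead of code that
  // reuses the argument area.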
5638   for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
5639        UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
5640     if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
5641       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
5642         if (FI->getIndex() < 0)
5643           ArgChains.push_back(SDValue(L, 1));
5644
5645   // Build a tokenfactor for all the chains.
5646   return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
5647 }
5648
5649 /// getMemsetValue - Vectorized representation of the memset value
5650 /// operand.
5651 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
5652                               const SDLoc &dl) {
5653   assert(!Value.isUndef());
5654
5655   unsigned NumBits = VT.getScalarSizeInBits();
5656   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
5657     assert(C->getAPIntValue().getBitWidth() == 8);
5658     APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
5659     if (VT.isInteger()) {
5660       bool IsOpaque = VT.getSizeInBits() > 64 ||
5661           !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
5662       return DAG.getConstant(Val, dl, VT, false, IsOpaque);
5663     }
5664     return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
5665                              VT);
5666   }
5667
5668   assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
5669   EVT IntVT = VT.getScalarType();
5670   if (!IntVT.isInteger())
5671     IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
5672
5673   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
5674   if (NumBits > 8) {
5675     // Use a multiplication with 0x010101... to extend the input to the
5676     // required length.
5677     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
5678     Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
5679                         DAG.getConstant(Magic, dl, IntVT));
5680   }
5681
5682   if (VT != Value.getValueType() && !VT.isInteger())
5683     Value = DAG.getBitcast(VT.getScalarType(), Value);
5684   if (VT != Value.getValueType())
5685     Value = DAG.getSplatBuildVector(VT, dl, Value);
5686
5687   return Value;
5688 }
5689
5690 /// getMemsetStringVal - Similar to getMemsetValue, except this is only used
5691 /// when a memcpy is turned into a memset because the source is a constant
5692 /// string pointer.
5693 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
5694                                   const TargetLowering &TLI,
5695                                   const ConstantDataArraySlice &Slice) {
5696   // Handle vector with all elements zero.
5697   if (Slice.Array == nullptr) {
5698     if (VT.isInteger())
5699       return DAG.getConstant(0, dl, VT);
5700     else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
5701       return DAG.getConstantFP(0.0, dl, VT);
5702     else if (VT.isVector()) {
5703       unsigned NumElts = VT.getVectorNumElements();
5704       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ?
MVT::i32 : MVT::i64; 5705 return DAG.getNode(ISD::BITCAST, dl, VT, 5706 DAG.getConstant(0, dl, 5707 EVT::getVectorVT(*DAG.getContext(), 5708 EltVT, NumElts))); 5709 } else 5710 llvm_unreachable("Expected type!"); 5711 } 5712 5713 assert(!VT.isVector() && "Can't handle vector type here!"); 5714 unsigned NumVTBits = VT.getSizeInBits(); 5715 unsigned NumVTBytes = NumVTBits / 8; 5716 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 5717 5718 APInt Val(NumVTBits, 0); 5719 if (DAG.getDataLayout().isLittleEndian()) { 5720 for (unsigned i = 0; i != NumBytes; ++i) 5721 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 5722 } else { 5723 for (unsigned i = 0; i != NumBytes; ++i) 5724 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 5725 } 5726 5727 // If the "cost" of materializing the integer immediate is less than the cost 5728 // of a load, then it is cost effective to turn the load into the immediate. 5729 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 5730 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 5731 return DAG.getConstant(Val, dl, VT); 5732 return SDValue(nullptr, 0); 5733 } 5734 5735 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, int64_t Offset, 5736 const SDLoc &DL, 5737 const SDNodeFlags Flags) { 5738 EVT VT = Base.getValueType(); 5739 return getMemBasePlusOffset(Base, getConstant(Offset, DL, VT), DL, Flags); 5740 } 5741 5742 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset, 5743 const SDLoc &DL, 5744 const SDNodeFlags Flags) { 5745 assert(Offset.getValueType().isInteger()); 5746 EVT BasePtrVT = Ptr.getValueType(); 5747 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags); 5748 } 5749 5750 /// Returns true if memcpy source is constant data. 5751 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 5752 uint64_t SrcDelta = 0; 5753 GlobalAddressSDNode *G = nullptr; 5754 if (Src.getOpcode() == ISD::GlobalAddress) 5755 G = cast<GlobalAddressSDNode>(Src); 5756 else if (Src.getOpcode() == ISD::ADD && 5757 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 5758 Src.getOperand(1).getOpcode() == ISD::Constant) { 5759 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 5760 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 5761 } 5762 if (!G) 5763 return false; 5764 5765 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 5766 SrcDelta + G->getOffset()); 5767 } 5768 5769 static bool shouldLowerMemFuncForSize(const MachineFunction &MF, 5770 SelectionDAG &DAG) { 5771 // On Darwin, -Os means optimize for size without hurting performance, so 5772 // only really optimize for size when -Oz (MinSize) is used. 5773 if (MF.getTarget().getTargetTriple().isOSDarwin()) 5774 return MF.getFunction().hasMinSize(); 5775 return DAG.shouldOptForSize(); 5776 } 5777 5778 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, 5779 SmallVector<SDValue, 32> &OutChains, unsigned From, 5780 unsigned To, SmallVector<SDValue, 16> &OutLoadChains, 5781 SmallVector<SDValue, 16> &OutStoreChains) { 5782 assert(OutLoadChains.size() && "Missing loads in memcpy inlining"); 5783 assert(OutStoreChains.size() && "Missing stores in memcpy inlining"); 5784 SmallVector<SDValue, 16> GluedLoadChains; 5785 for (unsigned i = From; i < To; ++i) { 5786 OutChains.push_back(OutLoadChains[i]); 5787 GluedLoadChains.push_back(OutLoadChains[i]); 5788 } 5789 5790 // Chain for all loads. 
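  // For illustration: with From == 0 and To == 3, the chains of loads 0..2
  // are merged into one TokenFactor, and each store in that range is
  // recreated below with that token as its chain, ganging the group together.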
5791   SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
5792                                   GluedLoadChains);
5793
5794   for (unsigned i = From; i < To; ++i) {
5795     StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
5796     SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
5797                                          ST->getBasePtr(), ST->getMemoryVT(),
5798                                          ST->getMemOperand());
5799     OutChains.push_back(NewStore);
5800   }
5801 }
5802
5803 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5804                                        SDValue Chain, SDValue Dst, SDValue Src,
5805                                        uint64_t Size, unsigned Alignment,
5806                                        bool isVol, bool AlwaysInline,
5807                                        MachinePointerInfo DstPtrInfo,
5808                                        MachinePointerInfo SrcPtrInfo) {
5809   // Turn a memcpy of undef to nop.
5810   // FIXME: We need to honor volatile even if Src is undef.
5811   if (Src.isUndef())
5812     return Chain;
5813
5814   // Expand memcpy to a series of load and store ops if the size operand falls
5815   // below a certain threshold.
5816   // TODO: In the AlwaysInline case, if the size is big then generate a loop
5817   // rather than maybe a humongous number of loads and stores.
5818   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5819   const DataLayout &DL = DAG.getDataLayout();
5820   LLVMContext &C = *DAG.getContext();
5821   std::vector<EVT> MemOps;
5822   bool DstAlignCanChange = false;
5823   MachineFunction &MF = DAG.getMachineFunction();
5824   MachineFrameInfo &MFI = MF.getFrameInfo();
5825   bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
5826   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5827   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5828     DstAlignCanChange = true;
5829   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5830   if (Alignment > SrcAlign)
5831     SrcAlign = Alignment;
5832   ConstantDataArraySlice Slice;
5833   bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
5834   bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
5835   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
5836
5837   if (!TLI.findOptimalMemOpLowering(
5838           MemOps, Limit, Size, (DstAlignCanChange ? 0 : Alignment),
5839           (isZeroConstant ? 0 : SrcAlign), /*IsMemset=*/false,
5840           /*ZeroMemset=*/false, /*MemcpyStrSrc=*/CopyFromConstant,
5841           /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(),
5842           SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
5843     return SDValue();
5844
5845   if (DstAlignCanChange) {
5846     Type *Ty = MemOps[0].getTypeForEVT(C);
5847     unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5848
5849     // Don't promote to an alignment that would require dynamic stack
5850     // realignment.
5851     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
5852     if (!TRI->needsStackRealignment(MF))
5853       while (NewAlign > Alignment &&
5854              DL.exceedsNaturalStackAlignment(Align(NewAlign)))
5855         NewAlign /= 2;
5856
5857     if (NewAlign > Alignment) {
5858       // Give the stack frame object a larger alignment if needed.
5859       if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5860         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5861       Alignment = NewAlign;
5862     }
5863   }
5864
5865   MachineMemOperand::Flags MMOFlags =
5866       isVol ?
      MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5867   SmallVector<SDValue, 16> OutLoadChains;
5868   SmallVector<SDValue, 16> OutStoreChains;
5869   SmallVector<SDValue, 32> OutChains;
5870   unsigned NumMemOps = MemOps.size();
5871   uint64_t SrcOff = 0, DstOff = 0;
5872   for (unsigned i = 0; i != NumMemOps; ++i) {
5873     EVT VT = MemOps[i];
5874     unsigned VTSize = VT.getSizeInBits() / 8;
5875     SDValue Value, Store;
5876
5877     if (VTSize > Size) {
5878       // Issuing an unaligned load / store pair that overlaps with the previous
5879       // pair. Adjust the offset accordingly.
5880       assert(i == NumMemOps-1 && i != 0);
5881       SrcOff -= VTSize - Size;
5882       DstOff -= VTSize - Size;
5883     }
5884
5885     if (CopyFromConstant &&
5886         (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
5887       // It's unlikely a store of a vector immediate can be done in a single
5888       // instruction. It would require a load from a constantpool first.
5889       // We only handle zero vectors here.
5890       // FIXME: Handle other cases where store of vector immediate is done in
5891       // a single instruction.
5892       ConstantDataArraySlice SubSlice;
5893       if (SrcOff < Slice.Length) {
5894         SubSlice = Slice;
5895         SubSlice.move(SrcOff);
5896       } else {
5897         // This is an out-of-bounds access and hence UB. Pretend we read zero.
5898         SubSlice.Array = nullptr;
5899         SubSlice.Offset = 0;
5900         SubSlice.Length = VTSize;
5901       }
5902       Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
5903       if (Value.getNode()) {
5904         Store = DAG.getStore(
5905             Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5906             DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
5907         OutChains.push_back(Store);
5908       }
5909     }
5910
5911     if (!Store.getNode()) {
5912       // The type might not be legal for the target. This should only happen
5913       // if the type is smaller than a legal type, as on PPC, so the right
5914       // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
5915       // to Load/Store if NVT==VT.
5916       // FIXME: does the case above also need this?
5917       EVT NVT = TLI.getTypeToTransformTo(C, VT);
5918       assert(NVT.bitsGE(VT));
5919
5920       bool isDereferenceable =
5921           SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5922       MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5923       if (isDereferenceable)
5924         SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5925
5926       Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
5927                              DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5928                              SrcPtrInfo.getWithOffset(SrcOff), VT,
5929                              MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
5930       OutLoadChains.push_back(Value.getValue(1));
5931
5932       Store = DAG.getTruncStore(
5933           Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5934           DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
5935       OutStoreChains.push_back(Store);
5936     }
5937     SrcOff += VTSize;
5938     DstOff += VTSize;
5939     Size -= VTSize;
5940   }
5941
5942   unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
5943                             TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
5944   unsigned NumLdStInMemcpy = OutStoreChains.size();
5945
5946   if (NumLdStInMemcpy) {
5947     // The memcpy may have been converted to a memset if it copies
5948     // constants. In that case there are no loads, only stores, and with no
5949     // loads there is nothing to gang up.
5950     if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
5951       // If the target does not care, just leave them as they are.
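      // i.e. interleave them: load0, store0, load1, store1, ... with no
      // gluing. (With gluing enabled, the else branch below groups them
      // instead; e.g. 10 pairs with a glue limit of 4 are chained as [6,10)
      // and [2,6), with the residual call covering [0,2).)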
5952       for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
5953         OutChains.push_back(OutLoadChains[i]);
5954         OutChains.push_back(OutStoreChains[i]);
5955       }
5956     } else {
5957       // Ld/St count is less than or equal to the limit set by the target.
5958       if (NumLdStInMemcpy <= GluedLdStLimit) {
5959         chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
5960                                      NumLdStInMemcpy, OutLoadChains,
5961                                      OutStoreChains);
5962       } else {
5963         unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
5964         unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
5965         unsigned GlueIter = 0;
5966
5967         for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
5968           unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
5969           unsigned IndexTo   = NumLdStInMemcpy - GlueIter;
5970
5971           chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
5972                                        OutLoadChains, OutStoreChains);
5973           GlueIter += GluedLdStLimit;
5974         }
5975
5976         // Residual ld/st.
5977         if (RemainingLdStInMemcpy) {
5978           chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
5979                                        RemainingLdStInMemcpy, OutLoadChains,
5980                                        OutStoreChains);
5981         }
5982       }
5983     }
5984   }
5985   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
5986 }
5987
5988 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5989                                         SDValue Chain, SDValue Dst, SDValue Src,
5990                                         uint64_t Size, unsigned Align,
5991                                         bool isVol, bool AlwaysInline,
5992                                         MachinePointerInfo DstPtrInfo,
5993                                         MachinePointerInfo SrcPtrInfo) {
5994   // Turn a memmove of undef to nop.
5995   // FIXME: We need to honor volatile even if Src is undef.
5996   if (Src.isUndef())
5997     return Chain;
5998
5999   // Expand memmove to a series of load and store ops if the size operand falls
6000   // below a certain threshold.
6001   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6002   const DataLayout &DL = DAG.getDataLayout();
6003   LLVMContext &C = *DAG.getContext();
6004   std::vector<EVT> MemOps;
6005   bool DstAlignCanChange = false;
6006   MachineFunction &MF = DAG.getMachineFunction();
6007   MachineFrameInfo &MFI = MF.getFrameInfo();
6008   bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6009   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6010   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6011     DstAlignCanChange = true;
6012   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
6013   if (Align > SrcAlign)
6014     SrcAlign = Align;
6015   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
6016   // FIXME: `AllowOverlap` should really be `!isVol` but there is a bug in
6017   // findOptimalMemOpLowering. Meanwhile, setting it to `false` produces the
6018   // correct code.
6019   bool AllowOverlap = false;
6020   if (!TLI.findOptimalMemOpLowering(
6021           MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align), SrcAlign,
6022           /*IsMemset=*/false, /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
6023           AllowOverlap, DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
6024           MF.getFunction().getAttributes()))
6025     return SDValue();
6026
6027   if (DstAlignCanChange) {
6028     Type *Ty = MemOps[0].getTypeForEVT(C);
6029     unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
6030     if (NewAlign > Align) {
6031       // Give the stack frame object a larger alignment if needed.
6032       if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
6033         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6034       Align = NewAlign;
6035     }
6036   }
6037
6038   MachineMemOperand::Flags MMOFlags =
6039       isVol ?
      MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6040   uint64_t SrcOff = 0, DstOff = 0;
6041   SmallVector<SDValue, 8> LoadValues;
6042   SmallVector<SDValue, 8> LoadChains;
6043   SmallVector<SDValue, 8> OutChains;
6044   unsigned NumMemOps = MemOps.size();
6045   for (unsigned i = 0; i < NumMemOps; i++) {
6046     EVT VT = MemOps[i];
6047     unsigned VTSize = VT.getSizeInBits() / 8;
6048     SDValue Value;
6049
6050     bool isDereferenceable =
6051         SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6052     MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6053     if (isDereferenceable)
6054       SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6055
6056     Value =
6057         DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
6058                     SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
6059     LoadValues.push_back(Value);
6060     LoadChains.push_back(Value.getValue(1));
6061     SrcOff += VTSize;
6062   }
6063   Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
6064   OutChains.clear();
6065   for (unsigned i = 0; i < NumMemOps; i++) {
6066     EVT VT = MemOps[i];
6067     unsigned VTSize = VT.getSizeInBits() / 8;
6068     SDValue Store;
6069
6070     Store = DAG.getStore(Chain, dl, LoadValues[i],
6071                          DAG.getMemBasePlusOffset(Dst, DstOff, dl),
6072                          DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
6073     OutChains.push_back(Store);
6074     DstOff += VTSize;
6075   }
6076
6077   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6078 }
6079
6080 /// Lower the call to 'memset' intrinsic function into a series of store
6081 /// operations.
6082 ///
6083 /// \param DAG Selection DAG where lowered code is placed.
6084 /// \param dl Link to corresponding IR location.
6085 /// \param Chain Control flow dependency.
6086 /// \param Dst Pointer to destination memory location.
6087 /// \param Src Value of byte to write into the memory.
6088 /// \param Size Number of bytes to write.
6089 /// \param Align Alignment of the destination in bytes.
6090 /// \param isVol True if destination is volatile.
6091 /// \param DstPtrInfo IR information on the memory pointer.
6092 /// \returns New head in the control flow, if lowering was successful, empty
6093 /// SDValue otherwise.
6094 ///
6095 /// The function tries to replace 'llvm.memset' intrinsic with several store
6096 /// operations and value calculation code. This is usually profitable for small
6097 /// memory sizes.
6098 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
6099                                SDValue Chain, SDValue Dst, SDValue Src,
6100                                uint64_t Size, unsigned Align, bool isVol,
6101                                MachinePointerInfo DstPtrInfo) {
6102   // Turn a memset of undef to nop.
6103   // FIXME: We need to honor volatile even if Src is undef.
6104   if (Src.isUndef())
6105     return Chain;
6106
6107   // Expand memset to a series of store ops if the size operand
6108   // falls below a certain threshold.
6109   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6110   std::vector<EVT> MemOps;
6111   bool DstAlignCanChange = false;
6112   MachineFunction &MF = DAG.getMachineFunction();
6113   MachineFrameInfo &MFI = MF.getFrameInfo();
6114   bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6115   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6116   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6117     DstAlignCanChange = true;
6118   bool IsZeroVal =
6119       isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
6120   if (!TLI.findOptimalMemOpLowering(
6121           MemOps, TLI.getMaxStoresPerMemset(OptSize), Size,
6122           (DstAlignCanChange ?
0 : Align), 0, /*IsMemset=*/true, 6123 /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false, 6124 /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(), ~0u, 6125 MF.getFunction().getAttributes())) 6126 return SDValue(); 6127 6128 if (DstAlignCanChange) { 6129 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 6130 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 6131 if (NewAlign > Align) { 6132 // Give the stack frame object a larger alignment if needed. 6133 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 6134 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6135 Align = NewAlign; 6136 } 6137 } 6138 6139 SmallVector<SDValue, 8> OutChains; 6140 uint64_t DstOff = 0; 6141 unsigned NumMemOps = MemOps.size(); 6142 6143 // Find the largest store and generate the bit pattern for it. 6144 EVT LargestVT = MemOps[0]; 6145 for (unsigned i = 1; i < NumMemOps; i++) 6146 if (MemOps[i].bitsGT(LargestVT)) 6147 LargestVT = MemOps[i]; 6148 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 6149 6150 for (unsigned i = 0; i < NumMemOps; i++) { 6151 EVT VT = MemOps[i]; 6152 unsigned VTSize = VT.getSizeInBits() / 8; 6153 if (VTSize > Size) { 6154 // Issuing an unaligned load / store pair that overlaps with the previous 6155 // pair. Adjust the offset accordingly. 6156 assert(i == NumMemOps-1 && i != 0); 6157 DstOff -= VTSize - Size; 6158 } 6159 6160 // If this store is smaller than the largest store see whether we can get 6161 // the smaller value for free with a truncate. 6162 SDValue Value = MemSetValue; 6163 if (VT.bitsLT(LargestVT)) { 6164 if (!LargestVT.isVector() && !VT.isVector() && 6165 TLI.isTruncateFree(LargestVT, VT)) 6166 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 6167 else 6168 Value = getMemsetValue(Src, VT, DAG, dl); 6169 } 6170 assert(Value.getValueType() == VT && "Value with wrong type."); 6171 SDValue Store = DAG.getStore( 6172 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6173 DstPtrInfo.getWithOffset(DstOff), Align, 6174 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 6175 OutChains.push_back(Store); 6176 DstOff += VT.getSizeInBits() / 8; 6177 Size -= VTSize; 6178 } 6179 6180 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6181 } 6182 6183 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 6184 unsigned AS) { 6185 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 6186 // pointer operands can be losslessly bitcasted to pointers of address space 0 6187 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 6188 report_fatal_error("cannot lower memory intrinsic in address space " + 6189 Twine(AS)); 6190 } 6191 } 6192 6193 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 6194 SDValue Src, SDValue Size, unsigned Align, 6195 bool isVol, bool AlwaysInline, bool isTailCall, 6196 MachinePointerInfo DstPtrInfo, 6197 MachinePointerInfo SrcPtrInfo) { 6198 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6199 6200 // Check to see if we should lower the memcpy to loads and stores first. 6201 // For cases within the target-specified limits, this is the best choice. 6202 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6203 if (ConstantSize) { 6204 // Memcpy with size zero? Just return the original chain. 
6205 if (ConstantSize->isNullValue()) 6206 return Chain; 6207 6208 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6209 ConstantSize->getZExtValue(),Align, 6210 isVol, false, DstPtrInfo, SrcPtrInfo); 6211 if (Result.getNode()) 6212 return Result; 6213 } 6214 6215 // Then check to see if we should lower the memcpy with target-specific 6216 // code. If the target chooses to do this, this is the next best. 6217 if (TSI) { 6218 SDValue Result = TSI->EmitTargetCodeForMemcpy( 6219 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 6220 DstPtrInfo, SrcPtrInfo); 6221 if (Result.getNode()) 6222 return Result; 6223 } 6224 6225 // If we really need inline code and the target declined to provide it, 6226 // use a (potentially long) sequence of loads and stores. 6227 if (AlwaysInline) { 6228 assert(ConstantSize && "AlwaysInline requires a constant size!"); 6229 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6230 ConstantSize->getZExtValue(), Align, isVol, 6231 true, DstPtrInfo, SrcPtrInfo); 6232 } 6233 6234 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6235 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6236 6237 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 6238 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 6239 // respect volatile, so they may do things like read or write memory 6240 // beyond the given memory regions. But fixing this isn't easy, and most 6241 // people don't care. 6242 6243 // Emit a library call. 6244 TargetLowering::ArgListTy Args; 6245 TargetLowering::ArgListEntry Entry; 6246 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6247 Entry.Node = Dst; Args.push_back(Entry); 6248 Entry.Node = Src; Args.push_back(Entry); 6249 6250 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6251 Entry.Node = Size; Args.push_back(Entry); 6252 // FIXME: pass in SDLoc 6253 TargetLowering::CallLoweringInfo CLI(*this); 6254 CLI.setDebugLoc(dl) 6255 .setChain(Chain) 6256 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 6257 Dst.getValueType().getTypeForEVT(*getContext()), 6258 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 6259 TLI->getPointerTy(getDataLayout())), 6260 std::move(Args)) 6261 .setDiscardResult() 6262 .setTailCall(isTailCall); 6263 6264 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6265 return CallResult.second; 6266 } 6267 6268 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, 6269 SDValue Dst, unsigned DstAlign, 6270 SDValue Src, unsigned SrcAlign, 6271 SDValue Size, Type *SizeTy, 6272 unsigned ElemSz, bool isTailCall, 6273 MachinePointerInfo DstPtrInfo, 6274 MachinePointerInfo SrcPtrInfo) { 6275 // Emit a library call. 
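  // The callee here is one of the element-atomic runtime functions,
  // __llvm_memcpy_element_unordered_atomic_{1,2,4,8,16}; the exact one is
  // selected from the element size (ElemSz) below. It returns void, so only
  // (Dst, Src, Size) are passed and the result is discarded.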
6276 TargetLowering::ArgListTy Args; 6277 TargetLowering::ArgListEntry Entry; 6278 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6279 Entry.Node = Dst; 6280 Args.push_back(Entry); 6281 6282 Entry.Node = Src; 6283 Args.push_back(Entry); 6284 6285 Entry.Ty = SizeTy; 6286 Entry.Node = Size; 6287 Args.push_back(Entry); 6288 6289 RTLIB::Libcall LibraryCall = 6290 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6291 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6292 report_fatal_error("Unsupported element size"); 6293 6294 TargetLowering::CallLoweringInfo CLI(*this); 6295 CLI.setDebugLoc(dl) 6296 .setChain(Chain) 6297 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6298 Type::getVoidTy(*getContext()), 6299 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6300 TLI->getPointerTy(getDataLayout())), 6301 std::move(Args)) 6302 .setDiscardResult() 6303 .setTailCall(isTailCall); 6304 6305 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6306 return CallResult.second; 6307 } 6308 6309 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 6310 SDValue Src, SDValue Size, unsigned Align, 6311 bool isVol, bool isTailCall, 6312 MachinePointerInfo DstPtrInfo, 6313 MachinePointerInfo SrcPtrInfo) { 6314 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6315 6316 // Check to see if we should lower the memmove to loads and stores first. 6317 // For cases within the target-specified limits, this is the best choice. 6318 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6319 if (ConstantSize) { 6320 // Memmove with size zero? Just return the original chain. 6321 if (ConstantSize->isNullValue()) 6322 return Chain; 6323 6324 SDValue Result = 6325 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 6326 ConstantSize->getZExtValue(), Align, isVol, 6327 false, DstPtrInfo, SrcPtrInfo); 6328 if (Result.getNode()) 6329 return Result; 6330 } 6331 6332 // Then check to see if we should lower the memmove with target-specific 6333 // code. If the target chooses to do this, this is the next best. 6334 if (TSI) { 6335 SDValue Result = TSI->EmitTargetCodeForMemmove( 6336 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 6337 if (Result.getNode()) 6338 return Result; 6339 } 6340 6341 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6342 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6343 6344 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 6345 // not be safe. See memcpy above for more details. 6346 6347 // Emit a library call. 
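  // The argument list below mirrors the C prototype
  //   void *memmove(void *dst, const void *src, size_t n);
  // the unused pointer result is dropped via setDiscardResult().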
6348 TargetLowering::ArgListTy Args; 6349 TargetLowering::ArgListEntry Entry; 6350 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6351 Entry.Node = Dst; Args.push_back(Entry); 6352 Entry.Node = Src; Args.push_back(Entry); 6353 6354 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6355 Entry.Node = Size; Args.push_back(Entry); 6356 // FIXME: pass in SDLoc 6357 TargetLowering::CallLoweringInfo CLI(*this); 6358 CLI.setDebugLoc(dl) 6359 .setChain(Chain) 6360 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 6361 Dst.getValueType().getTypeForEVT(*getContext()), 6362 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 6363 TLI->getPointerTy(getDataLayout())), 6364 std::move(Args)) 6365 .setDiscardResult() 6366 .setTailCall(isTailCall); 6367 6368 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6369 return CallResult.second; 6370 } 6371 6372 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl, 6373 SDValue Dst, unsigned DstAlign, 6374 SDValue Src, unsigned SrcAlign, 6375 SDValue Size, Type *SizeTy, 6376 unsigned ElemSz, bool isTailCall, 6377 MachinePointerInfo DstPtrInfo, 6378 MachinePointerInfo SrcPtrInfo) { 6379 // Emit a library call. 6380 TargetLowering::ArgListTy Args; 6381 TargetLowering::ArgListEntry Entry; 6382 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6383 Entry.Node = Dst; 6384 Args.push_back(Entry); 6385 6386 Entry.Node = Src; 6387 Args.push_back(Entry); 6388 6389 Entry.Ty = SizeTy; 6390 Entry.Node = Size; 6391 Args.push_back(Entry); 6392 6393 RTLIB::Libcall LibraryCall = 6394 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6395 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6396 report_fatal_error("Unsupported element size"); 6397 6398 TargetLowering::CallLoweringInfo CLI(*this); 6399 CLI.setDebugLoc(dl) 6400 .setChain(Chain) 6401 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6402 Type::getVoidTy(*getContext()), 6403 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6404 TLI->getPointerTy(getDataLayout())), 6405 std::move(Args)) 6406 .setDiscardResult() 6407 .setTailCall(isTailCall); 6408 6409 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6410 return CallResult.second; 6411 } 6412 6413 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, 6414 SDValue Src, SDValue Size, unsigned Align, 6415 bool isVol, bool isTailCall, 6416 MachinePointerInfo DstPtrInfo) { 6417 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6418 6419 // Check to see if we should lower the memset to stores first. 6420 // For cases within the target-specified limits, this is the best choice. 6421 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6422 if (ConstantSize) { 6423 // Memset with size zero? Just return the original chain. 6424 if (ConstantSize->isNullValue()) 6425 return Chain; 6426 6427 SDValue Result = 6428 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), 6429 Align, isVol, DstPtrInfo); 6430 6431 if (Result.getNode()) 6432 return Result; 6433 } 6434 6435 // Then check to see if we should lower the memset with target-specific 6436 // code. If the target chooses to do this, this is the next best. 6437 if (TSI) { 6438 SDValue Result = TSI->EmitTargetCodeForMemset( 6439 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo); 6440 if (Result.getNode()) 6441 return Result; 6442 } 6443 6444 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6445 6446 // Emit a library call. 
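  // The argument list below mirrors the C prototype
  //   void *memset(void *dst, int value, size_t n);
  // note that, unlike the pointer arguments, the value operand (Src) is
  // passed with its own type rather than as an i8* pointer.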
6447 TargetLowering::ArgListTy Args; 6448 TargetLowering::ArgListEntry Entry; 6449 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext()); 6450 Args.push_back(Entry); 6451 Entry.Node = Src; 6452 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext()); 6453 Args.push_back(Entry); 6454 Entry.Node = Size; 6455 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6456 Args.push_back(Entry); 6457 6458 // FIXME: pass in SDLoc 6459 TargetLowering::CallLoweringInfo CLI(*this); 6460 CLI.setDebugLoc(dl) 6461 .setChain(Chain) 6462 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), 6463 Dst.getValueType().getTypeForEVT(*getContext()), 6464 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), 6465 TLI->getPointerTy(getDataLayout())), 6466 std::move(Args)) 6467 .setDiscardResult() 6468 .setTailCall(isTailCall); 6469 6470 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6471 return CallResult.second; 6472 } 6473 6474 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl, 6475 SDValue Dst, unsigned DstAlign, 6476 SDValue Value, SDValue Size, Type *SizeTy, 6477 unsigned ElemSz, bool isTailCall, 6478 MachinePointerInfo DstPtrInfo) { 6479 // Emit a library call. 6480 TargetLowering::ArgListTy Args; 6481 TargetLowering::ArgListEntry Entry; 6482 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6483 Entry.Node = Dst; 6484 Args.push_back(Entry); 6485 6486 Entry.Ty = Type::getInt8Ty(*getContext()); 6487 Entry.Node = Value; 6488 Args.push_back(Entry); 6489 6490 Entry.Ty = SizeTy; 6491 Entry.Node = Size; 6492 Args.push_back(Entry); 6493 6494 RTLIB::Libcall LibraryCall = 6495 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6496 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6497 report_fatal_error("Unsupported element size"); 6498 6499 TargetLowering::CallLoweringInfo CLI(*this); 6500 CLI.setDebugLoc(dl) 6501 .setChain(Chain) 6502 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6503 Type::getVoidTy(*getContext()), 6504 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6505 TLI->getPointerTy(getDataLayout())), 6506 std::move(Args)) 6507 .setDiscardResult() 6508 .setTailCall(isTailCall); 6509 6510 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6511 return CallResult.second; 6512 } 6513 6514 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6515 SDVTList VTList, ArrayRef<SDValue> Ops, 6516 MachineMemOperand *MMO) { 6517 FoldingSetNodeID ID; 6518 ID.AddInteger(MemVT.getRawBits()); 6519 AddNodeIDNode(ID, Opcode, VTList, Ops); 6520 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6521 void* IP = nullptr; 6522 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6523 cast<AtomicSDNode>(E)->refineAlignment(MMO); 6524 return SDValue(E, 0); 6525 } 6526 6527 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6528 VTList, MemVT, MMO); 6529 createOperands(N, Ops); 6530 6531 CSEMap.InsertNode(N, IP); 6532 InsertNode(N); 6533 return SDValue(N, 0); 6534 } 6535 6536 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, 6537 EVT MemVT, SDVTList VTs, SDValue Chain, 6538 SDValue Ptr, SDValue Cmp, SDValue Swp, 6539 MachineMemOperand *MMO) { 6540 assert(Opcode == ISD::ATOMIC_CMP_SWAP || 6541 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); 6542 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 6543 6544 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 6545 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6546 } 6547 6548 SDValue SelectionDAG::getAtomic(unsigned 
Opcode, const SDLoc &dl, EVT MemVT, 6549 SDValue Chain, SDValue Ptr, SDValue Val, 6550 MachineMemOperand *MMO) { 6551 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 6552 Opcode == ISD::ATOMIC_LOAD_SUB || 6553 Opcode == ISD::ATOMIC_LOAD_AND || 6554 Opcode == ISD::ATOMIC_LOAD_CLR || 6555 Opcode == ISD::ATOMIC_LOAD_OR || 6556 Opcode == ISD::ATOMIC_LOAD_XOR || 6557 Opcode == ISD::ATOMIC_LOAD_NAND || 6558 Opcode == ISD::ATOMIC_LOAD_MIN || 6559 Opcode == ISD::ATOMIC_LOAD_MAX || 6560 Opcode == ISD::ATOMIC_LOAD_UMIN || 6561 Opcode == ISD::ATOMIC_LOAD_UMAX || 6562 Opcode == ISD::ATOMIC_LOAD_FADD || 6563 Opcode == ISD::ATOMIC_LOAD_FSUB || 6564 Opcode == ISD::ATOMIC_SWAP || 6565 Opcode == ISD::ATOMIC_STORE) && 6566 "Invalid Atomic Op"); 6567 6568 EVT VT = Val.getValueType(); 6569 6570 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : 6571 getVTList(VT, MVT::Other); 6572 SDValue Ops[] = {Chain, Ptr, Val}; 6573 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6574 } 6575 6576 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, 6577 EVT VT, SDValue Chain, SDValue Ptr, 6578 MachineMemOperand *MMO) { 6579 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 6580 6581 SDVTList VTs = getVTList(VT, MVT::Other); 6582 SDValue Ops[] = {Chain, Ptr}; 6583 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); 6584 } 6585 6586 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 6587 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { 6588 if (Ops.size() == 1) 6589 return Ops[0]; 6590 6591 SmallVector<EVT, 4> VTs; 6592 VTs.reserve(Ops.size()); 6593 for (unsigned i = 0; i < Ops.size(); ++i) 6594 VTs.push_back(Ops[i].getValueType()); 6595 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); 6596 } 6597 6598 SDValue SelectionDAG::getMemIntrinsicNode( 6599 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, 6600 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, 6601 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) { 6602 if (Align == 0) // Ensure that codegen never sees alignment 0 6603 Align = getEVTAlignment(MemVT); 6604 6605 if (!Size && MemVT.isScalableVector()) 6606 Size = MemoryLocation::UnknownSize; 6607 else if (!Size) 6608 Size = MemVT.getStoreSize(); 6609 6610 MachineFunction &MF = getMachineFunction(); 6611 MachineMemOperand *MMO = 6612 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align, AAInfo); 6613 6614 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); 6615 } 6616 6617 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, 6618 SDVTList VTList, 6619 ArrayRef<SDValue> Ops, EVT MemVT, 6620 MachineMemOperand *MMO) { 6621 assert((Opcode == ISD::INTRINSIC_VOID || 6622 Opcode == ISD::INTRINSIC_W_CHAIN || 6623 Opcode == ISD::PREFETCH || 6624 Opcode == ISD::LIFETIME_START || 6625 Opcode == ISD::LIFETIME_END || 6626 ((int)Opcode <= std::numeric_limits<int>::max() && 6627 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && 6628 "Opcode is not a memory-accessing opcode!"); 6629 6630 // Memoize the node unless it returns a flag. 
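  // Nodes whose last result is MVT::Glue are never entered in the CSE maps:
  // glue ties a node to one particular neighbouring node, so two structurally
  // identical glue-producing nodes are still not interchangeable.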
6631 MemIntrinsicSDNode *N; 6632 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 6633 FoldingSetNodeID ID; 6634 AddNodeIDNode(ID, Opcode, VTList, Ops); 6635 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( 6636 Opcode, dl.getIROrder(), VTList, MemVT, MMO)); 6637 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6638 void *IP = nullptr; 6639 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6640 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 6641 return SDValue(E, 0); 6642 } 6643 6644 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6645 VTList, MemVT, MMO); 6646 createOperands(N, Ops); 6647 6648 CSEMap.InsertNode(N, IP); 6649 } else { 6650 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), 6651 VTList, MemVT, MMO); 6652 createOperands(N, Ops); 6653 } 6654 InsertNode(N); 6655 SDValue V(N, 0); 6656 NewSDValueDbgMsg(V, "Creating new node: ", this); 6657 return V; 6658 } 6659 6660 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, 6661 SDValue Chain, int FrameIndex, 6662 int64_t Size, int64_t Offset) { 6663 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END; 6664 const auto VTs = getVTList(MVT::Other); 6665 SDValue Ops[2] = { 6666 Chain, 6667 getFrameIndex(FrameIndex, 6668 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()), 6669 true)}; 6670 6671 FoldingSetNodeID ID; 6672 AddNodeIDNode(ID, Opcode, VTs, Ops); 6673 ID.AddInteger(FrameIndex); 6674 ID.AddInteger(Size); 6675 ID.AddInteger(Offset); 6676 void *IP = nullptr; 6677 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6678 return SDValue(E, 0); 6679 6680 LifetimeSDNode *N = newSDNode<LifetimeSDNode>( 6681 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset); 6682 createOperands(N, Ops); 6683 CSEMap.InsertNode(N, IP); 6684 InsertNode(N); 6685 SDValue V(N, 0); 6686 NewSDValueDbgMsg(V, "Creating new node: ", this); 6687 return V; 6688 } 6689 6690 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6691 /// MachinePointerInfo record from it. This is particularly useful because the 6692 /// code generator has many cases where it doesn't bother passing in a 6693 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 6694 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6695 SelectionDAG &DAG, SDValue Ptr, 6696 int64_t Offset = 0) { 6697 // If this is FI+Offset, we can model it. 6698 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 6699 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), 6700 FI->getIndex(), Offset); 6701 6702 // If this is (FI+Offset1)+Offset2, we can model it. 6703 if (Ptr.getOpcode() != ISD::ADD || 6704 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 6705 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 6706 return Info; 6707 6708 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 6709 return MachinePointerInfo::getFixedStack( 6710 DAG.getMachineFunction(), FI, 6711 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 6712 } 6713 6714 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 6715 /// MachinePointerInfo record from it. This is particularly useful because the 6716 /// code generator has many cases where it doesn't bother passing in a 6717 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 
6718 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, 6719 SelectionDAG &DAG, SDValue Ptr, 6720 SDValue OffsetOp) { 6721 // If the 'Offset' value isn't a constant, we can't handle this. 6722 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 6723 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); 6724 if (OffsetOp.isUndef()) 6725 return InferPointerInfo(Info, DAG, Ptr); 6726 return Info; 6727 } 6728 6729 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6730 EVT VT, const SDLoc &dl, SDValue Chain, 6731 SDValue Ptr, SDValue Offset, 6732 MachinePointerInfo PtrInfo, EVT MemVT, 6733 unsigned Alignment, 6734 MachineMemOperand::Flags MMOFlags, 6735 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6736 assert(Chain.getValueType() == MVT::Other && 6737 "Invalid chain type"); 6738 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6739 Alignment = getEVTAlignment(MemVT); 6740 6741 MMOFlags |= MachineMemOperand::MOLoad; 6742 assert((MMOFlags & MachineMemOperand::MOStore) == 0); 6743 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 6744 // clients. 6745 if (PtrInfo.V.isNull()) 6746 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); 6747 6748 MachineFunction &MF = getMachineFunction(); 6749 MachineMemOperand *MMO = MF.getMachineMemOperand( 6750 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges); 6751 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 6752 } 6753 6754 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 6755 EVT VT, const SDLoc &dl, SDValue Chain, 6756 SDValue Ptr, SDValue Offset, EVT MemVT, 6757 MachineMemOperand *MMO) { 6758 if (VT == MemVT) { 6759 ExtType = ISD::NON_EXTLOAD; 6760 } else if (ExtType == ISD::NON_EXTLOAD) { 6761 assert(VT == MemVT && "Non-extending load from different memory type!"); 6762 } else { 6763 // Extending load. 6764 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 6765 "Should only be an extending load, not truncating!"); 6766 assert(VT.isInteger() == MemVT.isInteger() && 6767 "Cannot convert from FP to Int or Int -> FP!"); 6768 assert(VT.isVector() == MemVT.isVector() && 6769 "Cannot use an ext load to convert to or from a vector!"); 6770 assert((!VT.isVector() || 6771 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 6772 "Cannot use an ext load to change the number of vector elements!"); 6773 } 6774 6775 bool Indexed = AM != ISD::UNINDEXED; 6776 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); 6777 6778 SDVTList VTs = Indexed ? 
6779 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); 6780 SDValue Ops[] = { Chain, Ptr, Offset }; 6781 FoldingSetNodeID ID; 6782 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); 6783 ID.AddInteger(MemVT.getRawBits()); 6784 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( 6785 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); 6786 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6787 void *IP = nullptr; 6788 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6789 cast<LoadSDNode>(E)->refineAlignment(MMO); 6790 return SDValue(E, 0); 6791 } 6792 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6793 ExtType, MemVT, MMO); 6794 createOperands(N, Ops); 6795 6796 CSEMap.InsertNode(N, IP); 6797 InsertNode(N); 6798 SDValue V(N, 0); 6799 NewSDValueDbgMsg(V, "Creating new node: ", this); 6800 return V; 6801 } 6802 6803 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6804 SDValue Ptr, MachinePointerInfo PtrInfo, 6805 unsigned Alignment, 6806 MachineMemOperand::Flags MMOFlags, 6807 const AAMDNodes &AAInfo, const MDNode *Ranges) { 6808 SDValue Undef = getUNDEF(Ptr.getValueType()); 6809 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6810 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); 6811 } 6812 6813 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, 6814 SDValue Ptr, MachineMemOperand *MMO) { 6815 SDValue Undef = getUNDEF(Ptr.getValueType()); 6816 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, 6817 VT, MMO); 6818 } 6819 6820 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6821 EVT VT, SDValue Chain, SDValue Ptr, 6822 MachinePointerInfo PtrInfo, EVT MemVT, 6823 unsigned Alignment, 6824 MachineMemOperand::Flags MMOFlags, 6825 const AAMDNodes &AAInfo) { 6826 SDValue Undef = getUNDEF(Ptr.getValueType()); 6827 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, 6828 MemVT, Alignment, MMOFlags, AAInfo); 6829 } 6830 6831 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, 6832 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, 6833 MachineMemOperand *MMO) { 6834 SDValue Undef = getUNDEF(Ptr.getValueType()); 6835 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, 6836 MemVT, MMO); 6837 } 6838 6839 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, 6840 SDValue Base, SDValue Offset, 6841 ISD::MemIndexedMode AM) { 6842 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); 6843 assert(LD->getOffset().isUndef() && "Load is already a indexed load!"); 6844 // Don't propagate the invariant or dereferenceable flags. 
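  // Those flags were established for the original address only; converting
  // the load to a pre-/post-indexed form changes the address it computes, so
  // the flags are dropped conservatively rather than re-proven here.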
6845 auto MMOFlags = 6846 LD->getMemOperand()->getFlags() & 6847 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); 6848 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, 6849 LD->getChain(), Base, Offset, LD->getPointerInfo(), 6850 LD->getMemoryVT(), LD->getAlignment(), MMOFlags, 6851 LD->getAAInfo()); 6852 } 6853 6854 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6855 SDValue Ptr, MachinePointerInfo PtrInfo, 6856 unsigned Alignment, 6857 MachineMemOperand::Flags MMOFlags, 6858 const AAMDNodes &AAInfo) { 6859 assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); 6860 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6861 Alignment = getEVTAlignment(Val.getValueType()); 6862 6863 MMOFlags |= MachineMemOperand::MOStore; 6864 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6865 6866 if (PtrInfo.V.isNull()) 6867 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6868 6869 MachineFunction &MF = getMachineFunction(); 6870 MachineMemOperand *MMO = MF.getMachineMemOperand( 6871 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo); 6872 return getStore(Chain, dl, Val, Ptr, MMO); 6873 } 6874 6875 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6876 SDValue Ptr, MachineMemOperand *MMO) { 6877 assert(Chain.getValueType() == MVT::Other && 6878 "Invalid chain type"); 6879 EVT VT = Val.getValueType(); 6880 SDVTList VTs = getVTList(MVT::Other); 6881 SDValue Undef = getUNDEF(Ptr.getValueType()); 6882 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6883 FoldingSetNodeID ID; 6884 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6885 ID.AddInteger(VT.getRawBits()); 6886 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6887 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); 6888 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6889 void *IP = nullptr; 6890 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6891 cast<StoreSDNode>(E)->refineAlignment(MMO); 6892 return SDValue(E, 0); 6893 } 6894 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6895 ISD::UNINDEXED, false, VT, MMO); 6896 createOperands(N, Ops); 6897 6898 CSEMap.InsertNode(N, IP); 6899 InsertNode(N); 6900 SDValue V(N, 0); 6901 NewSDValueDbgMsg(V, "Creating new node: ", this); 6902 return V; 6903 } 6904 6905 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6906 SDValue Ptr, MachinePointerInfo PtrInfo, 6907 EVT SVT, unsigned Alignment, 6908 MachineMemOperand::Flags MMOFlags, 6909 const AAMDNodes &AAInfo) { 6910 assert(Chain.getValueType() == MVT::Other && 6911 "Invalid chain type"); 6912 if (Alignment == 0) // Ensure that codegen never sees alignment 0 6913 Alignment = getEVTAlignment(SVT); 6914 6915 MMOFlags |= MachineMemOperand::MOStore; 6916 assert((MMOFlags & MachineMemOperand::MOLoad) == 0); 6917 6918 if (PtrInfo.V.isNull()) 6919 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); 6920 6921 MachineFunction &MF = getMachineFunction(); 6922 MachineMemOperand *MMO = MF.getMachineMemOperand( 6923 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo); 6924 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 6925 } 6926 6927 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, 6928 SDValue Ptr, EVT SVT, 6929 MachineMemOperand *MMO) { 6930 EVT VT = Val.getValueType(); 6931 6932 assert(Chain.getValueType() == MVT::Other && 6933 "Invalid chain type"); 6934 if (VT == SVT) 6935 return getStore(Chain, dl, Val, Ptr, 
MMO); 6936 6937 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 6938 "Should only be a truncating store, not extending!"); 6939 assert(VT.isInteger() == SVT.isInteger() && 6940 "Can't do FP-INT conversion!"); 6941 assert(VT.isVector() == SVT.isVector() && 6942 "Cannot use trunc store to convert to or from a vector!"); 6943 assert((!VT.isVector() || 6944 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 6945 "Cannot use trunc store to change the number of vector elements!"); 6946 6947 SDVTList VTs = getVTList(MVT::Other); 6948 SDValue Undef = getUNDEF(Ptr.getValueType()); 6949 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 6950 FoldingSetNodeID ID; 6951 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6952 ID.AddInteger(SVT.getRawBits()); 6953 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( 6954 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); 6955 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6956 void *IP = nullptr; 6957 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6958 cast<StoreSDNode>(E)->refineAlignment(MMO); 6959 return SDValue(E, 0); 6960 } 6961 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 6962 ISD::UNINDEXED, true, SVT, MMO); 6963 createOperands(N, Ops); 6964 6965 CSEMap.InsertNode(N, IP); 6966 InsertNode(N); 6967 SDValue V(N, 0); 6968 NewSDValueDbgMsg(V, "Creating new node: ", this); 6969 return V; 6970 } 6971 6972 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, 6973 SDValue Base, SDValue Offset, 6974 ISD::MemIndexedMode AM) { 6975 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 6976 assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); 6977 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); 6978 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; 6979 FoldingSetNodeID ID; 6980 AddNodeIDNode(ID, ISD::STORE, VTs, Ops); 6981 ID.AddInteger(ST->getMemoryVT().getRawBits()); 6982 ID.AddInteger(ST->getRawSubclassData()); 6983 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); 6984 void *IP = nullptr; 6985 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) 6986 return SDValue(E, 0); 6987 6988 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 6989 ST->isTruncatingStore(), ST->getMemoryVT(), 6990 ST->getMemOperand()); 6991 createOperands(N, Ops); 6992 6993 CSEMap.InsertNode(N, IP); 6994 InsertNode(N); 6995 SDValue V(N, 0); 6996 NewSDValueDbgMsg(V, "Creating new node: ", this); 6997 return V; 6998 } 6999 7000 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, 7001 SDValue Base, SDValue Offset, SDValue Mask, 7002 SDValue PassThru, EVT MemVT, 7003 MachineMemOperand *MMO, 7004 ISD::MemIndexedMode AM, 7005 ISD::LoadExtType ExtTy, bool isExpanding) { 7006 bool Indexed = AM != ISD::UNINDEXED; 7007 assert((Indexed || Offset.isUndef()) && 7008 "Unindexed masked load with an offset!"); 7009 SDVTList VTs = Indexed ? 
getVTList(VT, Base.getValueType(), MVT::Other) 7010 : getVTList(VT, MVT::Other); 7011 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru}; 7012 FoldingSetNodeID ID; 7013 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); 7014 ID.AddInteger(MemVT.getRawBits()); 7015 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( 7016 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO)); 7017 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7018 void *IP = nullptr; 7019 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7020 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); 7021 return SDValue(E, 0); 7022 } 7023 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7024 AM, ExtTy, isExpanding, MemVT, MMO); 7025 createOperands(N, Ops); 7026 7027 CSEMap.InsertNode(N, IP); 7028 InsertNode(N); 7029 SDValue V(N, 0); 7030 NewSDValueDbgMsg(V, "Creating new node: ", this); 7031 return V; 7032 } 7033 7034 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, 7035 SDValue Base, SDValue Offset, 7036 ISD::MemIndexedMode AM) { 7037 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad); 7038 assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!"); 7039 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base, 7040 Offset, LD->getMask(), LD->getPassThru(), 7041 LD->getMemoryVT(), LD->getMemOperand(), AM, 7042 LD->getExtensionType(), LD->isExpandingLoad()); 7043 } 7044 7045 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 7046 SDValue Val, SDValue Base, SDValue Offset, 7047 SDValue Mask, EVT MemVT, 7048 MachineMemOperand *MMO, 7049 ISD::MemIndexedMode AM, bool IsTruncating, 7050 bool IsCompressing) { 7051 assert(Chain.getValueType() == MVT::Other && 7052 "Invalid chain type"); 7053 bool Indexed = AM != ISD::UNINDEXED; 7054 assert((Indexed || Offset.isUndef()) && 7055 "Unindexed masked store with an offset!"); 7056 SDVTList VTs = Indexed ? 
getVTList(Base.getValueType(), MVT::Other) 7057 : getVTList(MVT::Other); 7058 SDValue Ops[] = {Chain, Val, Base, Offset, Mask}; 7059 FoldingSetNodeID ID; 7060 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 7061 ID.AddInteger(MemVT.getRawBits()); 7062 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 7063 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO)); 7064 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7065 void *IP = nullptr; 7066 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7067 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 7068 return SDValue(E, 0); 7069 } 7070 auto *N = 7071 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, 7072 IsTruncating, IsCompressing, MemVT, MMO); 7073 createOperands(N, Ops); 7074 7075 CSEMap.InsertNode(N, IP); 7076 InsertNode(N); 7077 SDValue V(N, 0); 7078 NewSDValueDbgMsg(V, "Creating new node: ", this); 7079 return V; 7080 } 7081 7082 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, 7083 SDValue Base, SDValue Offset, 7084 ISD::MemIndexedMode AM) { 7085 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore); 7086 assert(ST->getOffset().isUndef() && 7087 "Masked store is already a indexed store!"); 7088 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset, 7089 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(), 7090 AM, ST->isTruncatingStore(), ST->isCompressingStore()); 7091 } 7092 7093 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 7094 ArrayRef<SDValue> Ops, 7095 MachineMemOperand *MMO, 7096 ISD::MemIndexType IndexType) { 7097 assert(Ops.size() == 6 && "Incompatible number of operands"); 7098 7099 FoldingSetNodeID ID; 7100 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 7101 ID.AddInteger(VT.getRawBits()); 7102 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 7103 dl.getIROrder(), VTs, VT, MMO, IndexType)); 7104 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7105 void *IP = nullptr; 7106 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7107 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 7108 return SDValue(E, 0); 7109 } 7110 7111 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7112 VTs, VT, MMO, IndexType); 7113 createOperands(N, Ops); 7114 7115 assert(N->getPassThru().getValueType() == N->getValueType(0) && 7116 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 7117 assert(N->getMask().getValueType().getVectorNumElements() == 7118 N->getValueType(0).getVectorNumElements() && 7119 "Vector width mismatch between mask and data"); 7120 assert(N->getIndex().getValueType().getVectorNumElements() >= 7121 N->getValueType(0).getVectorNumElements() && 7122 "Vector width mismatch between index and data"); 7123 assert(isa<ConstantSDNode>(N->getScale()) && 7124 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7125 "Scale should be a constant power of 2"); 7126 7127 CSEMap.InsertNode(N, IP); 7128 InsertNode(N); 7129 SDValue V(N, 0); 7130 NewSDValueDbgMsg(V, "Creating new node: ", this); 7131 return V; 7132 } 7133 7134 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 7135 ArrayRef<SDValue> Ops, 7136 MachineMemOperand *MMO, 7137 ISD::MemIndexType IndexType) { 7138 assert(Ops.size() == 6 && "Incompatible number of operands"); 7139 7140 FoldingSetNodeID ID; 7141 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 7142 ID.AddInteger(VT.getRawBits()); 7143 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 
7144       dl.getIROrder(), VTs, VT, MMO, IndexType));
7145   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7146   void *IP = nullptr;
7147   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7148     cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
7149     return SDValue(E, 0);
7150   }
7151   auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
7152                                            VTs, VT, MMO, IndexType);
7153   createOperands(N, Ops);
7154 
7155   assert(N->getMask().getValueType().getVectorNumElements() ==
7156              N->getValue().getValueType().getVectorNumElements() &&
7157          "Vector width mismatch between mask and data");
7158   assert(N->getIndex().getValueType().getVectorNumElements() >=
7159              N->getValue().getValueType().getVectorNumElements() &&
7160          "Vector width mismatch between index and data");
7161   assert(isa<ConstantSDNode>(N->getScale()) &&
7162          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
7163          "Scale should be a constant power of 2");
7164 
7165   CSEMap.InsertNode(N, IP);
7166   InsertNode(N);
7167   SDValue V(N, 0);
7168   NewSDValueDbgMsg(V, "Creating new node: ", this);
7169   return V;
7170 }
7171 
7172 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
7173   // select undef, T, F --> T (if T is a constant), otherwise F
7174   // select ?, undef, F --> F
7175   // select ?, T, undef --> T
7176   if (Cond.isUndef())
7177     return isConstantValueOfAnyType(T) ? T : F;
7178   if (T.isUndef())
7179     return F;
7180   if (F.isUndef())
7181     return T;
7182 
7183   // select true, T, F --> T
7184   // select false, T, F --> F
7185   if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
7186     return CondC->isNullValue() ? F : T;
7187 
7188   // TODO: This should simplify VSELECT with constant condition using something
7189   // like this (but check boolean contents to be complete?):
7190   // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
7191   //   return T;
7192   // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
7193   //   return F;
7194 
7195   // select ?, T, T --> T
7196   if (T == F)
7197     return T;
7198 
7199   return SDValue();
7200 }
7201 
7202 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
7203   // shift undef, Y --> 0 (can always assume that the undef value is 0)
7204   if (X.isUndef())
7205     return getConstant(0, SDLoc(X.getNode()), X.getValueType());
7206   // shift X, undef --> undef (because it may shift by the bitwidth)
7207   if (Y.isUndef())
7208     return getUNDEF(X.getValueType());
7209 
7210   // shift 0, Y --> 0
7211   // shift X, 0 --> X
7212   if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
7213     return X;
7214 
7215   // shift X, C >= bitwidth(X) --> undef
7216   // All vector elements must be too big (or undef) to avoid partial undefs.
7217   auto isShiftTooBig = [X](ConstantSDNode *Val) {
7218     return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
7219   };
7220   if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
7221     return getUNDEF(X.getValueType());
7222 
7223   return SDValue();
7224 }
7225 
7226 // TODO: Use fast-math-flags to enable more simplifications.
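// For example, 'nsz' would make "X + 0.0 --> X" legal (without it the fold
// can lose a signed zero, since (-0.0) + 0.0 is +0.0), and 'nnan' together
// with 'ninf' would make "X - X --> 0.0" legal as well.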
7227 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y) { 7228 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true); 7229 if (!YC) 7230 return SDValue(); 7231 7232 // X + -0.0 --> X 7233 if (Opcode == ISD::FADD) 7234 if (YC->getValueAPF().isNegZero()) 7235 return X; 7236 7237 // X - +0.0 --> X 7238 if (Opcode == ISD::FSUB) 7239 if (YC->getValueAPF().isPosZero()) 7240 return X; 7241 7242 // X * 1.0 --> X 7243 // X / 1.0 --> X 7244 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV) 7245 if (YC->getValueAPF().isExactlyValue(1.0)) 7246 return X; 7247 7248 return SDValue(); 7249 } 7250 7251 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, 7252 SDValue Ptr, SDValue SV, unsigned Align) { 7253 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; 7254 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); 7255 } 7256 7257 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7258 ArrayRef<SDUse> Ops) { 7259 switch (Ops.size()) { 7260 case 0: return getNode(Opcode, DL, VT); 7261 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); 7262 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); 7263 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); 7264 default: break; 7265 } 7266 7267 // Copy from an SDUse array into an SDValue array for use with 7268 // the regular getNode logic. 7269 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); 7270 return getNode(Opcode, DL, VT, NewOps); 7271 } 7272 7273 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 7274 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { 7275 unsigned NumOps = Ops.size(); 7276 switch (NumOps) { 7277 case 0: return getNode(Opcode, DL, VT); 7278 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); 7279 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); 7280 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags); 7281 default: break; 7282 } 7283 7284 switch (Opcode) { 7285 default: break; 7286 case ISD::BUILD_VECTOR: 7287 // Attempt to simplify BUILD_VECTOR. 7288 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 7289 return V; 7290 break; 7291 case ISD::CONCAT_VECTORS: 7292 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 7293 return V; 7294 break; 7295 case ISD::SELECT_CC: 7296 assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); 7297 assert(Ops[0].getValueType() == Ops[1].getValueType() && 7298 "LHS and RHS of condition must have same type!"); 7299 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7300 "True and False arms of SelectCC must have same type!"); 7301 assert(Ops[2].getValueType() == VT && 7302 "select_cc node must be of same type as true and false value!"); 7303 break; 7304 case ISD::BR_CC: 7305 assert(NumOps == 5 && "BR_CC takes 5 operands!"); 7306 assert(Ops[2].getValueType() == Ops[3].getValueType() && 7307 "LHS/RHS of comparison should match types!"); 7308 break; 7309 } 7310 7311 // Memoize nodes. 
7312 SDNode *N; 7313 SDVTList VTs = getVTList(VT); 7314 7315 if (VT != MVT::Glue) { 7316 FoldingSetNodeID ID; 7317 AddNodeIDNode(ID, Opcode, VTs, Ops); 7318 void *IP = nullptr; 7319 7320 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7321 return SDValue(E, 0); 7322 7323 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7324 createOperands(N, Ops); 7325 7326 CSEMap.InsertNode(N, IP); 7327 } else { 7328 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7329 createOperands(N, Ops); 7330 } 7331 7332 InsertNode(N); 7333 SDValue V(N, 0); 7334 NewSDValueDbgMsg(V, "Creating new node: ", this); 7335 return V; 7336 } 7337 7338 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7339 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { 7340 return getNode(Opcode, DL, getVTList(ResultTys), Ops); 7341 } 7342 7343 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7344 ArrayRef<SDValue> Ops) { 7345 if (VTList.NumVTs == 1) 7346 return getNode(Opcode, DL, VTList.VTs[0], Ops); 7347 7348 switch (Opcode) { 7349 case ISD::STRICT_FP_EXTEND: 7350 assert(VTList.NumVTs == 2 && Ops.size() == 2 && 7351 "Invalid STRICT_FP_EXTEND!"); 7352 assert(VTList.VTs[0].isFloatingPoint() && 7353 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!"); 7354 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7355 "STRICT_FP_EXTEND result type should be vector iff the operand " 7356 "type is vector!"); 7357 assert((!VTList.VTs[0].isVector() || 7358 VTList.VTs[0].getVectorNumElements() == 7359 Ops[1].getValueType().getVectorNumElements()) && 7360 "Vector element count mismatch!"); 7361 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) && 7362 "Invalid fpext node, dst <= src!"); 7363 break; 7364 case ISD::STRICT_FP_ROUND: 7365 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!"); 7366 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && 7367 "STRICT_FP_ROUND result type should be vector iff the operand " 7368 "type is vector!"); 7369 assert((!VTList.VTs[0].isVector() || 7370 VTList.VTs[0].getVectorNumElements() == 7371 Ops[1].getValueType().getVectorNumElements()) && 7372 "Vector element count mismatch!"); 7373 assert(VTList.VTs[0].isFloatingPoint() && 7374 Ops[1].getValueType().isFloatingPoint() && 7375 VTList.VTs[0].bitsLT(Ops[1].getValueType()) && 7376 isa<ConstantSDNode>(Ops[2]) && 7377 (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 || 7378 cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) && 7379 "Invalid STRICT_FP_ROUND!"); 7380 break; 7381 #if 0 7382 // FIXME: figure out how to safely handle things like 7383 // int foo(int x) { return 1 << (x & 255); } 7384 // int bar() { return foo(256); } 7385 case ISD::SRA_PARTS: 7386 case ISD::SRL_PARTS: 7387 case ISD::SHL_PARTS: 7388 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && 7389 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) 7390 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7391 else if (N3.getOpcode() == ISD::AND) 7392 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { 7393 // If the and is only masking out bits that cannot effect the shift, 7394 // eliminate the and. 7395 unsigned NumBits = VT.getScalarSizeInBits()*2; 7396 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) 7397 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); 7398 } 7399 break; 7400 #endif 7401 } 7402 7403 // Memoize the node unless it returns a flag. 
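  // By convention, a node's glue result, when present, is always the last
  // entry in its value list, so checking only the final VT is sufficient.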
7404 SDNode *N; 7405 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 7406 FoldingSetNodeID ID; 7407 AddNodeIDNode(ID, Opcode, VTList, Ops); 7408 void *IP = nullptr; 7409 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) 7410 return SDValue(E, 0); 7411 7412 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7413 createOperands(N, Ops); 7414 CSEMap.InsertNode(N, IP); 7415 } else { 7416 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); 7417 createOperands(N, Ops); 7418 } 7419 InsertNode(N); 7420 SDValue V(N, 0); 7421 NewSDValueDbgMsg(V, "Creating new node: ", this); 7422 return V; 7423 } 7424 7425 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, 7426 SDVTList VTList) { 7427 return getNode(Opcode, DL, VTList, None); 7428 } 7429 7430 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7431 SDValue N1) { 7432 SDValue Ops[] = { N1 }; 7433 return getNode(Opcode, DL, VTList, Ops); 7434 } 7435 7436 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7437 SDValue N1, SDValue N2) { 7438 SDValue Ops[] = { N1, N2 }; 7439 return getNode(Opcode, DL, VTList, Ops); 7440 } 7441 7442 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7443 SDValue N1, SDValue N2, SDValue N3) { 7444 SDValue Ops[] = { N1, N2, N3 }; 7445 return getNode(Opcode, DL, VTList, Ops); 7446 } 7447 7448 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7449 SDValue N1, SDValue N2, SDValue N3, SDValue N4) { 7450 SDValue Ops[] = { N1, N2, N3, N4 }; 7451 return getNode(Opcode, DL, VTList, Ops); 7452 } 7453 7454 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, 7455 SDValue N1, SDValue N2, SDValue N3, SDValue N4, 7456 SDValue N5) { 7457 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 7458 return getNode(Opcode, DL, VTList, Ops); 7459 } 7460 7461 SDVTList SelectionDAG::getVTList(EVT VT) { 7462 return makeVTList(SDNode::getValueTypeList(VT), 1); 7463 } 7464 7465 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 7466 FoldingSetNodeID ID; 7467 ID.AddInteger(2U); 7468 ID.AddInteger(VT1.getRawBits()); 7469 ID.AddInteger(VT2.getRawBits()); 7470 7471 void *IP = nullptr; 7472 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7473 if (!Result) { 7474 EVT *Array = Allocator.Allocate<EVT>(2); 7475 Array[0] = VT1; 7476 Array[1] = VT2; 7477 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); 7478 VTListMap.InsertNode(Result, IP); 7479 } 7480 return Result->getSDVTList(); 7481 } 7482 7483 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 7484 FoldingSetNodeID ID; 7485 ID.AddInteger(3U); 7486 ID.AddInteger(VT1.getRawBits()); 7487 ID.AddInteger(VT2.getRawBits()); 7488 ID.AddInteger(VT3.getRawBits()); 7489 7490 void *IP = nullptr; 7491 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7492 if (!Result) { 7493 EVT *Array = Allocator.Allocate<EVT>(3); 7494 Array[0] = VT1; 7495 Array[1] = VT2; 7496 Array[2] = VT3; 7497 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); 7498 VTListMap.InsertNode(Result, IP); 7499 } 7500 return Result->getSDVTList(); 7501 } 7502 7503 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 7504 FoldingSetNodeID ID; 7505 ID.AddInteger(4U); 7506 ID.AddInteger(VT1.getRawBits()); 7507 ID.AddInteger(VT2.getRawBits()); 7508 ID.AddInteger(VT3.getRawBits()); 7509 ID.AddInteger(VT4.getRawBits()); 7510 7511 void *IP = nullptr; 7512 SDVTListNode *Result 
= VTListMap.FindNodeOrInsertPos(ID, IP); 7513 if (!Result) { 7514 EVT *Array = Allocator.Allocate<EVT>(4); 7515 Array[0] = VT1; 7516 Array[1] = VT2; 7517 Array[2] = VT3; 7518 Array[3] = VT4; 7519 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); 7520 VTListMap.InsertNode(Result, IP); 7521 } 7522 return Result->getSDVTList(); 7523 } 7524 7525 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { 7526 unsigned NumVTs = VTs.size(); 7527 FoldingSetNodeID ID; 7528 ID.AddInteger(NumVTs); 7529 for (unsigned index = 0; index < NumVTs; index++) { 7530 ID.AddInteger(VTs[index].getRawBits()); 7531 } 7532 7533 void *IP = nullptr; 7534 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); 7535 if (!Result) { 7536 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 7537 llvm::copy(VTs, Array); 7538 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); 7539 VTListMap.InsertNode(Result, IP); 7540 } 7541 return Result->getSDVTList(); 7542 } 7543 7544 7545 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 7546 /// specified operands. If the resultant node already exists in the DAG, 7547 /// this does not modify the specified node, instead it returns the node that 7548 /// already exists. If the resultant node does not exist in the DAG, the 7549 /// input node is returned. As a degenerate case, if you specify the same 7550 /// input operands as the node already has, the input node is returned. 7551 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 7552 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 7553 7554 // Check to see if there is no change. 7555 if (Op == N->getOperand(0)) return N; 7556 7557 // See if the modified node already exists. 7558 void *InsertPos = nullptr; 7559 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 7560 return Existing; 7561 7562 // Nope it doesn't. Remove the node from its current place in the maps. 7563 if (InsertPos) 7564 if (!RemoveNodeFromCSEMaps(N)) 7565 InsertPos = nullptr; 7566 7567 // Now we update the operands. 7568 N->OperandList[0].set(Op); 7569 7570 updateDivergence(N); 7571 // If this gets put into a CSE map, add it. 7572 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7573 return N; 7574 } 7575 7576 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 7577 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 7578 7579 // Check to see if there is no change. 7580 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 7581 return N; // No operands changed, just return the input node. 7582 7583 // See if the modified node already exists. 7584 void *InsertPos = nullptr; 7585 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 7586 return Existing; 7587 7588 // Nope it doesn't. Remove the node from its current place in the maps. 7589 if (InsertPos) 7590 if (!RemoveNodeFromCSEMaps(N)) 7591 InsertPos = nullptr; 7592 7593 // Now we update the operands. 7594 if (N->OperandList[0] != Op1) 7595 N->OperandList[0].set(Op1); 7596 if (N->OperandList[1] != Op2) 7597 N->OperandList[1].set(Op2); 7598 7599 updateDivergence(N); 7600 // If this gets put into a CSE map, add it. 
7601 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7602 return N; 7603 } 7604 7605 SDNode *SelectionDAG:: 7606 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 7607 SDValue Ops[] = { Op1, Op2, Op3 }; 7608 return UpdateNodeOperands(N, Ops); 7609 } 7610 7611 SDNode *SelectionDAG:: 7612 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7613 SDValue Op3, SDValue Op4) { 7614 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 7615 return UpdateNodeOperands(N, Ops); 7616 } 7617 7618 SDNode *SelectionDAG:: 7619 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 7620 SDValue Op3, SDValue Op4, SDValue Op5) { 7621 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 7622 return UpdateNodeOperands(N, Ops); 7623 } 7624 7625 SDNode *SelectionDAG:: 7626 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { 7627 unsigned NumOps = Ops.size(); 7628 assert(N->getNumOperands() == NumOps && 7629 "Update with wrong number of operands"); 7630 7631 // If no operands changed just return the input node. 7632 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) 7633 return N; 7634 7635 // See if the modified node already exists. 7636 void *InsertPos = nullptr; 7637 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) 7638 return Existing; 7639 7640 // Nope it doesn't. Remove the node from its current place in the maps. 7641 if (InsertPos) 7642 if (!RemoveNodeFromCSEMaps(N)) 7643 InsertPos = nullptr; 7644 7645 // Now we update the operands. 7646 for (unsigned i = 0; i != NumOps; ++i) 7647 if (N->OperandList[i] != Ops[i]) 7648 N->OperandList[i].set(Ops[i]); 7649 7650 updateDivergence(N); 7651 // If this gets put into a CSE map, add it. 7652 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 7653 return N; 7654 } 7655 7656 /// DropOperands - Release the operands and set this node to have 7657 /// zero operands. 7658 void SDNode::DropOperands() { 7659 // Unlike the code in MorphNodeTo that does this, we don't need to 7660 // watch for dead nodes here. 7661 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 7662 SDUse &Use = *I++; 7663 Use.set(SDValue()); 7664 } 7665 } 7666 7667 void SelectionDAG::setNodeMemRefs(MachineSDNode *N, 7668 ArrayRef<MachineMemOperand *> NewMemRefs) { 7669 if (NewMemRefs.empty()) { 7670 N->clearMemRefs(); 7671 return; 7672 } 7673 7674 // Check if we can avoid allocating by storing a single reference directly. 7675 if (NewMemRefs.size() == 1) { 7676 N->MemRefs = NewMemRefs[0]; 7677 N->NumMemRefs = 1; 7678 return; 7679 } 7680 7681 MachineMemOperand **MemRefsBuffer = 7682 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size()); 7683 llvm::copy(NewMemRefs, MemRefsBuffer); 7684 N->MemRefs = MemRefsBuffer; 7685 N->NumMemRefs = static_cast<int>(NewMemRefs.size()); 7686 } 7687 7688 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 7689 /// machine opcode. 
7690 ///
7691 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7692                                    EVT VT) {
7693   SDVTList VTs = getVTList(VT);
7694   return SelectNodeTo(N, MachineOpc, VTs, None);
7695 }
7696 
7697 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7698                                    EVT VT, SDValue Op1) {
7699   SDVTList VTs = getVTList(VT);
7700   SDValue Ops[] = { Op1 };
7701   return SelectNodeTo(N, MachineOpc, VTs, Ops);
7702 }
7703 
7704 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7705                                    EVT VT, SDValue Op1,
7706                                    SDValue Op2) {
7707   SDVTList VTs = getVTList(VT);
7708   SDValue Ops[] = { Op1, Op2 };
7709   return SelectNodeTo(N, MachineOpc, VTs, Ops);
7710 }
7711 
7712 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7713                                    EVT VT, SDValue Op1,
7714                                    SDValue Op2, SDValue Op3) {
7715   SDVTList VTs = getVTList(VT);
7716   SDValue Ops[] = { Op1, Op2, Op3 };
7717   return SelectNodeTo(N, MachineOpc, VTs, Ops);
7718 }
7719 
7720 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7721                                    EVT VT, ArrayRef<SDValue> Ops) {
7722   SDVTList VTs = getVTList(VT);
7723   return SelectNodeTo(N, MachineOpc, VTs, Ops);
7724 }
7725 
7726 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7727                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
7728   SDVTList VTs = getVTList(VT1, VT2);
7729   return SelectNodeTo(N, MachineOpc, VTs, Ops);
7730 }
7731 
7732 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7733                                    EVT VT1, EVT VT2) {
7734   SDVTList VTs = getVTList(VT1, VT2);
7735   return SelectNodeTo(N, MachineOpc, VTs, None);
7736 }
7737 
7738 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7739                                    EVT VT1, EVT VT2, EVT VT3,
7740                                    ArrayRef<SDValue> Ops) {
7741   SDVTList VTs = getVTList(VT1, VT2, VT3);
7742   return SelectNodeTo(N, MachineOpc, VTs, Ops);
7743 }
7744 
7745 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7746                                    EVT VT1, EVT VT2,
7747                                    SDValue Op1, SDValue Op2) {
7748   SDVTList VTs = getVTList(VT1, VT2);
7749   SDValue Ops[] = { Op1, Op2 };
7750   return SelectNodeTo(N, MachineOpc, VTs, Ops);
7751 }
7752 
7753 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7754                                    SDVTList VTs, ArrayRef<SDValue> Ops) {
7755   SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
7756   // Reset the NodeID to -1.
7757   New->setNodeId(-1);
7758   if (New != N) {
7759     ReplaceAllUsesWith(N, New);
7760     RemoveDeadNode(N);
7761   }
7762   return New;
7763 }
7764 
7765 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
7766 /// the line number information on the merged node since it is not possible to
7767 /// preserve the information that the operation is associated with multiple
7768 /// lines. This will make the debugger work better at -O0, where there is a
7769 /// higher probability of having other instructions associated with that line.
7770 ///
7771 /// For IROrder, we keep the smaller of the two.
7772 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
7773   DebugLoc NLoc = N->getDebugLoc();
7774   if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
7775     N->setDebugLoc(DebugLoc());
7776   }
7777   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
7778   N->setIROrder(Order);
7779   return N;
7780 }
7781 
7782 /// MorphNodeTo - This *mutates* the specified node to have the specified
7783 /// return type, opcode, and operands.
7784 ///
7785 /// Note that MorphNodeTo returns the resultant node.
7786 /// node of the specified opcode and operands, it returns that node instead of
7787 /// the current one. Note that the SDLoc need not be the same.
7788 ///
7789 /// Using MorphNodeTo is faster than creating a new node and swapping it in
7790 /// with ReplaceAllUsesWith both because it often avoids allocating a new
7791 /// node, and because it doesn't require CSE recalculation for any of
7792 /// the node's users.
7793 ///
7794 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
7795 /// As a consequence, it isn't appropriate for use from within the DAG combiner
7796 /// or the legalizer, which maintain worklists that would need to be updated
7797 /// when deleting things.
7798 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
7799                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
7800   // If an identical node already exists, use it.
7801   void *IP = nullptr;
7802   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
7803     FoldingSetNodeID ID;
7804     AddNodeIDNode(ID, Opc, VTs, Ops);
7805     if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
7806       return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
7807   }
7808
7809   if (!RemoveNodeFromCSEMaps(N))
7810     IP = nullptr;
7811
7812   // Start the morphing.
7813   N->NodeType = Opc;
7814   N->ValueList = VTs.VTs;
7815   N->NumValues = VTs.NumVTs;
7816
7817   // Clear the operands list, updating used nodes to remove this from their
7818   // use list. Keep track of any operands that become dead as a result.
7819   SmallPtrSet<SDNode*, 16> DeadNodeSet;
7820   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
7821     SDUse &Use = *I++;
7822     SDNode *Used = Use.getNode();
7823     Use.set(SDValue());
7824     if (Used->use_empty())
7825       DeadNodeSet.insert(Used);
7826   }
7827
7828   // For a MachineSDNode, clear out any stale memory reference information.
7829   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
7830     MN->clearMemRefs();
7831
7832   // Swap for an appropriately sized array from the recycler.
7833   removeOperands(N);
7834   createOperands(N, Ops);
7835
7836   // Delete any nodes that are still dead after adding the uses for the
7837   // new operands.
7838   if (!DeadNodeSet.empty()) {
7839     SmallVector<SDNode *, 16> DeadNodes;
7840     for (SDNode *N : DeadNodeSet)
7841       if (N->use_empty())
7842         DeadNodes.push_back(N);
7843     RemoveDeadNodes(DeadNodes);
7844   }
7845
7846   if (IP)
7847     CSEMap.InsertNode(N, IP);   // Memoize the new node.
7848   return N;
7849 }
7850
7851 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
7852   unsigned OrigOpc = Node->getOpcode();
7853   unsigned NewOpc;
7854   switch (OrigOpc) {
7855   default:
7856     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
7857 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)                  \
7858   case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
7859 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
7860   case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
7861 #include "llvm/IR/ConstrainedOps.def"
7862   }
7863
7864   assert(Node->getNumValues() == 2 && "Unexpected number of results!");
7865
7866   // We're taking this node out of the chain, so we need to re-link things.
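  // (A strict FP node produces (value, out-chain) and takes its in-chain as
  // operand 0; the non-strict replacement produces only the value. Forwarding
  // the out-chain's users to the in-chain below splices this node out of the
  // chain graph before the morph drops the chain operand and result.)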
7867 SDValue InputChain = Node->getOperand(0); 7868 SDValue OutputChain = SDValue(Node, 1); 7869 ReplaceAllUsesOfValueWith(OutputChain, InputChain); 7870 7871 SmallVector<SDValue, 3> Ops; 7872 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) 7873 Ops.push_back(Node->getOperand(i)); 7874 7875 SDVTList VTs = getVTList(Node->getValueType(0)); 7876 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops); 7877 7878 // MorphNodeTo can operate in two ways: if an existing node with the 7879 // specified operands exists, it can just return it. Otherwise, it 7880 // updates the node in place to have the requested operands. 7881 if (Res == Node) { 7882 // If we updated the node in place, reset the node ID. To the isel, 7883 // this should be just like a newly allocated machine node. 7884 Res->setNodeId(-1); 7885 } else { 7886 ReplaceAllUsesWith(Node, Res); 7887 RemoveDeadNode(Node); 7888 } 7889 7890 return Res; 7891 } 7892 7893 /// getMachineNode - These are used for target selectors to create a new node 7894 /// with specified return type(s), MachineInstr opcode, and operands. 7895 /// 7896 /// Note that getMachineNode returns the resultant node. If there is already a 7897 /// node of the specified opcode and operands, it returns that node instead of 7898 /// the current one. 7899 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7900 EVT VT) { 7901 SDVTList VTs = getVTList(VT); 7902 return getMachineNode(Opcode, dl, VTs, None); 7903 } 7904 7905 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7906 EVT VT, SDValue Op1) { 7907 SDVTList VTs = getVTList(VT); 7908 SDValue Ops[] = { Op1 }; 7909 return getMachineNode(Opcode, dl, VTs, Ops); 7910 } 7911 7912 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7913 EVT VT, SDValue Op1, SDValue Op2) { 7914 SDVTList VTs = getVTList(VT); 7915 SDValue Ops[] = { Op1, Op2 }; 7916 return getMachineNode(Opcode, dl, VTs, Ops); 7917 } 7918 7919 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7920 EVT VT, SDValue Op1, SDValue Op2, 7921 SDValue Op3) { 7922 SDVTList VTs = getVTList(VT); 7923 SDValue Ops[] = { Op1, Op2, Op3 }; 7924 return getMachineNode(Opcode, dl, VTs, Ops); 7925 } 7926 7927 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7928 EVT VT, ArrayRef<SDValue> Ops) { 7929 SDVTList VTs = getVTList(VT); 7930 return getMachineNode(Opcode, dl, VTs, Ops); 7931 } 7932 7933 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7934 EVT VT1, EVT VT2, SDValue Op1, 7935 SDValue Op2) { 7936 SDVTList VTs = getVTList(VT1, VT2); 7937 SDValue Ops[] = { Op1, Op2 }; 7938 return getMachineNode(Opcode, dl, VTs, Ops); 7939 } 7940 7941 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7942 EVT VT1, EVT VT2, SDValue Op1, 7943 SDValue Op2, SDValue Op3) { 7944 SDVTList VTs = getVTList(VT1, VT2); 7945 SDValue Ops[] = { Op1, Op2, Op3 }; 7946 return getMachineNode(Opcode, dl, VTs, Ops); 7947 } 7948 7949 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7950 EVT VT1, EVT VT2, 7951 ArrayRef<SDValue> Ops) { 7952 SDVTList VTs = getVTList(VT1, VT2); 7953 return getMachineNode(Opcode, dl, VTs, Ops); 7954 } 7955 7956 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7957 EVT VT1, EVT VT2, EVT VT3, 7958 SDValue Op1, SDValue Op2) { 7959 SDVTList VTs = getVTList(VT1, VT2, VT3); 7960 SDValue Ops[] = { Op1, Op2 }; 7961 return 
getMachineNode(Opcode, dl, VTs, Ops); 7962 } 7963 7964 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7965 EVT VT1, EVT VT2, EVT VT3, 7966 SDValue Op1, SDValue Op2, 7967 SDValue Op3) { 7968 SDVTList VTs = getVTList(VT1, VT2, VT3); 7969 SDValue Ops[] = { Op1, Op2, Op3 }; 7970 return getMachineNode(Opcode, dl, VTs, Ops); 7971 } 7972 7973 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7974 EVT VT1, EVT VT2, EVT VT3, 7975 ArrayRef<SDValue> Ops) { 7976 SDVTList VTs = getVTList(VT1, VT2, VT3); 7977 return getMachineNode(Opcode, dl, VTs, Ops); 7978 } 7979 7980 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, 7981 ArrayRef<EVT> ResultTys, 7982 ArrayRef<SDValue> Ops) { 7983 SDVTList VTs = getVTList(ResultTys); 7984 return getMachineNode(Opcode, dl, VTs, Ops); 7985 } 7986 7987 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, 7988 SDVTList VTs, 7989 ArrayRef<SDValue> Ops) { 7990 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 7991 MachineSDNode *N; 7992 void *IP = nullptr; 7993 7994 if (DoCSE) { 7995 FoldingSetNodeID ID; 7996 AddNodeIDNode(ID, ~Opcode, VTs, Ops); 7997 IP = nullptr; 7998 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 7999 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 8000 } 8001 } 8002 8003 // Allocate a new MachineSDNode. 8004 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 8005 createOperands(N, Ops); 8006 8007 if (DoCSE) 8008 CSEMap.InsertNode(N, IP); 8009 8010 InsertNode(N); 8011 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this); 8012 return N; 8013 } 8014 8015 /// getTargetExtractSubreg - A convenience function for creating 8016 /// TargetOpcode::EXTRACT_SUBREG nodes. 8017 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8018 SDValue Operand) { 8019 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8020 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 8021 VT, Operand, SRIdxVal); 8022 return SDValue(Subreg, 0); 8023 } 8024 8025 /// getTargetInsertSubreg - A convenience function for creating 8026 /// TargetOpcode::INSERT_SUBREG nodes. 8027 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 8028 SDValue Operand, SDValue Subreg) { 8029 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 8030 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 8031 VT, Operand, Subreg, SRIdxVal); 8032 return SDValue(Result, 0); 8033 } 8034 8035 /// getNodeIfExists - Get the specified node if it's already available, or 8036 /// else return NULL. 8037 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 8038 ArrayRef<SDValue> Ops, 8039 const SDNodeFlags Flags) { 8040 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 8041 FoldingSetNodeID ID; 8042 AddNodeIDNode(ID, Opcode, VTList, Ops); 8043 void *IP = nullptr; 8044 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 8045 E->intersectFlagsWith(Flags); 8046 return E; 8047 } 8048 } 8049 return nullptr; 8050 } 8051 8052 /// getDbgValue - Creates a SDDbgValue node. 
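/// (This overload and the ones below each create one flavor of SDDbgValue:
/// one attached to an SDNode result, one for a constant, one for a frame
/// index, and one for a virtual register. All of them are allocated from
/// DbgInfo's allocator and live as long as the current DAG.)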
8053 /// 8054 /// SDNode 8055 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 8056 SDNode *N, unsigned R, bool IsIndirect, 8057 const DebugLoc &DL, unsigned O) { 8058 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8059 "Expected inlined-at fields to agree"); 8060 return new (DbgInfo->getAlloc()) 8061 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 8062 } 8063 8064 /// Constant 8065 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 8066 DIExpression *Expr, 8067 const Value *C, 8068 const DebugLoc &DL, unsigned O) { 8069 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8070 "Expected inlined-at fields to agree"); 8071 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 8072 } 8073 8074 /// FrameIndex 8075 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 8076 DIExpression *Expr, unsigned FI, 8077 bool IsIndirect, 8078 const DebugLoc &DL, 8079 unsigned O) { 8080 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8081 "Expected inlined-at fields to agree"); 8082 return new (DbgInfo->getAlloc()) 8083 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX); 8084 } 8085 8086 /// VReg 8087 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, 8088 DIExpression *Expr, 8089 unsigned VReg, bool IsIndirect, 8090 const DebugLoc &DL, unsigned O) { 8091 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 8092 "Expected inlined-at fields to agree"); 8093 return new (DbgInfo->getAlloc()) 8094 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG); 8095 } 8096 8097 void SelectionDAG::transferDbgValues(SDValue From, SDValue To, 8098 unsigned OffsetInBits, unsigned SizeInBits, 8099 bool InvalidateDbg) { 8100 SDNode *FromNode = From.getNode(); 8101 SDNode *ToNode = To.getNode(); 8102 assert(FromNode && ToNode && "Can't modify dbg values"); 8103 8104 // PR35338 8105 // TODO: assert(From != To && "Redundant dbg value transfer"); 8106 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); 8107 if (From == To || FromNode == ToNode) 8108 return; 8109 8110 if (!FromNode->getHasDebugValue()) 8111 return; 8112 8113 SmallVector<SDDbgValue *, 2> ClonedDVs; 8114 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { 8115 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated()) 8116 continue; 8117 8118 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); 8119 8120 // Just transfer the dbg value attached to From. 8121 if (Dbg->getResNo() != From.getResNo()) 8122 continue; 8123 8124 DIVariable *Var = Dbg->getVariable(); 8125 auto *Expr = Dbg->getExpression(); 8126 // If a fragment is requested, update the expression. 8127 if (SizeInBits) { 8128 // When splitting a larger (e.g., sign-extended) value whose 8129 // lower bits are described with an SDDbgValue, do not attempt 8130 // to transfer the SDDbgValue to the upper bits. 8131 if (auto FI = Expr->getFragmentInfo()) 8132 if (OffsetInBits + SizeInBits > FI->SizeInBits) 8133 continue; 8134 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, 8135 SizeInBits); 8136 if (!Fragment) 8137 continue; 8138 Expr = *Fragment; 8139 } 8140 // Clone the SDDbgValue and move it to To. 
8141 SDDbgValue *Clone = 8142 getDbgValue(Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), 8143 Dbg->getDebugLoc(), Dbg->getOrder()); 8144 ClonedDVs.push_back(Clone); 8145 8146 if (InvalidateDbg) { 8147 // Invalidate value and indicate the SDDbgValue should not be emitted. 8148 Dbg->setIsInvalidated(); 8149 Dbg->setIsEmitted(); 8150 } 8151 } 8152 8153 for (SDDbgValue *Dbg : ClonedDVs) 8154 AddDbgValue(Dbg, ToNode, false); 8155 } 8156 8157 void SelectionDAG::salvageDebugInfo(SDNode &N) { 8158 if (!N.getHasDebugValue()) 8159 return; 8160 8161 SmallVector<SDDbgValue *, 2> ClonedDVs; 8162 for (auto DV : GetDbgValues(&N)) { 8163 if (DV->isInvalidated()) 8164 continue; 8165 switch (N.getOpcode()) { 8166 default: 8167 break; 8168 case ISD::ADD: 8169 SDValue N0 = N.getOperand(0); 8170 SDValue N1 = N.getOperand(1); 8171 if (!isConstantIntBuildVectorOrConstantInt(N0) && 8172 isConstantIntBuildVectorOrConstantInt(N1)) { 8173 uint64_t Offset = N.getConstantOperandVal(1); 8174 // Rewrite an ADD constant node into a DIExpression. Since we are 8175 // performing arithmetic to compute the variable's *value* in the 8176 // DIExpression, we need to mark the expression with a 8177 // DW_OP_stack_value. 8178 auto *DIExpr = DV->getExpression(); 8179 DIExpr = 8180 DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset); 8181 SDDbgValue *Clone = 8182 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(), 8183 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder()); 8184 ClonedDVs.push_back(Clone); 8185 DV->setIsInvalidated(); 8186 DV->setIsEmitted(); 8187 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; 8188 N0.getNode()->dumprFull(this); 8189 dbgs() << " into " << *DIExpr << '\n'); 8190 } 8191 } 8192 } 8193 8194 for (SDDbgValue *Dbg : ClonedDVs) 8195 AddDbgValue(Dbg, Dbg->getSDNode(), false); 8196 } 8197 8198 /// Creates a SDDbgLabel node. 8199 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label, 8200 const DebugLoc &DL, unsigned O) { 8201 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && 8202 "Expected inlined-at fields to agree"); 8203 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); 8204 } 8205 8206 namespace { 8207 8208 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node 8209 /// pointed to by a use iterator is deleted, increment the use iterator 8210 /// so that it doesn't dangle. 8211 /// 8212 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { 8213 SDNode::use_iterator &UI; 8214 SDNode::use_iterator &UE; 8215 8216 void NodeDeleted(SDNode *N, SDNode *E) override { 8217 // Increment the iterator as needed. 8218 while (UI != UE && N == *UI) 8219 ++UI; 8220 } 8221 8222 public: 8223 RAUWUpdateListener(SelectionDAG &d, 8224 SDNode::use_iterator &ui, 8225 SDNode::use_iterator &ue) 8226 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} 8227 }; 8228 8229 } // end anonymous namespace 8230 8231 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8232 /// This can cause recursive merging of nodes in the DAG. 8233 /// 8234 /// This version assumes From has a single result value. 8235 /// 8236 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { 8237 SDNode *From = FromN.getNode(); 8238 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && 8239 "Cannot replace with this method!"); 8240 assert(From != To.getNode() && "Cannot replace uses of with self"); 8241 8242 // Preserve Debug Values 8243 transferDbgValues(FromN, To); 8244 8245 // Iterate over all the existing uses of From. 
New uses will be added
8246   // to the beginning of the use list, which we avoid visiting.
8247   // This specifically avoids visiting uses of From that arise while the
8248   // replacement is happening, because any such uses would be the result
8249   // of CSE: If an existing node looks like From after one of its operands
8250   // is replaced by To, we don't want to replace all of its users with To
8251   // too. See PR3018 for more info.
8252   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8253   RAUWUpdateListener Listener(*this, UI, UE);
8254   while (UI != UE) {
8255     SDNode *User = *UI;
8256
8257     // This node is about to morph, remove its old self from the CSE maps.
8258     RemoveNodeFromCSEMaps(User);
8259
8260     // A user can appear in a use list multiple times, and when this
8261     // happens the uses are usually next to each other in the list.
8262     // To help reduce the number of CSE recomputations, process all
8263     // the uses of this user that we can find this way.
8264     do {
8265       SDUse &Use = UI.getUse();
8266       ++UI;
8267       Use.set(To);
8268       if (To->isDivergent() != From->isDivergent())
8269         updateDivergence(User);
8270     } while (UI != UE && *UI == User);
8271     // Now that we have modified User, add it back to the CSE maps. If it
8272     // already exists there, recursively merge the results together.
8273     AddModifiedNodeToCSEMaps(User);
8274   }
8275
8276   // If we just RAUW'd the root, take note.
8277   if (FromN == getRoot())
8278     setRoot(To);
8279 }
8280
8281 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8282 /// This can cause recursive merging of nodes in the DAG.
8283 ///
8284 /// This version assumes that for each value of From, there is a
8285 /// corresponding value in To in the same position with the same type.
8286 ///
8287 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
8288 #ifndef NDEBUG
8289   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8290     assert((!From->hasAnyUseOfValue(i) ||
8291             From->getValueType(i) == To->getValueType(i)) &&
8292            "Cannot use this version of ReplaceAllUsesWith!");
8293 #endif
8294
8295   // Handle the trivial case.
8296   if (From == To)
8297     return;
8298
8299   // Preserve Debug Info. Only do this if there's a use.
8300   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8301     if (From->hasAnyUseOfValue(i)) {
8302       assert((i < To->getNumValues()) && "Invalid To location");
8303       transferDbgValues(SDValue(From, i), SDValue(To, i));
8304     }
8305
8306   // Iterate over just the existing users of From. See the comments in
8307   // the ReplaceAllUsesWith above.
8308   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8309   RAUWUpdateListener Listener(*this, UI, UE);
8310   while (UI != UE) {
8311     SDNode *User = *UI;
8312
8313     // This node is about to morph, remove its old self from the CSE maps.
8314     RemoveNodeFromCSEMaps(User);
8315
8316     // A user can appear in a use list multiple times, and when this
8317     // happens the uses are usually next to each other in the list.
8318     // To help reduce the number of CSE recomputations, process all
8319     // the uses of this user that we can find this way.
8320     do {
8321       SDUse &Use = UI.getUse();
8322       ++UI;
8323       Use.setNode(To);
8324       if (To->isDivergent() != From->isDivergent())
8325         updateDivergence(User);
8326     } while (UI != UE && *UI == User);
8327
8328     // Now that we have modified User, add it back to the CSE maps. If it
8329     // already exists there, recursively merge the results together.
8330     AddModifiedNodeToCSEMaps(User);
8331   }
8332
8333   // If we just RAUW'd the root, take note.
8334   if (From == getRoot().getNode())
8335     setRoot(SDValue(To, getRoot().getResNo()));
8336 }
8337
8338 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8339 /// This can cause recursive merging of nodes in the DAG.
8340 ///
8341 /// This version can replace From with any result values. To must match the
8342 /// number and types of values returned by From.
8343 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
8344   if (From->getNumValues() == 1) // Handle the simple case efficiently.
8345     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
8346
8347   // Preserve Debug Info.
8348   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8349     transferDbgValues(SDValue(From, i), To[i]);
8350
8351   // Iterate over just the existing users of From. See the comments in
8352   // the ReplaceAllUsesWith above.
8353   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8354   RAUWUpdateListener Listener(*this, UI, UE);
8355   while (UI != UE) {
8356     SDNode *User = *UI;
8357
8358     // This node is about to morph, remove its old self from the CSE maps.
8359     RemoveNodeFromCSEMaps(User);
8360
8361     // A user can appear in a use list multiple times, and when this happens
8362     // the uses are usually next to each other in the list. To help reduce the
8363     // number of CSE and divergence recomputations, process all the uses of
8364     // this user that we can find this way.
8365     bool ToIsDivergent = false;
8366     do {
8367       SDUse &Use = UI.getUse();
8368       const SDValue &ToOp = To[Use.getResNo()];
8369       ++UI;
8370       Use.set(ToOp);
8371       ToIsDivergent |= ToOp->isDivergent();
8372     } while (UI != UE && *UI == User);
8373
8374     if (ToIsDivergent != From->isDivergent())
8375       updateDivergence(User);
8376
8377     // Now that we have modified User, add it back to the CSE maps. If it
8378     // already exists there, recursively merge the results together.
8379     AddModifiedNodeToCSEMaps(User);
8380   }
8381
8382   // If we just RAUW'd the root, take note.
8383   if (From == getRoot().getNode())
8384     setRoot(SDValue(To[getRoot().getResNo()]));
8385 }
8386
8387 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
8388 /// uses of other values produced by From.getNode() alone. As with
8389 /// ReplaceAllUsesWith, this can cause recursive merging of nodes.
8390 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
8391   // Handle the really simple, really trivial case efficiently.
8392   if (From == To) return;
8393
8394   // Handle the simple, trivial case efficiently.
8395   if (From.getNode()->getNumValues() == 1) {
8396     ReplaceAllUsesWith(From, To);
8397     return;
8398   }
8399
8400   // Preserve Debug Info.
8401   transferDbgValues(From, To);
8402
8403   // Iterate over just the existing users of From. See the comments in
8404   // the ReplaceAllUsesWith above.
8405   SDNode::use_iterator UI = From.getNode()->use_begin(),
8406                        UE = From.getNode()->use_end();
8407   RAUWUpdateListener Listener(*this, UI, UE);
8408   while (UI != UE) {
8409     SDNode *User = *UI;
8410     bool UserRemovedFromCSEMaps = false;
8411
8412     // A user can appear in a use list multiple times, and when this
8413     // happens the uses are usually next to each other in the list.
8414     // To help reduce the number of CSE recomputations, process all
8415     // the uses of this user that we can find this way.
8416     do {
8417       SDUse &Use = UI.getUse();
8418
8419       // Skip uses of different values from the same node.
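      // (For example, if From is the chain result of a load, i.e. result 1,
      // any uses of the load's value result 0 must be left untouched here.)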
8420       if (Use.getResNo() != From.getResNo()) {
8421         ++UI;
8422         continue;
8423       }
8424
8425       // If this node hasn't been modified yet, it's still in the CSE maps,
8426       // so remove its old self from the CSE maps.
8427       if (!UserRemovedFromCSEMaps) {
8428         RemoveNodeFromCSEMaps(User);
8429         UserRemovedFromCSEMaps = true;
8430       }
8431
8432       ++UI;
8433       Use.set(To);
8434       if (To->isDivergent() != From->isDivergent())
8435         updateDivergence(User);
8436     } while (UI != UE && *UI == User);
8437     // We are iterating over all uses of the From node, so if a use
8438     // doesn't use the specific value, no changes are made.
8439     if (!UserRemovedFromCSEMaps)
8440       continue;
8441
8442     // Now that we have modified User, add it back to the CSE maps. If it
8443     // already exists there, recursively merge the results together.
8444     AddModifiedNodeToCSEMaps(User);
8445   }
8446
8447   // If we just RAUW'd the root, take note.
8448   if (From == getRoot())
8449     setRoot(To);
8450 }
8451
8452 namespace {
8453
8454 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
8455 /// to record information about a use.
8456 struct UseMemo {
8457   SDNode *User;
8458   unsigned Index;
8459   SDUse *Use;
8460 };
8461
8462 /// operator< - Sort Memos by User.
8463 bool operator<(const UseMemo &L, const UseMemo &R) {
8464   return (intptr_t)L.User < (intptr_t)R.User;
8465 }
8466
8467 } // end anonymous namespace
8468
8469 void SelectionDAG::updateDivergence(SDNode *N) {
8471   if (TLI->isSDNodeAlwaysUniform(N))
8472     return;
8473   bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
8474   for (auto &Op : N->ops()) {
8475     if (Op.Val.getValueType() != MVT::Other)
8476       IsDivergent |= Op.getNode()->isDivergent();
8477   }
8478   if (N->SDNodeBits.IsDivergent != IsDivergent) {
8479     N->SDNodeBits.IsDivergent = IsDivergent;
8480     for (auto U : N->uses()) {
8481       updateDivergence(U);
8482     }
8483   }
8484 }
8485
8486 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
8487   DenseMap<SDNode *, unsigned> Degree;
8488   Order.reserve(AllNodes.size());
8489   for (auto &N : allnodes()) {
8490     unsigned NOps = N.getNumOperands();
8491     Degree[&N] = NOps;
8492     if (0 == NOps)
8493       Order.push_back(&N);
8494   }
8495   for (size_t I = 0; I != Order.size(); ++I) {
8496     SDNode *N = Order[I];
8497     for (auto U : N->uses()) {
8498       unsigned &UnsortedOps = Degree[U];
8499       if (0 == --UnsortedOps)
8500         Order.push_back(U);
8501     }
8502   }
8503 }
8504
8505 #ifndef NDEBUG
8506 void SelectionDAG::VerifyDAGDiverence() {
8507   std::vector<SDNode *> TopoOrder;
8508   CreateTopologicalOrder(TopoOrder);
8509   const TargetLowering &TLI = getTargetLoweringInfo();
8510   DenseMap<const SDNode *, bool> DivergenceMap;
8511   for (auto &N : allnodes()) {
8512     DivergenceMap[&N] = false;
8513   }
8514   for (auto N : TopoOrder) {
8515     bool IsDivergent = DivergenceMap[N];
8516     bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
8517     for (auto &Op : N->ops()) {
8518       if (Op.Val.getValueType() != MVT::Other)
8519         IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
8520     }
8521     if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) {
8522       DivergenceMap[N] = true;
8523     }
8524   }
8525   for (auto &N : allnodes()) {
8526     (void)N;
8527     assert(DivergenceMap[&N] == N.isDivergent() &&
8528            "Divergence bit inconsistency detected");
8529   }
8530 }
8531 #endif
8532
8533 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
8534 /// uses of other values produced by From.getNode() alone. The same value
8535 /// may appear in both the From and To list. As with ReplaceAllUsesWith,
8536 /// this can cause recursive merging of nodes.
8537 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
8538                                               const SDValue *To,
8539                                               unsigned Num) {
8540   // Handle the simple, trivial case efficiently.
8541   if (Num == 1)
8542     return ReplaceAllUsesOfValueWith(*From, *To);
8543
8544   transferDbgValues(*From, *To);
8545
8546   // Record all of the existing uses up front and process the records
8547   // instead; this makes it safe to handle new uses that are introduced
8548   // during the replacement process.
8549   SmallVector<UseMemo, 4> Uses;
8550   for (unsigned i = 0; i != Num; ++i) {
8551     unsigned FromResNo = From[i].getResNo();
8552     SDNode *FromNode = From[i].getNode();
8553     for (SDNode::use_iterator UI = FromNode->use_begin(),
8554            E = FromNode->use_end(); UI != E; ++UI) {
8555       SDUse &Use = UI.getUse();
8556       if (Use.getResNo() == FromResNo) {
8557         UseMemo Memo = { *UI, i, &Use };
8558         Uses.push_back(Memo);
8559       }
8560     }
8561   }
8562
8563   // Sort the uses, so that all the uses from a given User are together.
8564   llvm::sort(Uses);
8565
8566   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8567        UseIndex != UseIndexEnd; ) {
8568     // We know that this user uses some value of From. If it is the right
8569     // value, update it.
8570     SDNode *User = Uses[UseIndex].User;
8571
8572     // This node is about to morph, remove its old self from the CSE maps.
8573     RemoveNodeFromCSEMaps(User);
8574
8575     // The Uses array is sorted, so all the uses for a given User
8576     // are next to each other in the list.
8577     // To help reduce the number of CSE recomputations, process all
8578     // the uses of this user that we can find this way.
8579     do {
8580       unsigned i = Uses[UseIndex].Index;
8581       SDUse &Use = *Uses[UseIndex].Use;
8582       ++UseIndex;
8583
8584       Use.set(To[i]);
8585     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8586
8587     // Now that we have modified User, add it back to the CSE maps. If it
8588     // already exists there, recursively merge the results together.
8589     AddModifiedNodeToCSEMaps(User);
8590   }
8591 }
8592
8593 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
8594 /// based on their topological order. It returns the number of nodes, and
8595 /// leaves the node list itself sorted into that topological order.
8596 unsigned SelectionDAG::AssignTopologicalOrder() {
8597   unsigned DAGSize = 0;
8598
8599   // SortedPos tracks the progress of the algorithm. Nodes before it are
8600   // sorted, nodes after it are unsorted. When the algorithm completes
8601   // it is at the end of the list.
8602   allnodes_iterator SortedPos = allnodes_begin();
8603
8604   // Visit all the nodes. Move nodes with no operands to the front of
8605   // the list immediately. Annotate nodes that do have operands with their
8606   // operand count. Before we do this, the Node Id fields of the nodes
8607   // may contain arbitrary values. After, the Node Id fields for nodes
8608   // before SortedPos will contain the topological sort index, and the
8609   // Node Id fields for nodes at SortedPos and after will contain the
8610   // count of outstanding operands.
8611   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
8612     SDNode *N = &*I++;
8613     checkForCycles(N, this);
8614     unsigned Degree = N->getNumOperands();
8615     if (Degree == 0) {
8616       // A node with no operands; move it into sorted position immediately.
8617       N->setNodeId(DAGSize++);
8618       allnodes_iterator Q(N);
8619       if (Q != SortedPos)
8620         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
8621       assert(SortedPos != AllNodes.end() && "Overran node list");
8622       ++SortedPos;
8623     } else {
8624       // Temporarily use the Node Id as scratch space for the degree count.
8625       N->setNodeId(Degree);
8626     }
8627   }
8628
8629   // Visit all the nodes. As we iterate, move nodes into sorted order,
8630   // such that by the time the end is reached all nodes will be sorted.
8631   for (SDNode &Node : allnodes()) {
8632     SDNode *N = &Node;
8633     checkForCycles(N, this);
8634     // N is in sorted position, so each of its users now has one less
8635     // operand that needs to be sorted.
8636     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
8637          UI != UE; ++UI) {
8638       SDNode *P = *UI;
8639       unsigned Degree = P->getNodeId();
8640       assert(Degree != 0 && "Invalid node degree");
8641       --Degree;
8642       if (Degree == 0) {
8643         // All of P's operands are sorted, so P may be sorted now.
8644         P->setNodeId(DAGSize++);
8645         if (P->getIterator() != SortedPos)
8646           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
8647         assert(SortedPos != AllNodes.end() && "Overran node list");
8648         ++SortedPos;
8649       } else {
8650         // Update P's outstanding operand count.
8651         P->setNodeId(Degree);
8652       }
8653     }
8654     if (Node.getIterator() == SortedPos) {
8655 #ifndef NDEBUG
8656       allnodes_iterator I(N);
8657       SDNode *S = &*++I;
8658       dbgs() << "Overran sorted position:\n";
8659       S->dumprFull(this); dbgs() << "\n";
8660       dbgs() << "Checking if this is due to cycles\n";
8661       checkForCycles(this, true);
8662 #endif
8663       llvm_unreachable(nullptr);
8664     }
8665   }
8666
8667   assert(SortedPos == AllNodes.end() &&
8668          "Topological sort incomplete!");
8669   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
8670          "First node in topological sort is not the entry token!");
8671   assert(AllNodes.front().getNodeId() == 0 &&
8672          "First node in topological sort has non-zero id!");
8673   assert(AllNodes.front().getNumOperands() == 0 &&
8674          "First node in topological sort has operands!");
8675   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
8676          "Last node in topological sort has unexpected id!");
8677   assert(AllNodes.back().use_empty() &&
8678          "Last node in topological sort has users!");
8679   assert(DAGSize == allnodes_size() && "Node count mismatch!");
8680   return DAGSize;
8681 }
8682
8683 /// AddDbgValue - Add a dbg_value record. If SD is non-null, the value is
8684 /// produced by SD.
8685 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
8686   if (SD) {
8687     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
8688     SD->setHasDebugValue(true);
8689   }
8690   DbgInfo->add(DB, SD, isParameter);
8691 }
8692
8693 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
8694   DbgInfo->add(DB);
8695 }
8696
8697 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
8698                                                    SDValue NewMemOp) {
8699   assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
8700   // The new memory operation must have the same position as the old load in
8701   // terms of memory dependency. Create a TokenFactor for the old load and new
8702   // memory operation and update uses of the old load's output chain to use
8703   // that TokenFactor.
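  // (An illustrative sketch of the effect: if the old load's chain result is
  // OldLoad(1) and the new operation's chain result is NewMemOp(1), then every
  // former user of OldLoad(1) ends up depending on
  // TokenFactor(OldLoad(1), NewMemOp(1)) and is therefore ordered after both
  // memory operations.)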
8704 SDValue OldChain = SDValue(OldLoad, 1); 8705 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 8706 if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1)) 8707 return NewChain; 8708 8709 SDValue TokenFactor = 8710 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 8711 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 8712 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 8713 return TokenFactor; 8714 } 8715 8716 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op, 8717 Function **OutFunction) { 8718 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol"); 8719 8720 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 8721 auto *Module = MF->getFunction().getParent(); 8722 auto *Function = Module->getFunction(Symbol); 8723 8724 if (OutFunction != nullptr) 8725 *OutFunction = Function; 8726 8727 if (Function != nullptr) { 8728 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace()); 8729 return getGlobalAddress(Function, SDLoc(Op), PtrTy); 8730 } 8731 8732 std::string ErrorStr; 8733 raw_string_ostream ErrorFormatter(ErrorStr); 8734 8735 ErrorFormatter << "Undefined external symbol "; 8736 ErrorFormatter << '"' << Symbol << '"'; 8737 ErrorFormatter.flush(); 8738 8739 report_fatal_error(ErrorStr); 8740 } 8741 8742 //===----------------------------------------------------------------------===// 8743 // SDNode Class 8744 //===----------------------------------------------------------------------===// 8745 8746 bool llvm::isNullConstant(SDValue V) { 8747 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8748 return Const != nullptr && Const->isNullValue(); 8749 } 8750 8751 bool llvm::isNullFPConstant(SDValue V) { 8752 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 8753 return Const != nullptr && Const->isZero() && !Const->isNegative(); 8754 } 8755 8756 bool llvm::isAllOnesConstant(SDValue V) { 8757 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8758 return Const != nullptr && Const->isAllOnesValue(); 8759 } 8760 8761 bool llvm::isOneConstant(SDValue V) { 8762 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8763 return Const != nullptr && Const->isOne(); 8764 } 8765 8766 SDValue llvm::peekThroughBitcasts(SDValue V) { 8767 while (V.getOpcode() == ISD::BITCAST) 8768 V = V.getOperand(0); 8769 return V; 8770 } 8771 8772 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { 8773 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) 8774 V = V.getOperand(0); 8775 return V; 8776 } 8777 8778 SDValue llvm::peekThroughExtractSubvectors(SDValue V) { 8779 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) 8780 V = V.getOperand(0); 8781 return V; 8782 } 8783 8784 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) { 8785 if (V.getOpcode() != ISD::XOR) 8786 return false; 8787 V = peekThroughBitcasts(V.getOperand(1)); 8788 unsigned NumBits = V.getScalarValueSizeInBits(); 8789 ConstantSDNode *C = 8790 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true); 8791 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits); 8792 } 8793 8794 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs, 8795 bool AllowTruncation) { 8796 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8797 return CN; 8798 8799 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8800 BitVector UndefElements; 8801 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 8802 8803 // BuildVectors can truncate their operands. 
Ignore that case here unless 8804 // AllowTruncation is set. 8805 if (CN && (UndefElements.none() || AllowUndefs)) { 8806 EVT CVT = CN->getValueType(0); 8807 EVT NSVT = N.getValueType().getScalarType(); 8808 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 8809 if (AllowTruncation || (CVT == NSVT)) 8810 return CN; 8811 } 8812 } 8813 8814 return nullptr; 8815 } 8816 8817 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts, 8818 bool AllowUndefs, 8819 bool AllowTruncation) { 8820 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8821 return CN; 8822 8823 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8824 BitVector UndefElements; 8825 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements); 8826 8827 // BuildVectors can truncate their operands. Ignore that case here unless 8828 // AllowTruncation is set. 8829 if (CN && (UndefElements.none() || AllowUndefs)) { 8830 EVT CVT = CN->getValueType(0); 8831 EVT NSVT = N.getValueType().getScalarType(); 8832 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 8833 if (AllowTruncation || (CVT == NSVT)) 8834 return CN; 8835 } 8836 } 8837 8838 return nullptr; 8839 } 8840 8841 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) { 8842 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 8843 return CN; 8844 8845 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8846 BitVector UndefElements; 8847 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 8848 if (CN && (UndefElements.none() || AllowUndefs)) 8849 return CN; 8850 } 8851 8852 return nullptr; 8853 } 8854 8855 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, 8856 const APInt &DemandedElts, 8857 bool AllowUndefs) { 8858 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 8859 return CN; 8860 8861 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8862 BitVector UndefElements; 8863 ConstantFPSDNode *CN = 8864 BV->getConstantFPSplatNode(DemandedElts, &UndefElements); 8865 if (CN && (UndefElements.none() || AllowUndefs)) 8866 return CN; 8867 } 8868 8869 return nullptr; 8870 } 8871 8872 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) { 8873 // TODO: may want to use peekThroughBitcast() here. 8874 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs); 8875 return C && C->isNullValue(); 8876 } 8877 8878 bool llvm::isOneOrOneSplat(SDValue N) { 8879 // TODO: may want to use peekThroughBitcast() here. 
8880 unsigned BitWidth = N.getScalarValueSizeInBits(); 8881 ConstantSDNode *C = isConstOrConstSplat(N); 8882 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth; 8883 } 8884 8885 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) { 8886 N = peekThroughBitcasts(N); 8887 unsigned BitWidth = N.getScalarValueSizeInBits(); 8888 ConstantSDNode *C = isConstOrConstSplat(N); 8889 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth; 8890 } 8891 8892 HandleSDNode::~HandleSDNode() { 8893 DropOperands(); 8894 } 8895 8896 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 8897 const DebugLoc &DL, 8898 const GlobalValue *GA, EVT VT, 8899 int64_t o, unsigned TF) 8900 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 8901 TheGlobal = GA; 8902 } 8903 8904 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 8905 EVT VT, unsigned SrcAS, 8906 unsigned DestAS) 8907 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 8908 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 8909 8910 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 8911 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 8912 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 8913 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 8914 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 8915 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 8916 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 8917 8918 // We check here that the size of the memory operand fits within the size of 8919 // the MMO. This is because the MMO might indicate only a possible address 8920 // range instead of specifying the affected memory addresses precisely. 8921 // TODO: Make MachineMemOperands aware of scalable vectors. 8922 assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() && 8923 "Size mismatch!"); 8924 } 8925 8926 /// Profile - Gather unique data for the node. 8927 /// 8928 void SDNode::Profile(FoldingSetNodeID &ID) const { 8929 AddNodeIDNode(ID, this); 8930 } 8931 8932 namespace { 8933 8934 struct EVTArray { 8935 std::vector<EVT> VTs; 8936 8937 EVTArray() { 8938 VTs.reserve(MVT::LAST_VALUETYPE); 8939 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 8940 VTs.push_back(MVT((MVT::SimpleValueType)i)); 8941 } 8942 }; 8943 8944 } // end anonymous namespace 8945 8946 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 8947 static ManagedStatic<EVTArray> SimpleVTArray; 8948 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 8949 8950 /// getValueTypeList - Return a pointer to the specified value type. 8951 /// 8952 const EVT *SDNode::getValueTypeList(EVT VT) { 8953 if (VT.isExtended()) { 8954 sys::SmartScopedLock<true> Lock(*VTMutex); 8955 return &(*EVTs->insert(VT).first); 8956 } else { 8957 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 8958 "Value type out of range!"); 8959 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 8960 } 8961 } 8962 8963 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 8964 /// indicated value. This method ignores uses of other values defined by this 8965 /// operation. 
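/// For example, hasNUsesOfValue(1, 0) returns true only when result 0 of
/// this node is used exactly once, regardless of how many uses the node's
/// other results have.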
8966 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
8967   assert(Value < getNumValues() && "Bad value!");
8968
8969   // TODO: Only iterate over uses of a given value of the node
8970   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
8971     if (UI.getUse().getResNo() == Value) {
8972       if (NUses == 0)
8973         return false;
8974       --NUses;
8975     }
8976   }
8977
8978   // Found exactly the right number of uses?
8979   return NUses == 0;
8980 }
8981
8982 /// hasAnyUseOfValue - Return true if there is any use of the indicated
8983 /// value. This method ignores uses of other values defined by this operation.
8984 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
8985   assert(Value < getNumValues() && "Bad value!");
8986
8987   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
8988     if (UI.getUse().getResNo() == Value)
8989       return true;
8990
8991   return false;
8992 }
8993
8994 /// isOnlyUserOf - Return true if this node is the only use of N.
8995 bool SDNode::isOnlyUserOf(const SDNode *N) const {
8996   bool Seen = false;
8997   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
8998     SDNode *User = *I;
8999     if (User == this)
9000       Seen = true;
9001     else
9002       return false;
9003   }
9004
9005   return Seen;
9006 }
9007
9008 /// Return true if the only users of N are contained in Nodes.
9009 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
9010   bool Seen = false;
9011   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
9012     SDNode *User = *I;
9013     if (llvm::any_of(Nodes,
9014                      [&User](const SDNode *Node) { return User == Node; }))
9015       Seen = true;
9016     else
9017       return false;
9018   }
9019
9020   return Seen;
9021 }
9022
9023 /// isOperandOf - Return true if this value is an operand of N.
9024 bool SDValue::isOperandOf(const SDNode *N) const {
9025   return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; });
9026 }
9027
9028 bool SDNode::isOperandOf(const SDNode *N) const {
9029   return any_of(N->op_values(),
9030                 [this](SDValue Op) { return this == Op.getNode(); });
9031 }
9032
9033 /// reachesChainWithoutSideEffects - Return true if this operand (which must
9034 /// be a chain) reaches the specified operand without crossing any
9035 /// side-effecting instructions on any chain path. In practice, this looks
9036 /// through token factors and non-volatile loads. In order to remain efficient,
9037 /// this only looks a couple of nodes in; it does not do an exhaustive search.
9038 ///
9039 /// Note that we only need to examine chains when we're searching for
9040 /// side-effects; SelectionDAG requires that all side-effects are represented
9041 /// by chains, even if another operand would force a specific ordering. This
9042 /// constraint is necessary to allow transformations like splitting loads.
9043 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
9044                                              unsigned Depth) const {
9045   if (*this == Dest) return true;
9046
9047   // Don't search too deeply; we just want to be able to see through
9048   // TokenFactors etc.
9049   if (Depth == 0) return false;
9050
9051   // If this is a token factor, all inputs to the TF happen in parallel.
9052   if (getOpcode() == ISD::TokenFactor) {
9053     // First, try a shallow search.
9054     if (is_contained((*this)->ops(), Dest)) {
9055       // We found the chain we want as an operand of this TokenFactor.
9056 // Essentially, we reach the chain without side-effects if we could 9057 // serialize the TokenFactor into a simple chain of operations with 9058 // Dest as the last operation. This is automatically true if the 9059 // chain has one use: there are no other ordering constraints. 9060 // If the chain has more than one use, we give up: some other 9061 // use of Dest might force a side-effect between Dest and the current 9062 // node. 9063 if (Dest.hasOneUse()) 9064 return true; 9065 } 9066 // Next, try a deep search: check whether every operand of the TokenFactor 9067 // reaches Dest. 9068 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 9069 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 9070 }); 9071 } 9072 9073 // Loads don't have side effects, look through them. 9074 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 9075 if (Ld->isUnordered()) 9076 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 9077 } 9078 return false; 9079 } 9080 9081 bool SDNode::hasPredecessor(const SDNode *N) const { 9082 SmallPtrSet<const SDNode *, 32> Visited; 9083 SmallVector<const SDNode *, 16> Worklist; 9084 Worklist.push_back(this); 9085 return hasPredecessorHelper(N, Visited, Worklist); 9086 } 9087 9088 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 9089 this->Flags.intersectWith(Flags); 9090 } 9091 9092 SDValue 9093 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, 9094 ArrayRef<ISD::NodeType> CandidateBinOps, 9095 bool AllowPartials) { 9096 // The pattern must end in an extract from index 0. 9097 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || 9098 !isNullConstant(Extract->getOperand(1))) 9099 return SDValue(); 9100 9101 // Match against one of the candidate binary ops. 9102 SDValue Op = Extract->getOperand(0); 9103 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) { 9104 return Op.getOpcode() == unsigned(BinOp); 9105 })) 9106 return SDValue(); 9107 9108 // Floating-point reductions may require relaxed constraints on the final step 9109 // of the reduction because they may reorder intermediate operations. 9110 unsigned CandidateBinOp = Op.getOpcode(); 9111 if (Op.getValueType().isFloatingPoint()) { 9112 SDNodeFlags Flags = Op->getFlags(); 9113 switch (CandidateBinOp) { 9114 case ISD::FADD: 9115 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation()) 9116 return SDValue(); 9117 break; 9118 default: 9119 llvm_unreachable("Unhandled FP opcode for binop reduction"); 9120 } 9121 } 9122 9123 // Matching failed - attempt to see if we did enough stages that a partial 9124 // reduction from a subvector is possible. 9125 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) { 9126 if (!AllowPartials || !Op) 9127 return SDValue(); 9128 EVT OpVT = Op.getValueType(); 9129 EVT OpSVT = OpVT.getScalarType(); 9130 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts); 9131 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0)) 9132 return SDValue(); 9133 BinOp = (ISD::NodeType)CandidateBinOp; 9134 return getNode( 9135 ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op, 9136 getConstant(0, SDLoc(Op), TLI->getVectorIdxTy(getDataLayout()))); 9137 }; 9138 9139 // At each stage, we're looking for something that looks like: 9140 // %s = shufflevector <8 x i32> %op, <8 x i32> undef, 9141 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, 9142 // i32 undef, i32 undef, i32 undef, i32 undef> 9143 // %a = binop <8 x i32> %op, %s 9144 // Where the mask changes according to the stage. E.g. 
for a 3-stage pyramid, 9145 // we expect something like: 9146 // <4,5,6,7,u,u,u,u> 9147 // <2,3,u,u,u,u,u,u> 9148 // <1,u,u,u,u,u,u,u> 9149 // While a partial reduction match would be: 9150 // <2,3,u,u,u,u,u,u> 9151 // <1,u,u,u,u,u,u,u> 9152 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements()); 9153 SDValue PrevOp; 9154 for (unsigned i = 0; i < Stages; ++i) { 9155 unsigned MaskEnd = (1 << i); 9156 9157 if (Op.getOpcode() != CandidateBinOp) 9158 return PartialReduction(PrevOp, MaskEnd); 9159 9160 SDValue Op0 = Op.getOperand(0); 9161 SDValue Op1 = Op.getOperand(1); 9162 9163 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0); 9164 if (Shuffle) { 9165 Op = Op1; 9166 } else { 9167 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1); 9168 Op = Op0; 9169 } 9170 9171 // The first operand of the shuffle should be the same as the other operand 9172 // of the binop. 9173 if (!Shuffle || Shuffle->getOperand(0) != Op) 9174 return PartialReduction(PrevOp, MaskEnd); 9175 9176 // Verify the shuffle has the expected (at this stage of the pyramid) mask. 9177 for (int Index = 0; Index < (int)MaskEnd; ++Index) 9178 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index)) 9179 return PartialReduction(PrevOp, MaskEnd); 9180 9181 PrevOp = Op; 9182 } 9183 9184 BinOp = (ISD::NodeType)CandidateBinOp; 9185 return Op; 9186 } 9187 9188 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 9189 assert(N->getNumValues() == 1 && 9190 "Can't unroll a vector with multiple results!"); 9191 9192 EVT VT = N->getValueType(0); 9193 unsigned NE = VT.getVectorNumElements(); 9194 EVT EltVT = VT.getVectorElementType(); 9195 SDLoc dl(N); 9196 9197 SmallVector<SDValue, 8> Scalars; 9198 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 9199 9200 // If ResNE is 0, fully unroll the vector op. 9201 if (ResNE == 0) 9202 ResNE = NE; 9203 else if (NE > ResNE) 9204 NE = ResNE; 9205 9206 unsigned i; 9207 for (i= 0; i != NE; ++i) { 9208 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 9209 SDValue Operand = N->getOperand(j); 9210 EVT OperandVT = Operand.getValueType(); 9211 if (OperandVT.isVector()) { 9212 // A vector operand; extract a single element. 9213 EVT OperandEltVT = OperandVT.getVectorElementType(); 9214 Operands[j] = 9215 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand, 9216 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout()))); 9217 } else { 9218 // A scalar operand; just use it as is. 
9219 Operands[j] = Operand; 9220 } 9221 } 9222 9223 switch (N->getOpcode()) { 9224 default: { 9225 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 9226 N->getFlags())); 9227 break; 9228 } 9229 case ISD::VSELECT: 9230 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 9231 break; 9232 case ISD::SHL: 9233 case ISD::SRA: 9234 case ISD::SRL: 9235 case ISD::ROTL: 9236 case ISD::ROTR: 9237 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 9238 getShiftAmountOperand(Operands[0].getValueType(), 9239 Operands[1]))); 9240 break; 9241 case ISD::SIGN_EXTEND_INREG: { 9242 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 9243 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 9244 Operands[0], 9245 getValueType(ExtVT))); 9246 } 9247 } 9248 } 9249 9250 for (; i < ResNE; ++i) 9251 Scalars.push_back(getUNDEF(EltVT)); 9252 9253 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 9254 return getBuildVector(VecVT, dl, Scalars); 9255 } 9256 9257 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( 9258 SDNode *N, unsigned ResNE) { 9259 unsigned Opcode = N->getOpcode(); 9260 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO || 9261 Opcode == ISD::USUBO || Opcode == ISD::SSUBO || 9262 Opcode == ISD::UMULO || Opcode == ISD::SMULO) && 9263 "Expected an overflow opcode"); 9264 9265 EVT ResVT = N->getValueType(0); 9266 EVT OvVT = N->getValueType(1); 9267 EVT ResEltVT = ResVT.getVectorElementType(); 9268 EVT OvEltVT = OvVT.getVectorElementType(); 9269 SDLoc dl(N); 9270 9271 // If ResNE is 0, fully unroll the vector op. 9272 unsigned NE = ResVT.getVectorNumElements(); 9273 if (ResNE == 0) 9274 ResNE = NE; 9275 else if (NE > ResNE) 9276 NE = ResNE; 9277 9278 SmallVector<SDValue, 8> LHSScalars; 9279 SmallVector<SDValue, 8> RHSScalars; 9280 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); 9281 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); 9282 9283 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); 9284 SDVTList VTs = getVTList(ResEltVT, SVT); 9285 SmallVector<SDValue, 8> ResScalars; 9286 SmallVector<SDValue, 8> OvScalars; 9287 for (unsigned i = 0; i < NE; ++i) { 9288 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); 9289 SDValue Ov = 9290 getSelect(dl, OvEltVT, Res.getValue(1), 9291 getBoolConstant(true, dl, OvEltVT, ResVT), 9292 getConstant(0, dl, OvEltVT)); 9293 9294 ResScalars.push_back(Res); 9295 OvScalars.push_back(Ov); 9296 } 9297 9298 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); 9299 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); 9300 9301 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE); 9302 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE); 9303 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars), 9304 getBuildVector(NewOvVT, dl, OvScalars)); 9305 } 9306 9307 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 9308 LoadSDNode *Base, 9309 unsigned Bytes, 9310 int Dist) const { 9311 if (LD->isVolatile() || Base->isVolatile()) 9312 return false; 9313 // TODO: probably too restrictive for atomics, revisit 9314 if (!LD->isSimple()) 9315 return false; 9316 if (LD->isIndexed() || Base->isIndexed()) 9317 return false; 9318 if (LD->getChain() != Base->getChain()) 9319 return false; 9320 EVT VT = LD->getValueType(0); 9321 if (VT.getSizeInBits() / 8 != Bytes) 9322 return false; 9323 9324 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); 9325 auto LocDecomp = BaseIndexOffset::match(LD, *this); 
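  // (An illustrative example, assuming equalBaseIndex() reports LD's offset
  // relative to Base: with Bytes == 4, Base decomposing to base+4 and LD
  // decomposing to base+8, Offset comes back as 4, so the loads are
  // consecutive exactly when Dist == 1, i.e. Dist * Bytes == Offset.)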
9326 9327 int64_t Offset = 0; 9328 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset)) 9329 return (Dist * Bytes == Offset); 9330 return false; 9331 } 9332 9333 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if 9334 /// it cannot be inferred. 9335 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { 9336 // If this is a GlobalAddress + cst, return the alignment. 9337 const GlobalValue *GV = nullptr; 9338 int64_t GVOffset = 0; 9339 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { 9340 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); 9341 KnownBits Known(PtrWidth); 9342 llvm::computeKnownBits(GV, Known, getDataLayout()); 9343 unsigned AlignBits = Known.countMinTrailingZeros(); 9344 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; 9345 if (Align) 9346 return MinAlign(Align, GVOffset); 9347 } 9348 9349 // If this is a direct reference to a stack slot, use information about the 9350 // stack slot's alignment. 9351 int FrameIdx = INT_MIN; 9352 int64_t FrameOffset = 0; 9353 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) { 9354 FrameIdx = FI->getIndex(); 9355 } else if (isBaseWithConstantOffset(Ptr) && 9356 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 9357 // Handle FI+Cst 9358 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 9359 FrameOffset = Ptr.getConstantOperandVal(1); 9360 } 9361 9362 if (FrameIdx != INT_MIN) { 9363 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 9364 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx), 9365 FrameOffset); 9366 return FIInfoAlign; 9367 } 9368 9369 return 0; 9370 } 9371 9372 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type 9373 /// which is split (or expanded) into two not necessarily identical pieces. 9374 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const { 9375 // Currently all types are split in half. 9376 EVT LoVT, HiVT; 9377 if (!VT.isVector()) 9378 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); 9379 else 9380 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext()); 9381 9382 return std::make_pair(LoVT, HiVT); 9383 } 9384 9385 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the 9386 /// low/high part. 9387 std::pair<SDValue, SDValue> 9388 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, 9389 const EVT &HiVT) { 9390 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <= 9391 N.getValueType().getVectorNumElements() && 9392 "More vector elements requested than available!"); 9393 SDValue Lo, Hi; 9394 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, 9395 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout()))); 9396 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, 9397 getConstant(LoVT.getVectorNumElements(), DL, 9398 TLI->getVectorIdxTy(getDataLayout()))); 9399 return std::make_pair(Lo, Hi); 9400 } 9401 9402 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR. 
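/// For example, a v3f32 value is inserted at element 0 of an undef v4f32;
/// the original lanes are preserved and the extra lane is undefined.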
/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
  EVT VT = N.getValueType();
  EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                NextPowerOf2(VT.getVectorNumElements()));
  return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
                 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}
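// For example (a sketch; DAG, Vec3 and DL stand in for a caller's values):
// widening a v3i32 pads it with undef up to the next power of two:
//
//   SDValue Wide = DAG.WidenVector(Vec3, DL);
//   // Wide: (v4i32 INSERT_SUBVECTOR undef:v4i32, Vec3, 0), lane 3 undef
//
// and ExtractVectorElements(Vec3, Elts, 0, 0) appends all three scalar
// elements of Vec3 to Elts as EXTRACT_VECTOR_ELT nodes (Count == 0 means
// "extract them all").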
// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned int NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  assert(getNumOperands() == DemandedElts.getBitWidth() &&
         "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}
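// Example of the splat queries above (a sketch of the semantics; BV stands
// in for a BuildVectorSDNode*): given (v4i8 BUILD_VECTOR 1, 1, 1, 1),
//   - BV->getSplatValue() returns the i8 constant-1 operand, and
//   - BV->isConstantSplat(...) keeps halving the 32 vector bits while both
//     halves agree, ending with SplatValue = 0x01 and SplatBitSize = 8.
// A (v2i16 BUILD_VECTOR 0x0101, 0x0101) narrows to the same SplatBitSize of
// 8, since the repeated pattern is a single byte.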
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  // A node is divergent if any non-chain operand is divergent, unless the
  // target says the node is always uniform.
  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip Chain. It does not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}

/// Build a TokenFactor over \p Vals, folding excess operands into nested
/// TokenFactors so that no single node exceeds SDNode's operand limit.
SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}
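// Usage sketch (DAG, Chains and DL stand in for a caller's values): merging
// an arbitrary number of chains, e.g. from a series of scalarized stores,
// into one token that later operations can depend on:
//
//   SmallVector<SDValue, 8> Chains; // filled with store chains by the caller
//   SDValue OneChain = DAG.getTokenFactor(DL, Chains);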
#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}
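// Usage sketch (DAG stands in for a caller's SelectionDAG): the checks above
// compile away in release builds and, unless forced, only run under
// EXPENSIVE_CHECKS. A pass that just rewrote nodes might verify eagerly with:
//
//   checkForCycles(&DAG, /*force=*/true); // walk the whole DAG from its root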