//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

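// Node insertion, update and deletion are observable through the chain of
// DAGUpdateListener objects walked via UpdateListeners throughout this file;
// clients that need notifications override the hooks below.
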
// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

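// For example, a BUILD_VECTOR with operands (C0, undef, C1) satisfies
// isBuildVectorOfConstantSDNodes: undef lanes are skipped and every remaining
// operand must be a ConstantSDNode.
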
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs) {
  if (LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (LHSOp.getValueType() != SVT ||
        LHSOp.getValueType() != RHSOp.getValueType())
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

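// For example, swapping the operands of "x < y" yields "y > x", and inverting
// an integer comparison flips its L, G and E bits:
//   getSetCCSwappedOperands(ISD::SETLT) == ISD::SETGT
//   getSetCCInverse(ISD::SETULT, /*isInteger=*/true) == ISD::SETUGE
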
/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if it is an unsigned comparison. Return zero if the operation does
/// not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

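// The resulting FoldingSetNodeID is what the CSE map hashes on: two nodes
// with the same opcode, interned value type list and (operand, result number)
// pairs fold into a single SDNode, modulo the per-opcode extras added below.
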
/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true;  // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;  // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true;  // Never CSE anything that produces a flag.

  return false;
}

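// Glue results encode scheduling constraints between specific nodes, so a
// node producing MVT::Glue must never be merged with a structurally identical
// one; doNotCSE is checked before nodes are (re)inserted into the CSE map
// below.
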
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
        std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                             ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

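// The protocol for mutating a node in place is thus: take it out of the CSE
// maps with RemoveNodeFromCSEMaps, rewrite its operands, then call
// AddModifiedNodeToCSEMaps so it is either re-inserted or folded into an
// existing equivalent node.
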
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
    PointerType::get(Type::getInt8Ty(*getContext()), 0) :
    VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

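// Note the split between construction and init() below: the same SelectionDAG
// object can be re-targeted at a new MachineFunction (picking up its
// subtarget's TargetLowering and SelectionDAGInfo) without being reallocated.
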
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis *Divergence) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

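// clear() thus resets the DAG to its freshly-constructed state: every node
// and side table is dropped, and only the EntryToken survives as the root.
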
SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType().getScalarType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}

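// For example, getZExtOrTrunc(Op, DL, MVT::i64) emits ZERO_EXTEND when Op is
// an i32 and TRUNCATE when Op is wider than i64; the pointer variants above
// simply assume unsigned pointer semantics.
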
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());

    SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                                        bool isTarget) {
  return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}

SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
                                             const SDLoc &DL, bool LegalTypes) {
  EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
  return getConstant(Val, DL, ShiftVT);
}

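// Note that for vector types getConstant() produces a splat BUILD_VECTOR of
// the scalar constant: getConstant(1, DL, MVT::v4i32), for instance, yields a
// BUILD_VECTOR whose four operands are the same i32 constant node.
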
SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
                                    EVT VT, bool isTarget) {
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(&V);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);
  NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
                                    bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT == MVT::f32)
    return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
  else if (EltVT == MVT::f64)
    return getConstantFP(APFloat(Val), DL, VT, isTarget);
  else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
           EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
                                       EVT VT, int64_t Offset, bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<GlobalAddressSDNode>(
      Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

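// Like the other leaf nodes above, frame indices are uniqued through the CSE
// map: two getFrameIndex(FI, VT) calls with the same arguments return the
// same node.
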
SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = MF->getFunction().hasOptSize()
                    ? getDataLayout().getABITypeAlignment(C->getType())
                    : getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
                                          TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
  ID.AddPointer(MBB);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BasicBlockSDNode>(MBB);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = newSDNode<VTSDNode>(VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
  SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
  N = newSDNode<MCSymbolSDNode>(Sym, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
      TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                                 TargetFlags)];
  if (N) return SDValue(N, 0);
  N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (!CondCodeNodes[Cond]) {
    auto *N = newSDNode<CondCodeSDNode>(Cond);
    CondCodeNodes[Cond] = N;
    InsertNode(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

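// Condition codes are likewise kept unique: CondCodeNodes grows on demand,
// and repeated getCondCode(ISD::SETEQ) calls hand back the same cached node.
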
/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
/// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}

SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
                                       SDValue N2, ArrayRef<int> Mask) {
  assert(VT.getVectorNumElements() == Mask.size() &&
         "Must have the same number of vector elements as mask elements!");
  assert(VT == N1.getValueType() && VT == N2.getValueType() &&
         "Invalid VECTOR_SHUFFLE");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.isUndef() && N2.isUndef())
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  int NElts = Mask.size();
  assert(llvm::all_of(Mask,
                      [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
         "Index out of range");

  // Copy the mask so we can do any needed cleanup.
  SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);

  if (TLI->hasVectorBlend()) {
    // If shuffling a splat, try to blend the splat instead. We do this here so
    // that even when this arises during lowering we don't have to re-handle it.
    auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      if (!Splat)
        return;

      for (int i = 0; i < NElts; ++i) {
        if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
          continue;

        // If this input comes from undef, mark it as such.
        if (UndefElements[MaskVec[i] - Offset]) {
          MaskVec[i] = -1;
          continue;
        }

        // If we can blend a non-undef lane, use that instead.
        if (!UndefElements[i])
          MaskVec[i] = i + Offset;
      }
    };
    if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
      BlendSplat(N1BV, 0);
    if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
      BlendSplat(N2BV, NElts);
  }

  // Canonicalize all indices into lhs, -> shuffle lhs, undef
  // Canonicalize all indices into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.isUndef();
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
  // Reset our undef status after accounting for the mask.
  N2Undef = N2.isUndef();
  // Re-check whether both sides ended up undef.
  if (N1.isUndef() && N2Undef)
    return getUNDEF(VT);

  // If this is an identity shuffle, return that node.
  bool Identity = true, AllSame = true;
  for (int i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
    if (MaskVec[i] != MaskVec[0]) AllSame = false;
  }
  if (Identity && NElts)
    return N1;

  // Shuffling a constant splat doesn't change the result.
  if (N2Undef) {
    SDValue V = N1;

    // Look through any bitcasts. We check that these don't change the number
    // (and size) of elements and only change their types.
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    // A splat should always show up as a build vector node.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
      BitVector UndefElements;
      SDValue Splat = BV->getSplatValue(&UndefElements);
      // If this is a splat of an undef, shuffling it is also undef.
      if (Splat && Splat.isUndef())
        return getUNDEF(VT);

      bool SameNumElts =
          V.getValueType().getVectorNumElements() == VT.getVectorNumElements();

      // We can only skip the shuffle if there is a splatted value and no undef
      // lanes are rearranged by the shuffle.
      if (Splat && UndefElements.none()) {
        // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
        // number of elements matches or the value splatted is a zero constant.
        if (SameNumElts)
          return N1;
        if (auto *C = dyn_cast<ConstantSDNode>(Splat))
          if (C->isNullValue())
            return N1;
      }

      // If the shuffle itself creates a splat, build the vector directly.
      if (AllSame && SameNumElts) {
        EVT BuildVT = BV->getValueType(0);
        const SDValue &Splatted = BV->getOperand(MaskVec[0]);
        SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);

        // We may have jumped through bitcasts, so the type of the
        // BUILD_VECTOR may not match the type of the shuffle.
        if (BuildVT != VT)
          NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
        return NewBV;
      }
    }
  }

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
  for (int i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when the
  // node is deallocated, but recovered when the OperandAllocator is released.
1727 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); 1728 llvm::copy(MaskVec, MaskAlloc); 1729 1730 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), 1731 dl.getDebugLoc(), MaskAlloc); 1732 createOperands(N, Ops); 1733 1734 CSEMap.InsertNode(N, IP); 1735 InsertNode(N); 1736 SDValue V = SDValue(N, 0); 1737 NewSDValueDbgMsg(V, "Creating new node: ", this); 1738 return V; 1739 } 1740 1741 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { 1742 EVT VT = SV.getValueType(0); 1743 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end()); 1744 ShuffleVectorSDNode::commuteMask(MaskVec); 1745 1746 SDValue Op0 = SV.getOperand(0); 1747 SDValue Op1 = SV.getOperand(1); 1748 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); 1749 } 1750 1751 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { 1752 FoldingSetNodeID ID; 1753 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None); 1754 ID.AddInteger(RegNo); 1755 void *IP = nullptr; 1756 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1757 return SDValue(E, 0); 1758 1759 auto *N = newSDNode<RegisterSDNode>(RegNo, VT); 1760 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA); 1761 CSEMap.InsertNode(N, IP); 1762 InsertNode(N); 1763 return SDValue(N, 0); 1764 } 1765 1766 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { 1767 FoldingSetNodeID ID; 1768 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None); 1769 ID.AddPointer(RegMask); 1770 void *IP = nullptr; 1771 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1772 return SDValue(E, 0); 1773 1774 auto *N = newSDNode<RegisterMaskSDNode>(RegMask); 1775 CSEMap.InsertNode(N, IP); 1776 InsertNode(N); 1777 return SDValue(N, 0); 1778 } 1779 1780 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, 1781 MCSymbol *Label) { 1782 return getLabelNode(ISD::EH_LABEL, dl, Root, Label); 1783 } 1784 1785 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, 1786 SDValue Root, MCSymbol *Label) { 1787 FoldingSetNodeID ID; 1788 SDValue Ops[] = { Root }; 1789 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); 1790 ID.AddPointer(Label); 1791 void *IP = nullptr; 1792 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) 1793 return SDValue(E, 0); 1794 1795 auto *N = 1796 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label); 1797 createOperands(N, Ops); 1798 1799 CSEMap.InsertNode(N, IP); 1800 InsertNode(N); 1801 return SDValue(N, 0); 1802 } 1803 1804 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, 1805 int64_t Offset, 1806 bool isTarget, 1807 unsigned char TargetFlags) { 1808 unsigned Opc = isTarget ? 
                                 ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
  ID.AddPointer(V);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<SrcValueSDNode>(V);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
  ID.AddPointer(MD);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<MDNodeSDNode>(MD);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
  if (VT == V.getValueType())
    return V;

  return getNode(ISD::BITCAST, SDLoc(V), VT, V);
}

SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
                                       unsigned SrcAS, unsigned DestAS) {
  SDValue Ops[] = {Ptr};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
  ID.AddInteger(SrcAS);
  ID.AddInteger(DestAS);

  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VT, SrcAS, DestAS);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

/// getShiftAmountOperand - Return the specified value cast to the target's
/// desired shift amount type.
1888 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { 1889 EVT OpTy = Op.getValueType(); 1890 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); 1891 if (OpTy == ShTy || OpTy.isVector()) return Op; 1892 1893 return getZExtOrTrunc(Op, SDLoc(Op), ShTy); 1894 } 1895 1896 SDValue SelectionDAG::expandVAArg(SDNode *Node) { 1897 SDLoc dl(Node); 1898 const TargetLowering &TLI = getTargetLoweringInfo(); 1899 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1900 EVT VT = Node->getValueType(0); 1901 SDValue Tmp1 = Node->getOperand(0); 1902 SDValue Tmp2 = Node->getOperand(1); 1903 unsigned Align = Node->getConstantOperandVal(3); 1904 1905 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, 1906 Tmp2, MachinePointerInfo(V)); 1907 SDValue VAList = VAListLoad; 1908 1909 if (Align > TLI.getMinStackArgumentAlignment()) { 1910 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 1911 1912 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1913 getConstant(Align - 1, dl, VAList.getValueType())); 1914 1915 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList, 1916 getConstant(-(int64_t)Align, dl, VAList.getValueType())); 1917 } 1918 1919 // Increment the pointer, VAList, to the next vaarg 1920 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, 1921 getConstant(getDataLayout().getTypeAllocSize( 1922 VT.getTypeForEVT(*getContext())), 1923 dl, VAList.getValueType())); 1924 // Store the incremented VAList to the legalized pointer 1925 Tmp1 = 1926 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); 1927 // Load the actual argument out of the pointer VAList 1928 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); 1929 } 1930 1931 SDValue SelectionDAG::expandVACopy(SDNode *Node) { 1932 SDLoc dl(Node); 1933 const TargetLowering &TLI = getTargetLoweringInfo(); 1934 // This defaults to loading a pointer from the input and storing it to the 1935 // output, returning the chain. 
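  // (Illustratively, the expansion amounts to a single pointer-width copy,
  // roughly *(void **)DestList = *(void **)SrcList, plus the chain update.)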
1936 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 1937 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 1938 SDValue Tmp1 = 1939 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), 1940 Node->getOperand(2), MachinePointerInfo(VS)); 1941 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 1942 MachinePointerInfo(VD)); 1943 } 1944 1945 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { 1946 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1947 unsigned ByteSize = VT.getStoreSize(); 1948 Type *Ty = VT.getTypeForEVT(*getContext()); 1949 unsigned StackAlign = 1950 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign); 1951 1952 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 1953 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1954 } 1955 1956 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { 1957 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize()); 1958 Type *Ty1 = VT1.getTypeForEVT(*getContext()); 1959 Type *Ty2 = VT2.getTypeForEVT(*getContext()); 1960 const DataLayout &DL = getDataLayout(); 1961 unsigned Align = 1962 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2)); 1963 1964 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 1965 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false); 1966 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); 1967 } 1968 1969 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, 1970 ISD::CondCode Cond, const SDLoc &dl) { 1971 EVT OpVT = N1.getValueType(); 1972 1973 // These setcc operations always fold. 1974 switch (Cond) { 1975 default: break; 1976 case ISD::SETFALSE: 1977 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); 1978 case ISD::SETTRUE: 1979 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); 1980 1981 case ISD::SETOEQ: 1982 case ISD::SETOGT: 1983 case ISD::SETOGE: 1984 case ISD::SETOLT: 1985 case ISD::SETOLE: 1986 case ISD::SETONE: 1987 case ISD::SETO: 1988 case ISD::SETUO: 1989 case ISD::SETUEQ: 1990 case ISD::SETUNE: 1991 assert(!OpVT.isInteger() && "Illegal setcc for integer!"); 1992 break; 1993 } 1994 1995 if (OpVT.isInteger()) { 1996 // For EQ and NE, we can always pick a value for the undef to make the 1997 // predicate pass or fail, so we can return undef. 1998 // Matches behavior in llvm::ConstantFoldCompareInstruction. 1999 // icmp eq/ne X, undef -> undef. 2000 if ((N1.isUndef() || N2.isUndef()) && 2001 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) 2002 return getUNDEF(VT); 2003 2004 // If both operands are undef, we can return undef for int comparison. 2005 // icmp undef, undef -> undef. 2006 if (N1.isUndef() && N2.isUndef()) 2007 return getUNDEF(VT); 2008 2009 // icmp X, X -> true/false 2010 // icmp X, undef -> true/false because undef could be X. 
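    // (For example, setcc X, X with SETLE folds to true, while with SETUGT it
    // folds to false; ISD::isTrueWhenEqual(Cond) encodes this per predicate.)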
2011 if (N1 == N2) 2012 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT); 2013 } 2014 2015 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { 2016 const APInt &C2 = N2C->getAPIntValue(); 2017 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { 2018 const APInt &C1 = N1C->getAPIntValue(); 2019 2020 switch (Cond) { 2021 default: llvm_unreachable("Unknown integer setcc!"); 2022 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT); 2023 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT); 2024 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT); 2025 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT); 2026 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT); 2027 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT); 2028 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT); 2029 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT); 2030 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT); 2031 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT); 2032 } 2033 } 2034 } 2035 2036 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 2037 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 2038 2039 if (N1CFP && N2CFP) { 2040 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF()); 2041 switch (Cond) { 2042 default: break; 2043 case ISD::SETEQ: if (R==APFloat::cmpUnordered) 2044 return getUNDEF(VT); 2045 LLVM_FALLTHROUGH; 2046 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, 2047 OpVT); 2048 case ISD::SETNE: if (R==APFloat::cmpUnordered) 2049 return getUNDEF(VT); 2050 LLVM_FALLTHROUGH; 2051 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2052 R==APFloat::cmpLessThan, dl, VT, 2053 OpVT); 2054 case ISD::SETLT: if (R==APFloat::cmpUnordered) 2055 return getUNDEF(VT); 2056 LLVM_FALLTHROUGH; 2057 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, 2058 OpVT); 2059 case ISD::SETGT: if (R==APFloat::cmpUnordered) 2060 return getUNDEF(VT); 2061 LLVM_FALLTHROUGH; 2062 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, 2063 VT, OpVT); 2064 case ISD::SETLE: if (R==APFloat::cmpUnordered) 2065 return getUNDEF(VT); 2066 LLVM_FALLTHROUGH; 2067 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || 2068 R==APFloat::cmpEqual, dl, VT, 2069 OpVT); 2070 case ISD::SETGE: if (R==APFloat::cmpUnordered) 2071 return getUNDEF(VT); 2072 LLVM_FALLTHROUGH; 2073 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan || 2074 R==APFloat::cmpEqual, dl, VT, OpVT); 2075 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, 2076 OpVT); 2077 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, 2078 OpVT); 2079 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || 2080 R==APFloat::cmpEqual, dl, VT, 2081 OpVT); 2082 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, 2083 OpVT); 2084 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || 2085 R==APFloat::cmpLessThan, dl, VT, 2086 OpVT); 2087 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || 2088 R==APFloat::cmpUnordered, dl, VT, 2089 OpVT); 2090 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, 2091 VT, OpVT); 2092 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, 2093 OpVT); 2094 } 2095 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) { 2096 // 
Ensure that the constant occurs on the RHS.
    ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
    if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
      return SDValue();
    return getSetCC(dl, VT, N2, N1, SwappedCond);
  } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
             (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
    // If an operand is known to be a NaN (or an undef that could be a NaN),
    // we can fold it.
    // Choosing NaN for the undef will always make unordered comparisons
    // succeed and ordered comparisons fail.
    // Matches behavior in llvm::ConstantFoldCompareInstruction.
    switch (ISD::getUnorderedFlavor(Cond)) {
    default:
      llvm_unreachable("Unknown flavor!");
    case 0: // Known false.
      return getBoolConstant(false, dl, VT, OpVT);
    case 1: // Known true.
      return getBoolConstant(true, dl, VT, OpVT);
    case 2: // Undefined.
      return getUNDEF(VT);
    }
  }

  // Could not fold it.
  return SDValue();
}

/// See if the specified operand can be simplified with the knowledge that only
/// the bits specified by DemandedBits are used.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
  EVT VT = V.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return GetDemandedBits(V, DemandedBits, DemandedElts);
}

/// See if the specified operand can be simplified with the knowledge that only
/// the bits specified by DemandedBits are used in the elements specified by
/// DemandedElts.
/// TODO: really we should be making this into the DAG equivalent of
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
                                      const APInt &DemandedElts) {
  switch (V.getOpcode()) {
  default:
    break;
  case ISD::Constant: {
    auto *CV = cast<ConstantSDNode>(V.getNode());
    assert(CV && "Const value should be ConstSDNode.");
    const APInt &CVal = CV->getAPIntValue();
    APInt NewVal = CVal & DemandedBits;
    if (NewVal != CVal)
      return getConstant(NewVal, SDLoc(V), V.getValueType());
    break;
  }
  case ISD::OR:
  case ISD::XOR:
    // If the LHS or RHS don't contribute bits to the OR/XOR, drop them.
    if (MaskedValueIsZero(V.getOperand(0), DemandedBits))
      return V.getOperand(1);
    if (MaskedValueIsZero(V.getOperand(1), DemandedBits))
      return V.getOperand(0);
    break;
  case ISD::SRL:
    // Only look at single-use SRLs.
    if (!V.getNode()->hasOneUse())
      break;
    if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
      // See if we can recursively simplify the LHS.
      unsigned Amt = RHSC->getZExtValue();

      // Watch out for shift count overflow though.
      if (Amt >= DemandedBits.getBitWidth())
        break;
      APInt SrcDemandedBits = DemandedBits << Amt;
      if (SDValue SimplifyLHS =
              GetDemandedBits(V.getOperand(0), SrcDemandedBits))
        return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
                       V.getOperand(1));
    }
    break;
  case ISD::AND: {
    // X & -1 -> X (ignoring bits which aren't demanded).
2183 // Also handle the case where masked out bits in X are known to be zero. 2184 if (ConstantSDNode *RHSC = isConstOrConstSplat(V.getOperand(1))) { 2185 const APInt &AndVal = RHSC->getAPIntValue(); 2186 if (DemandedBits.isSubsetOf(AndVal) || 2187 DemandedBits.isSubsetOf(computeKnownBits(V.getOperand(0)).Zero | 2188 AndVal)) 2189 return V.getOperand(0); 2190 } 2191 break; 2192 } 2193 case ISD::ANY_EXTEND: { 2194 SDValue Src = V.getOperand(0); 2195 unsigned SrcBitWidth = Src.getScalarValueSizeInBits(); 2196 // Being conservative here - only peek through if we only demand bits in the 2197 // non-extended source (even though the extended bits are technically 2198 // undef). 2199 if (DemandedBits.getActiveBits() > SrcBitWidth) 2200 break; 2201 APInt SrcDemandedBits = DemandedBits.trunc(SrcBitWidth); 2202 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcDemandedBits)) 2203 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc); 2204 break; 2205 } 2206 case ISD::SIGN_EXTEND_INREG: 2207 EVT ExVT = cast<VTSDNode>(V.getOperand(1))->getVT(); 2208 unsigned ExVTBits = ExVT.getScalarSizeInBits(); 2209 2210 // If none of the extended bits are demanded, eliminate the sextinreg. 2211 if (DemandedBits.getActiveBits() <= ExVTBits) 2212 return V.getOperand(0); 2213 2214 break; 2215 } 2216 return SDValue(); 2217 } 2218 2219 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We 2220 /// use this predicate to simplify operations downstream. 2221 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { 2222 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2223 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); 2224 } 2225 2226 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use 2227 /// this predicate to simplify operations downstream. Mask is known to be zero 2228 /// for bits that V cannot have. 2229 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2230 unsigned Depth) const { 2231 EVT VT = V.getValueType(); 2232 APInt DemandedElts = VT.isVector() 2233 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2234 : APInt(1, 1); 2235 return MaskedValueIsZero(V, Mask, DemandedElts, Depth); 2236 } 2237 2238 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in 2239 /// DemandedElts. We use this predicate to simplify operations downstream. 2240 /// Mask is known to be zero for bits that V cannot have. 2241 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, 2242 const APInt &DemandedElts, 2243 unsigned Depth) const { 2244 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero); 2245 } 2246 2247 /// isSplatValue - Return true if the vector V has the same value 2248 /// across all DemandedElts. 2249 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, 2250 APInt &UndefElts) { 2251 if (!DemandedElts) 2252 return false; // No demanded elts, better to assume we don't know anything. 
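  // (Illustrative example: for V = build_vector(x, undef, x, x) with
  // DemandedElts = 0b1101, this returns true with UndefElts = 0b0010, since
  // every demanded element carries the same value x.)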
2253 2254 EVT VT = V.getValueType(); 2255 assert(VT.isVector() && "Vector type expected"); 2256 2257 unsigned NumElts = VT.getVectorNumElements(); 2258 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch"); 2259 UndefElts = APInt::getNullValue(NumElts); 2260 2261 switch (V.getOpcode()) { 2262 case ISD::BUILD_VECTOR: { 2263 SDValue Scl; 2264 for (unsigned i = 0; i != NumElts; ++i) { 2265 SDValue Op = V.getOperand(i); 2266 if (Op.isUndef()) { 2267 UndefElts.setBit(i); 2268 continue; 2269 } 2270 if (!DemandedElts[i]) 2271 continue; 2272 if (Scl && Scl != Op) 2273 return false; 2274 Scl = Op; 2275 } 2276 return true; 2277 } 2278 case ISD::VECTOR_SHUFFLE: { 2279 // Check if this is a shuffle node doing a splat. 2280 // TODO: Do we need to handle shuffle(splat, undef, mask)? 2281 int SplatIndex = -1; 2282 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); 2283 for (int i = 0; i != (int)NumElts; ++i) { 2284 int M = Mask[i]; 2285 if (M < 0) { 2286 UndefElts.setBit(i); 2287 continue; 2288 } 2289 if (!DemandedElts[i]) 2290 continue; 2291 if (0 <= SplatIndex && SplatIndex != M) 2292 return false; 2293 SplatIndex = M; 2294 } 2295 return true; 2296 } 2297 case ISD::EXTRACT_SUBVECTOR: { 2298 SDValue Src = V.getOperand(0); 2299 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(V.getOperand(1)); 2300 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2301 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) { 2302 // Offset the demanded elts by the subvector index. 2303 uint64_t Idx = SubIdx->getZExtValue(); 2304 APInt UndefSrcElts; 2305 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2306 if (isSplatValue(Src, DemandedSrc, UndefSrcElts)) { 2307 UndefElts = UndefSrcElts.extractBits(NumElts, Idx); 2308 return true; 2309 } 2310 } 2311 break; 2312 } 2313 case ISD::ADD: 2314 case ISD::SUB: 2315 case ISD::AND: { 2316 APInt UndefLHS, UndefRHS; 2317 SDValue LHS = V.getOperand(0); 2318 SDValue RHS = V.getOperand(1); 2319 if (isSplatValue(LHS, DemandedElts, UndefLHS) && 2320 isSplatValue(RHS, DemandedElts, UndefRHS)) { 2321 UndefElts = UndefLHS | UndefRHS; 2322 return true; 2323 } 2324 break; 2325 } 2326 } 2327 2328 return false; 2329 } 2330 2331 /// Helper wrapper to main isSplatValue function. 2332 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) { 2333 EVT VT = V.getValueType(); 2334 assert(VT.isVector() && "Vector type expected"); 2335 unsigned NumElts = VT.getVectorNumElements(); 2336 2337 APInt UndefElts; 2338 APInt DemandedElts = APInt::getAllOnesValue(NumElts); 2339 return isSplatValue(V, DemandedElts, UndefElts) && 2340 (AllowUndefs || !UndefElts); 2341 } 2342 2343 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) { 2344 V = peekThroughExtractSubvectors(V); 2345 2346 EVT VT = V.getValueType(); 2347 unsigned Opcode = V.getOpcode(); 2348 switch (Opcode) { 2349 default: { 2350 APInt UndefElts; 2351 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); 2352 if (isSplatValue(V, DemandedElts, UndefElts)) { 2353 // Handle case where all demanded elements are UNDEF. 2354 if (DemandedElts.isSubsetOf(UndefElts)) { 2355 SplatIdx = 0; 2356 return getUNDEF(VT); 2357 } 2358 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes(); 2359 return V; 2360 } 2361 break; 2362 } 2363 case ISD::VECTOR_SHUFFLE: { 2364 // Check if this is a shuffle node doing a splat. 
2365 // TODO - remove this and rely purely on SelectionDAG::isSplatValue, 2366 // getTargetVShiftNode currently struggles without the splat source. 2367 auto *SVN = cast<ShuffleVectorSDNode>(V); 2368 if (!SVN->isSplat()) 2369 break; 2370 int Idx = SVN->getSplatIndex(); 2371 int NumElts = V.getValueType().getVectorNumElements(); 2372 SplatIdx = Idx % NumElts; 2373 return V.getOperand(Idx / NumElts); 2374 } 2375 } 2376 2377 return SDValue(); 2378 } 2379 2380 SDValue SelectionDAG::getSplatValue(SDValue V) { 2381 int SplatIdx; 2382 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) 2383 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), 2384 SrcVector.getValueType().getScalarType(), SrcVector, 2385 getIntPtrConstant(SplatIdx, SDLoc(V))); 2386 return SDValue(); 2387 } 2388 2389 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that 2390 /// is less than the element bit-width of the shift node, return it. 2391 static const APInt *getValidShiftAmountConstant(SDValue V) { 2392 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) { 2393 // Shifting more than the bitwidth is not valid. 2394 const APInt &ShAmt = SA->getAPIntValue(); 2395 if (ShAmt.ult(V.getScalarValueSizeInBits())) 2396 return &ShAmt; 2397 } 2398 return nullptr; 2399 } 2400 2401 /// Determine which bits of Op are known to be either zero or one and return 2402 /// them in Known. For vectors, the known bits are those that are shared by 2403 /// every vector element. 2404 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const { 2405 EVT VT = Op.getValueType(); 2406 APInt DemandedElts = VT.isVector() 2407 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 2408 : APInt(1, 1); 2409 return computeKnownBits(Op, DemandedElts, Depth); 2410 } 2411 2412 /// Determine which bits of Op are known to be either zero or one and return 2413 /// them in Known. The DemandedElts argument allows us to only collect the known 2414 /// bits that are shared by the requested vector elements. 2415 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts, 2416 unsigned Depth) const { 2417 unsigned BitWidth = Op.getScalarValueSizeInBits(); 2418 2419 KnownBits Known(BitWidth); // Don't know anything. 2420 2421 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2422 // We know all of the bits for a constant! 2423 Known.One = C->getAPIntValue(); 2424 Known.Zero = ~Known.One; 2425 return Known; 2426 } 2427 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { 2428 // We know all of the bits for a constant fp! 2429 Known.One = C->getValueAPF().bitcastToAPInt(); 2430 Known.Zero = ~Known.One; 2431 return Known; 2432 } 2433 2434 if (Depth == 6) 2435 return Known; // Limit search depth. 2436 2437 KnownBits Known2; 2438 unsigned NumElts = DemandedElts.getBitWidth(); 2439 assert((!Op.getValueType().isVector() || 2440 NumElts == Op.getValueType().getVectorNumElements()) && 2441 "Unexpected vector size"); 2442 2443 if (!DemandedElts) 2444 return Known; // No demanded elts, better to assume we don't know anything. 2445 2446 unsigned Opcode = Op.getOpcode(); 2447 switch (Opcode) { 2448 case ISD::BUILD_VECTOR: 2449 // Collect the known bits that are shared by every demanded vector element. 
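    // (For example, given build_vector(0x3, 0x7) with both elements demanded,
    // the bits shared by 0x3 and 0x7 give Known.One = 0x3 and
    // Known.Zero = ~0x7.)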
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;

      SDValue SrcOp = Op.getOperand(i);
      Known2 = computeKnownBits(SrcOp, Depth + 1);

      // BUILD_VECTOR can implicitly truncate sources; we must handle this.
      if (SrcOp.getValueSizeInBits() != BitWidth) {
        assert(SrcOp.getValueSizeInBits() > BitWidth &&
               "Expected BUILD_VECTOR implicit truncation");
        Known2 = Known2.trunc(BitWidth);
      }

      // Known bits are the values that are shared by every demanded element.
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;

      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  case ISD::VECTOR_SHUFFLE: {
    // Collect the known bits that are shared by every vector element
    // referenced by the shuffle.
    APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
    Known.Zero.setAllBits(); Known.One.setAllBits();
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
    assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;

      int M = SVN->getMaskElt(i);
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        Known.resetAll();
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }

      if ((unsigned)M < NumElts)
        DemandedLHS.setBit((unsigned)M % NumElts);
      else
        DemandedRHS.setBit((unsigned)M % NumElts);
    }
    // Known bits are the values that are shared by every demanded element.
    if (!!DemandedLHS) {
      SDValue LHS = Op.getOperand(0);
      Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    if (!!DemandedRHS) {
      SDValue RHS = Op.getOperand(1);
      Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    // Split DemandedElts and test each of the demanded subvectors.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; i != NumSubVectors; ++i) {
      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
      if (!!DemandedSub) {
        SDValue Sub = Op.getOperand(i);
        Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If we know the element index, demand any elements from the subvector and
    // the remainder from the src it's inserted into, otherwise demand them
    // all.
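    // (Illustrative example: inserting a 2-element subvector at index 2 of a
    // 4-element vector, demanded result elements 2 and 3 come from subvector
    // elements 0 and 1, while any other demanded elements come from Src.)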
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
      Known.One.setAllBits();
      Known.Zero.setAllBits();
      uint64_t Idx = SubIdx->getZExtValue();
      APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
      if (!!DemandedSubElts) {
        Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
        if (Known.isUnknown())
          break; // early-out.
      }
      APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
      APInt DemandedSrcElts = DemandedElts & ~SubMask;
      if (!!DemandedSrcElts) {
        Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    } else {
      Known = computeKnownBits(Sub, Depth + 1);
      if (Known.isUnknown())
        break; // early-out.
      Known2 = computeKnownBits(Src, Depth + 1);
      Known.One &= Known2.One;
      Known.Zero &= Known2.Zero;
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // If we know the element index, just demand those subvector elements,
    // otherwise demand them all.
    SDValue Src = Op.getOperand(0);
    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
      // Offset the demanded elts by the subvector index.
      uint64_t Idx = SubIdx->getZExtValue();
      APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
      Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
    } else {
      Known = computeKnownBits(Src, Depth + 1);
    }
    break;
  }
  case ISD::SCALAR_TO_VECTOR: {
    // We know about scalar_to_vector as much as we know about its source,
    // which becomes the first element of an otherwise unknown vector.
    if (DemandedElts != 1)
      break;

    SDValue N0 = Op.getOperand(0);
    Known = computeKnownBits(N0, Depth + 1);
    if (N0.getValueSizeInBits() != BitWidth)
      Known = Known.trunc(BitWidth);

    break;
  }
  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    EVT SubVT = N0.getValueType();
    unsigned SubBitWidth = SubVT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
      break;

    // Fast handling of 'identity' bitcasts.
    if (BitWidth == SubBitWidth) {
      Known = computeKnownBits(N0, DemandedElts, Depth + 1);
      break;
    }

    bool IsLE = getDataLayout().isLittleEndian();

    // Bitcast 'small element' vector to 'large element' scalar/vector.
    if ((BitWidth % SubBitWidth) == 0) {
      assert(N0.getValueType().isVector() && "Expected bitcast from vector");

      // Collect known bits for the (larger) output by collecting the known
      // bits from each set of sub elements and shift these into place.
      // We need to separately call computeKnownBits for each set of
      // sub elements as the known bits for each are likely to be different.
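      // (Illustrative example: for a v2i32 -> i64 bitcast on a little-endian
      // target, SubScale is 2; the first pass gathers element 0's known bits
      // into bits [31:0] and the second gathers element 1's into [63:32].)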
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts(NumElts * SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);

      for (unsigned i = 0; i != SubScale; ++i) {
        Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
                                  Depth + 1);
        unsigned Shifts = IsLE ? i : SubScale - 1 - i;
        Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
        Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
      }
    }

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SubBitWidth % BitWidth) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");

      // Collect known bits for the (smaller) output by collecting the known
      // bits from the overlapping larger input elements and extracting the
      // sub sections we actually care about.
      unsigned SubScale = SubBitWidth / BitWidth;
      APInt SubDemandedElts(NumElts / SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i / SubScale);

      Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);

      Known.Zero.setAllBits(); Known.One.setAllBits();
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Shifts = IsLE ? i : NumElts - 1 - i;
          unsigned Offset = (Shifts % SubScale) * BitWidth;
          Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
          Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
          // If we don't know any bits, early out.
          if (Known.isUnknown())
            break;
        }
    }
    break;
  }
  case ISD::AND:
    // If either the LHS or the RHS is zero, the result is zero.
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 bits are known to be clear if zero in either the LHS or
    // the RHS.
    Known.Zero |= Known2.Zero;
    break;
  case ISD::OR:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 bits are known to be set if set in either the LHS or the
    // RHS.
    Known.One |= Known2.One;
    break;
  case ISD::XOR: {
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 bits are known to be set if set in only one of the LHS
    // and RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = KnownZeroOut;
    break;
  }
  case ISD::MUL: {
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
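    // (For example, if each operand is a known multiple of 4, i.e. has two
    // known trailing zero bits, the product is a known multiple of 16:
    // TrailZ = 2 + 2 = 4.)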
2709 unsigned TrailZ = Known.countMinTrailingZeros() + 2710 Known2.countMinTrailingZeros(); 2711 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 2712 Known2.countMinLeadingZeros(), 2713 BitWidth) - BitWidth; 2714 2715 Known.resetAll(); 2716 Known.Zero.setLowBits(std::min(TrailZ, BitWidth)); 2717 Known.Zero.setHighBits(std::min(LeadZ, BitWidth)); 2718 break; 2719 } 2720 case ISD::UDIV: { 2721 // For the purposes of computing leading zeros we can conservatively 2722 // treat a udiv as a logical right shift by the power of 2 known to 2723 // be less than the denominator. 2724 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2725 unsigned LeadZ = Known2.countMinLeadingZeros(); 2726 2727 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 2728 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 2729 if (RHSMaxLeadingZeros != BitWidth) 2730 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 2731 2732 Known.Zero.setHighBits(LeadZ); 2733 break; 2734 } 2735 case ISD::SELECT: 2736 case ISD::VSELECT: 2737 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2738 // If we don't know any bits, early out. 2739 if (Known.isUnknown()) 2740 break; 2741 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1); 2742 2743 // Only known if known in both the LHS and RHS. 2744 Known.One &= Known2.One; 2745 Known.Zero &= Known2.Zero; 2746 break; 2747 case ISD::SELECT_CC: 2748 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1); 2749 // If we don't know any bits, early out. 2750 if (Known.isUnknown()) 2751 break; 2752 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); 2753 2754 // Only known if known in both the LHS and RHS. 2755 Known.One &= Known2.One; 2756 Known.Zero &= Known2.Zero; 2757 break; 2758 case ISD::SMULO: 2759 case ISD::UMULO: 2760 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 2761 if (Op.getResNo() != 1) 2762 break; 2763 // The boolean result conforms to getBooleanContents. 2764 // If we know the result of a setcc has the top bits zero, use this info. 2765 // We know that we have an integer-based boolean since these operations 2766 // are only available for integer. 2767 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == 2768 TargetLowering::ZeroOrOneBooleanContent && 2769 BitWidth > 1) 2770 Known.Zero.setBitsFrom(1); 2771 break; 2772 case ISD::SETCC: 2773 // If we know the result of a setcc has the top bits zero, use this info. 2774 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 2775 TargetLowering::ZeroOrOneBooleanContent && 2776 BitWidth > 1) 2777 Known.Zero.setBitsFrom(1); 2778 break; 2779 case ISD::SHL: 2780 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2781 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2782 unsigned Shift = ShAmt->getZExtValue(); 2783 Known.Zero <<= Shift; 2784 Known.One <<= Shift; 2785 // Low bits are known zero. 2786 Known.Zero.setLowBits(Shift); 2787 } 2788 break; 2789 case ISD::SRL: 2790 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) { 2791 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2792 unsigned Shift = ShAmt->getZExtValue(); 2793 Known.Zero.lshrInPlace(Shift); 2794 Known.One.lshrInPlace(Shift); 2795 // High bits are known zero. 
      Known.Zero.setHighBits(Shift);
    } else if (auto *BV = dyn_cast<BuildVectorSDNode>(Op.getOperand(1))) {
      // If the shift amount is a vector of constants, see if we can bound
      // the number of upper zero bits.
      unsigned ShiftAmountMin = BitWidth;
      for (unsigned i = 0; i != BV->getNumOperands(); ++i) {
        if (auto *C = dyn_cast<ConstantSDNode>(BV->getOperand(i))) {
          const APInt &ShAmt = C->getAPIntValue();
          if (ShAmt.ult(BitWidth)) {
            ShiftAmountMin = std::min<unsigned>(ShiftAmountMin,
                                                ShAmt.getZExtValue());
            continue;
          }
        }
        // Don't know anything.
        ShiftAmountMin = 0;
        break;
      }

      Known.Zero.setHighBits(ShiftAmountMin);
    }
    break;
  case ISD::SRA:
    if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      unsigned Shift = ShAmt->getZExtValue();
      // Sign extend known zero/one bit (else is unknown).
      Known.Zero.ashrInPlace(Shift);
      Known.One.ashrInPlace(Shift);
    }
    break;
  case ISD::FSHL:
  case ISD::FSHR:
    if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
      unsigned Amt = C->getAPIntValue().urem(BitWidth);

      // For fshl, 0-shift returns the 1st arg.
      // For fshr, 0-shift returns the 2nd arg.
      if (Amt == 0) {
        Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
                                 DemandedElts, Depth + 1);
        break;
      }

      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
      if (Opcode == ISD::FSHL) {
        Known.One <<= Amt;
        Known.Zero <<= Amt;
        Known2.One.lshrInPlace(BitWidth - Amt);
        Known2.Zero.lshrInPlace(BitWidth - Amt);
      } else {
        Known.One <<= BitWidth - Amt;
        Known.Zero <<= BitWidth - Amt;
        Known2.One.lshrInPlace(Amt);
        Known2.Zero.lshrInPlace(Amt);
      }
      Known.One |= Known2.One;
      Known.Zero |= Known2.Zero;
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = ExtVT.getScalarSizeInBits();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignMask = APInt::getSignMask(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignMask = InSignMask.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignMask;

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known.One &= InputDemandedBits;
    Known.Zero &= InputDemandedBits;

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
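    // (For example, for sign_extend_inreg from i8 where bit 7 of the input is
    // known clear, all of the extended high bits become known zero too.)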
    if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear
      Known.Zero |= NewBits;
      Known.One &= ~NewBits;
    } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set
      Known.One |= NewBits;
      Known.Zero &= ~NewBits;
    } else { // Input sign bit unknown
      Known.Zero &= ~NewBits;
      Known.One &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleTZ = Known2.countMaxTrailingZeros();
    unsigned LowBits = Log2_32(PossibleTZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleLZ = Known2.countMaxLeadingZeros();
    unsigned LowBits = Log2_32(PossibleLZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTPOP: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we know some of the bits are zero, they can't be one.
    unsigned PossibleOnes = Known2.countMaxPopulation();
    Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
    if (ISD::isNON_EXTLoad(LD) && Cst) {
      // Determine any common known bits from the loaded constant pool value.
      Type *CstTy = Cst->getType();
      if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
        // If it's a vector splat, then we can (quickly) reuse the scalar path.
        // NOTE: We assume all elements match and none are UNDEF.
        if (CstTy->isVectorTy()) {
          if (const Constant *Splat = Cst->getSplatValue()) {
            Cst = Splat;
            CstTy = Cst->getType();
          }
        }
        // TODO - do we need to handle different bitwidths?
        if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
          // Iterate across all vector elements finding common known bits.
          Known.One.setAllBits();
          Known.Zero.setAllBits();
          for (unsigned i = 0; i != NumElts; ++i) {
            if (!DemandedElts[i])
              continue;
            if (Constant *Elt = Cst->getAggregateElement(i)) {
              if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                const APInt &Value = CInt->getValue();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
              if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                APInt Value = CFP->getValueAPF().bitcastToAPInt();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
            }
            Known.One.clearAllBits();
            Known.Zero.clearAllBits();
            break;
          }
        } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
          if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
            const APInt &Value = CInt->getValue();
            Known.One = Value;
            Known.Zero = ~Value;
          } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
            APInt Value = CFP->getValueAPF().bitcastToAPInt();
            Known.One = Value;
            Known.Zero = ~Value;
          }
        }
      }
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
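      // (For example, a zextload of an i8 into an i32 result makes bits
      // [31:8] known zero.)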
2974 EVT VT = LD->getMemoryVT(); 2975 unsigned MemBits = VT.getScalarSizeInBits(); 2976 Known.Zero.setBitsFrom(MemBits); 2977 } else if (const MDNode *Ranges = LD->getRanges()) { 2978 if (LD->getExtensionType() == ISD::NON_EXTLOAD) 2979 computeKnownBitsFromRangeMetadata(*Ranges, Known); 2980 } 2981 break; 2982 } 2983 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2984 EVT InVT = Op.getOperand(0).getValueType(); 2985 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 2986 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 2987 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */); 2988 break; 2989 } 2990 case ISD::ZERO_EXTEND: { 2991 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 2992 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */); 2993 break; 2994 } 2995 case ISD::SIGN_EXTEND_VECTOR_INREG: { 2996 EVT InVT = Op.getOperand(0).getValueType(); 2997 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements()); 2998 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); 2999 // If the sign bit is known to be zero or one, then sext will extend 3000 // it to the top bits, else it will just zext. 3001 Known = Known.sext(BitWidth); 3002 break; 3003 } 3004 case ISD::SIGN_EXTEND: { 3005 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3006 // If the sign bit is known to be zero or one, then sext will extend 3007 // it to the top bits, else it will just zext. 3008 Known = Known.sext(BitWidth); 3009 break; 3010 } 3011 case ISD::ANY_EXTEND: { 3012 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3013 Known = Known.zext(BitWidth, false /* ExtendedBitsAreKnownZero */); 3014 break; 3015 } 3016 case ISD::TRUNCATE: { 3017 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3018 Known = Known.trunc(BitWidth); 3019 break; 3020 } 3021 case ISD::AssertZext: { 3022 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 3023 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); 3024 Known = computeKnownBits(Op.getOperand(0), Depth+1); 3025 Known.Zero |= (~InMask); 3026 Known.One &= (~Known.Zero); 3027 break; 3028 } 3029 case ISD::FGETSIGN: 3030 // All bits are zero except the low bit. 3031 Known.Zero.setBitsFrom(1); 3032 break; 3033 case ISD::USUBO: 3034 case ISD::SSUBO: 3035 if (Op.getResNo() == 1) { 3036 // If we know the result of a setcc has the top bits zero, use this info. 3037 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == 3038 TargetLowering::ZeroOrOneBooleanContent && 3039 BitWidth > 1) 3040 Known.Zero.setBitsFrom(1); 3041 break; 3042 } 3043 LLVM_FALLTHROUGH; 3044 case ISD::SUB: 3045 case ISD::SUBC: { 3046 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 3047 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 3048 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false, 3049 Known, Known2); 3050 break; 3051 } 3052 case ISD::UADDO: 3053 case ISD::SADDO: 3054 case ISD::ADDCARRY: 3055 if (Op.getResNo() == 1) { 3056 // If we know the result of a setcc has the top bits zero, use this info. 
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::ADD:
  case ISD::ADDC:
  case ISD::ADDE: {
    assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");

    // With ADDE and ADDCARRY, a carry bit may be added in.
    KnownBits Carry(1);
    if (Opcode == ISD::ADDE)
      // Can't track carry from glue, set carry to unknown.
      Carry.resetAll();
    else if (Opcode == ISD::ADDCARRY)
      // TODO: Compute known bits for the carry operand. Not sure if it is
      // worth the trouble (how often will we find a known carry bit?). And I
      // haven't tested this very much yet, but something like this might work:
      //   Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
      //   Carry = Carry.zextOrTrunc(1, false);
      Carry.resetAll();
    else
      Carry.setAllZero();

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
    break;
  }
  case ISD::SREM:
    if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0))
          Known.One |= ~LowBits;
        assert((Known.Zero & Known.One) == 0 &&
               "Bits known to be one AND zero?");
      }
    }
    break;
  case ISD::UREM: {
    if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

        // The upper bits are all zero, the lower ones are unchanged.
        Known.Zero = Known2.Zero | ~LowBits;
        Known.One = Known2.One & LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
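    // (For example, in a 32-bit urem where the divisor is known to fit in 16
    // bits, the remainder also fits in 16 bits, so the top 16 bits of the
    // result are known zero.)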
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    uint32_t Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    Known = computeKnownBits(Op.getOperand(0), Depth+1);
    const unsigned Index = Op.getConstantOperandVal(1);
    const unsigned EltBitWidth = Op.getValueSizeInBits();

    // Remove the low part of the known bits mask.
    Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
    Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);

    // Remove the high part of the known bits mask.
    Known = Known.trunc(EltBitWidth);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue EltNo = Op.getOperand(1);
    EVT VecVT = InVec.getValueType();
    const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
    const unsigned NumSrcElts = VecVT.getVectorNumElements();
    // If BitWidth > EltBitWidth the value is any-extended, so we do not know
    // anything about the extended bits.
    if (BitWidth > EltBitWidth)
      Known = Known.trunc(EltBitWidth);
    ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
      // If we know the element index, just demand that vector element.
      unsigned Idx = ConstEltNo->getZExtValue();
      APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
      Known = computeKnownBits(InVec, DemandedElt, Depth + 1);
    } else {
      // Unknown element index, so ignore DemandedElts and demand them all.
      Known = computeKnownBits(InVec, Depth + 1);
    }
    if (BitWidth > EltBitWidth)
      Known = Known.zext(BitWidth, false /* => any extend */);
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue InVal = Op.getOperand(1);
    SDValue EltNo = Op.getOperand(2);

    ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      // If we know the element index, split the demand between the
      // source vector and the inserted element.
      Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
      unsigned EltIdx = CEltNo->getZExtValue();

      // If we demand the inserted element then add its common known bits.
      if (DemandedElts[EltIdx]) {
        Known2 = computeKnownBits(InVal, Depth + 1);
        Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
        Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
      }

      // If we demand the source vector then add its common known bits,
      // ensuring that we don't demand the inserted element.
      APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
      if (!!VectorElts) {
        Known2 = computeKnownBits(InVec, VectorElts, Depth + 1);
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    } else {
      // Unknown element index, so ignore DemandedElts and demand them all.
3204       Known = computeKnownBits(InVec, Depth + 1);
3205       Known2 = computeKnownBits(InVal, Depth + 1);
3206       Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3207       Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3208     }
3209     break;
3210   }
3211   case ISD::BITREVERSE: {
3212     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3213     Known.Zero = Known2.Zero.reverseBits();
3214     Known.One = Known2.One.reverseBits();
3215     break;
3216   }
3217   case ISD::BSWAP: {
3218     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3219     Known.Zero = Known2.Zero.byteSwap();
3220     Known.One = Known2.One.byteSwap();
3221     break;
3222   }
3223   case ISD::ABS: {
3224     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3225
3226     // If the source's MSB is zero then we know the rest of the bits already.
3227     if (Known2.isNonNegative()) {
3228       Known.Zero = Known2.Zero;
3229       Known.One = Known2.One;
3230       break;
3231     }
3232
3233     // We only know that the absolute value's MSB will be zero iff there is
3234     // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
3235     Known2.One.clearSignBit();
3236     if (Known2.One.getBoolValue()) {
3237       Known.Zero = APInt::getSignMask(BitWidth);
3238       break;
3239     }
3240     break;
3241   }
3242   case ISD::UMIN: {
3243     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3244     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3245
3246     // UMIN - we know that the result will have the maximum of the
3247     // known zero leading bits of the inputs.
3248     unsigned LeadZero = Known.countMinLeadingZeros();
3249     LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
3250
3251     Known.Zero &= Known2.Zero;
3252     Known.One &= Known2.One;
3253     Known.Zero.setHighBits(LeadZero);
3254     break;
3255   }
3256   case ISD::UMAX: {
3257     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3258     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3259
3260     // UMAX - we know that the result will have the maximum of the
3261     // known one leading bits of the inputs.
3262     unsigned LeadOne = Known.countMinLeadingOnes();
3263     LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
3264
3265     Known.Zero &= Known2.Zero;
3266     Known.One &= Known2.One;
3267     Known.One.setHighBits(LeadOne);
3268     break;
3269   }
3270   case ISD::SMIN:
3271   case ISD::SMAX: {
3272     // If we have a clamp pattern, we know that the number of sign bits will be
3273     // the minimum of the clamp min/max range.
3274     bool IsMax = (Opcode == ISD::SMAX);
3275     ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3276     if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3277       if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3278         CstHigh =
3279             isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3280     if (CstLow && CstHigh) {
3281       if (!IsMax)
3282         std::swap(CstLow, CstHigh);
3283
3284       const APInt &ValueLow = CstLow->getAPIntValue();
3285       const APInt &ValueHigh = CstHigh->getAPIntValue();
3286       if (ValueLow.sle(ValueHigh)) {
3287         unsigned LowSignBits = ValueLow.getNumSignBits();
3288         unsigned HighSignBits = ValueHigh.getNumSignBits();
3289         unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3290         if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3291           Known.One.setHighBits(MinSignBits);
3292           break;
3293         }
3294         if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3295           Known.Zero.setHighBits(MinSignBits);
3296           break;
3297         }
3298       }
3299     }
3300
3301     // Fallback - just get the shared known bits of the operands.
3302     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3303     if (Known.isUnknown()) break; // Early-out
3304     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3305     Known.Zero &= Known2.Zero;
3306     Known.One &= Known2.One;
3307     break;
3308   }
3309   case ISD::FrameIndex:
3310   case ISD::TargetFrameIndex:
3311     TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
3312     break;
3313
3314   default:
3315     if (Opcode < ISD::BUILTIN_OP_END)
3316       break;
3317     LLVM_FALLTHROUGH;
3318   case ISD::INTRINSIC_WO_CHAIN:
3319   case ISD::INTRINSIC_W_CHAIN:
3320   case ISD::INTRINSIC_VOID:
3321     // Allow the target to implement this method for its nodes.
3322     TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3323     break;
3324   }
3325
3326   assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3327   return Known;
3328 }
3329
3330 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3331                                                              SDValue N1) const {
3332   // X + 0 never overflows.
3333   if (isNullConstant(N1))
3334     return OFK_Never;
3335
3336   KnownBits N1Known = computeKnownBits(N1);
3337   if (N1Known.Zero.getBoolValue()) {
3338     KnownBits N0Known = computeKnownBits(N0);
3339
3340     bool overflow;
3341     (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow);
3342     if (!overflow)
3343       return OFK_Never;
3344   }
3345
3346   // mulhi + 1 never overflows.
3347   if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3348       (~N1Known.Zero & 0x01) == ~N1Known.Zero)
3349     return OFK_Never;
3350
3351   if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3352     KnownBits N0Known = computeKnownBits(N0);
3353
3354     if ((~N0Known.Zero & 0x01) == ~N0Known.Zero)
3355       return OFK_Never;
3356   }
3357
3358   return OFK_Sometime;
3359 }
3360
3361 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3362   EVT OpVT = Val.getValueType();
3363   unsigned BitWidth = OpVT.getScalarSizeInBits();
3364
3365   // Is the constant a known power of 2?
3366   if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3367     return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3368
3369   // A left-shift of a constant one will have exactly one bit set because
3370   // shifting the bit off the end is undefined.
3371   if (Val.getOpcode() == ISD::SHL) {
3372     auto *C = isConstOrConstSplat(Val.getOperand(0));
3373     if (C && C->getAPIntValue() == 1)
3374       return true;
3375   }
3376
3377   // Similarly, a logical right-shift of a constant sign-bit will have exactly
3378   // one bit set.
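  // For example (i8, illustrative): srl 0b10000000, C yields one of
  // 0b10000000 ... 0b00000001 for the defined shift amounts 0-7, so exactly
  // one bit remains set.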
3379 if (Val.getOpcode() == ISD::SRL) { 3380 auto *C = isConstOrConstSplat(Val.getOperand(0)); 3381 if (C && C->getAPIntValue().isSignMask()) 3382 return true; 3383 } 3384 3385 // Are all operands of a build vector constant powers of two? 3386 if (Val.getOpcode() == ISD::BUILD_VECTOR) 3387 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { 3388 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) 3389 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); 3390 return false; 3391 })) 3392 return true; 3393 3394 // More could be done here, though the above checks are enough 3395 // to handle some common cases. 3396 3397 // Fall back to computeKnownBits to catch other known cases. 3398 KnownBits Known = computeKnownBits(Val); 3399 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1); 3400 } 3401 3402 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { 3403 EVT VT = Op.getValueType(); 3404 APInt DemandedElts = VT.isVector() 3405 ? APInt::getAllOnesValue(VT.getVectorNumElements()) 3406 : APInt(1, 1); 3407 return ComputeNumSignBits(Op, DemandedElts, Depth); 3408 } 3409 3410 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, 3411 unsigned Depth) const { 3412 EVT VT = Op.getValueType(); 3413 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); 3414 unsigned VTBits = VT.getScalarSizeInBits(); 3415 unsigned NumElts = DemandedElts.getBitWidth(); 3416 unsigned Tmp, Tmp2; 3417 unsigned FirstAnswer = 1; 3418 3419 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3420 const APInt &Val = C->getAPIntValue(); 3421 return Val.getNumSignBits(); 3422 } 3423 3424 if (Depth == 6) 3425 return 1; // Limit search depth. 3426 3427 if (!DemandedElts) 3428 return 1; // No demanded elts, better to assume we don't know anything. 3429 3430 unsigned Opcode = Op.getOpcode(); 3431 switch (Opcode) { 3432 default: break; 3433 case ISD::AssertSext: 3434 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3435 return VTBits-Tmp+1; 3436 case ISD::AssertZext: 3437 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); 3438 return VTBits-Tmp; 3439 3440 case ISD::BUILD_VECTOR: 3441 Tmp = VTBits; 3442 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { 3443 if (!DemandedElts[i]) 3444 continue; 3445 3446 SDValue SrcOp = Op.getOperand(i); 3447 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1); 3448 3449 // BUILD_VECTOR can implicitly truncate sources, we must handle this. 3450 if (SrcOp.getValueSizeInBits() != VTBits) { 3451 assert(SrcOp.getValueSizeInBits() > VTBits && 3452 "Expected BUILD_VECTOR implicit truncation"); 3453 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; 3454 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); 3455 } 3456 Tmp = std::min(Tmp, Tmp2); 3457 } 3458 return Tmp; 3459 3460 case ISD::VECTOR_SHUFFLE: { 3461 // Collect the minimum number of sign bits that are shared by every vector 3462 // element referenced by the shuffle. 3463 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 3464 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); 3465 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); 3466 for (unsigned i = 0; i != NumElts; ++i) { 3467 int M = SVN->getMaskElt(i); 3468 if (!DemandedElts[i]) 3469 continue; 3470 // For UNDEF elements, we don't know anything about the common state of 3471 // the shuffle result. 
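      // (Sketch: with every element demanded, a mask such as <1, -1, 3, -1>
      // hits this conservative bail-out, because the undef lanes could take
      // any value after lowering.)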
3472       if (M < 0)
3473         return 1;
3474       if ((unsigned)M < NumElts)
3475         DemandedLHS.setBit((unsigned)M % NumElts);
3476       else
3477         DemandedRHS.setBit((unsigned)M % NumElts);
3478     }
3479     Tmp = std::numeric_limits<unsigned>::max();
3480     if (!!DemandedLHS)
3481       Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3482     if (!!DemandedRHS) {
3483       Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3484       Tmp = std::min(Tmp, Tmp2);
3485     }
3486     // If we don't know anything, early out and try computeKnownBits fall-back.
3487     if (Tmp == 1)
3488       break;
3489     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3490     return Tmp;
3491   }
3492
3493   case ISD::BITCAST: {
3494     SDValue N0 = Op.getOperand(0);
3495     EVT SrcVT = N0.getValueType();
3496     unsigned SrcBits = SrcVT.getScalarSizeInBits();
3497
3498     // Ignore bitcasts from unsupported types.
3499     if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3500       break;
3501
3502     // Fast handling of 'identity' bitcasts.
3503     if (VTBits == SrcBits)
3504       return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3505
3506     bool IsLE = getDataLayout().isLittleEndian();
3507
3508     // Bitcast 'large element' scalar/vector to 'small element' vector.
3509     if ((SrcBits % VTBits) == 0) {
3510       assert(VT.isVector() && "Expected bitcast to vector");
3511
3512       unsigned Scale = SrcBits / VTBits;
3513       APInt SrcDemandedElts(NumElts / Scale, 0);
3514       for (unsigned i = 0; i != NumElts; ++i)
3515         if (DemandedElts[i])
3516           SrcDemandedElts.setBit(i / Scale);
3517
3518       // Fast case - sign splat can be simply split across the small elements.
3519       Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3520       if (Tmp == SrcBits)
3521         return VTBits;
3522
3523       // Slow case - determine how far the sign extends into each sub-element.
3524       Tmp2 = VTBits;
3525       for (unsigned i = 0; i != NumElts; ++i)
3526         if (DemandedElts[i]) {
3527           unsigned SubOffset = i % Scale;
3528           SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3529           SubOffset = SubOffset * VTBits;
3530           if (Tmp <= SubOffset)
3531             return 1;
3532           Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3533         }
3534       return Tmp2;
3535     }
3536     break;
3537   }
3538
3539   case ISD::SIGN_EXTEND:
3540     Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3541     return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3542   case ISD::SIGN_EXTEND_INREG:
3543     // Max of the input and what this extends.
3544     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3545     Tmp = VTBits-Tmp+1;
3546     Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3547     return std::max(Tmp, Tmp2);
3548   case ISD::SIGN_EXTEND_VECTOR_INREG: {
3549     SDValue Src = Op.getOperand(0);
3550     EVT SrcVT = Src.getValueType();
3551     APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3552     Tmp = VTBits - SrcVT.getScalarSizeInBits();
3553     return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3554   }
3555
3556   case ISD::SRA:
3557     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3558     // SRA X, C -> adds C sign bits.
3559     if (ConstantSDNode *C =
3560             isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3561       APInt ShiftVal = C->getAPIntValue();
3562       ShiftVal += Tmp;
3563       Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
3564     }
3565     return Tmp;
3566   case ISD::SHL:
3567     if (ConstantSDNode *C =
3568             isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3569       // shl destroys sign bits.
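      // e.g. (illustrative) an i16 known to have 10 sign bits, shifted left
      // by a constant 3, still has 10 - 3 = 7 sign bits; a shift of 10 or
      // more leaves no usable sign-bit information.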
3570 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3571 if (C->getAPIntValue().uge(VTBits) || // Bad shift. 3572 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out. 3573 return Tmp - C->getZExtValue(); 3574 } 3575 break; 3576 case ISD::AND: 3577 case ISD::OR: 3578 case ISD::XOR: // NOT is handled here. 3579 // Logical binary ops preserve the number of sign bits at the worst. 3580 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); 3581 if (Tmp != 1) { 3582 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3583 FirstAnswer = std::min(Tmp, Tmp2); 3584 // We computed what we know about the sign bits as our first 3585 // answer. Now proceed to the generic code that uses 3586 // computeKnownBits, and pick whichever answer is better. 3587 } 3588 break; 3589 3590 case ISD::SELECT: 3591 case ISD::VSELECT: 3592 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); 3593 if (Tmp == 1) return 1; // Early out. 3594 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3595 return std::min(Tmp, Tmp2); 3596 case ISD::SELECT_CC: 3597 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); 3598 if (Tmp == 1) return 1; // Early out. 3599 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); 3600 return std::min(Tmp, Tmp2); 3601 3602 case ISD::SMIN: 3603 case ISD::SMAX: { 3604 // If we have a clamp pattern, we know that the number of sign bits will be 3605 // the minimum of the clamp min/max range. 3606 bool IsMax = (Opcode == ISD::SMAX); 3607 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; 3608 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) 3609 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) 3610 CstHigh = 3611 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); 3612 if (CstLow && CstHigh) { 3613 if (!IsMax) 3614 std::swap(CstLow, CstHigh); 3615 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { 3616 Tmp = CstLow->getAPIntValue().getNumSignBits(); 3617 Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); 3618 return std::min(Tmp, Tmp2); 3619 } 3620 } 3621 3622 // Fallback - just get the minimum number of sign bits of the operands. 3623 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3624 if (Tmp == 1) 3625 return 1; // Early out. 3626 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3627 return std::min(Tmp, Tmp2); 3628 } 3629 case ISD::UMIN: 3630 case ISD::UMAX: 3631 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1); 3632 if (Tmp == 1) 3633 return 1; // Early out. 3634 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); 3635 return std::min(Tmp, Tmp2); 3636 case ISD::SADDO: 3637 case ISD::UADDO: 3638 case ISD::SSUBO: 3639 case ISD::USUBO: 3640 case ISD::SMULO: 3641 case ISD::UMULO: 3642 if (Op.getResNo() != 1) 3643 break; 3644 // The boolean result conforms to getBooleanContents. Fall through. 3645 // If setcc returns 0/-1, all bits are sign bits. 3646 // We know that we have an integer-based boolean since these operations 3647 // are only available for integer. 3648 if (TLI->getBooleanContents(VT.isVector(), false) == 3649 TargetLowering::ZeroOrNegativeOneBooleanContent) 3650 return VTBits; 3651 break; 3652 case ISD::SETCC: 3653 // If setcc returns 0/-1, all bits are sign bits. 
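    // e.g. on a target using ZeroOrNegativeOneBooleanContent, each lane of a
    // vector setcc is all-zeros or all-ones, i.e. every bit is a sign bit.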
3654     if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3655         TargetLowering::ZeroOrNegativeOneBooleanContent)
3656       return VTBits;
3657     break;
3658   case ISD::ROTL:
3659   case ISD::ROTR:
3660     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3661       unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3662
3663       // Handle rotate right by N like a rotate left by VTBits-N.
3664       if (Opcode == ISD::ROTR)
3665         RotAmt = (VTBits - RotAmt) % VTBits;
3666
3667       // If we aren't rotating out all of the known-in sign bits, return the
3668       // number of sign bits that remain. This handles rotl(sext(x), 1) for example.
3669       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3670       if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3671     }
3672     break;
3673   case ISD::ADD:
3674   case ISD::ADDC:
3675     // Add can have at most one carry bit. Thus we know that the output
3676     // is, at worst, one more bit than the inputs.
3677     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3678     if (Tmp == 1) return 1; // Early out.
3679
3680     // Special case decrementing a value (ADD X, -1):
3681     if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3682       if (CRHS->isAllOnesValue()) {
3683         KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1);
3684
3685         // If the input is known to be 0 or 1, the output is 0/-1, which is all
3686         // sign bits set.
3687         if ((Known.Zero | 1).isAllOnesValue())
3688           return VTBits;
3689
3690         // If we are subtracting one from a positive number, there is no carry
3691         // out of the result.
3692         if (Known.isNonNegative())
3693           return Tmp;
3694       }
3695
3696     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3697     if (Tmp2 == 1) return 1;
3698     return std::min(Tmp, Tmp2)-1;
3699
3700   case ISD::SUB:
3701     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3702     if (Tmp2 == 1) return 1;
3703
3704     // Handle NEG.
3705     if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3706       if (CLHS->isNullValue()) {
3707         KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1);
3708         // If the input is known to be 0 or 1, the output is 0/-1, which is all
3709         // sign bits set.
3710         if ((Known.Zero | 1).isAllOnesValue())
3711           return VTBits;
3712
3713         // If the input is known to be positive (the sign bit is known clear),
3714         // the output of the NEG has the same number of sign bits as the input.
3715         if (Known.isNonNegative())
3716           return Tmp2;
3717
3718         // Otherwise, we treat this like a SUB.
3719       }
3720
3721     // Sub can have at most one carry bit. Thus we know that the output
3722     // is, at worst, one more bit than the inputs.
3723     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3724     if (Tmp == 1) return 1; // Early out.
3725     return std::min(Tmp, Tmp2)-1;
3726   case ISD::TRUNCATE: {
3727     // Check if the sign bits of the source go down as far as the truncated value.
3728     unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3729     unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3730     if (NumSrcSignBits > (NumSrcBits - VTBits))
3731       return NumSrcSignBits - (NumSrcBits - VTBits);
3732     break;
3733   }
3734   case ISD::EXTRACT_ELEMENT: {
3735     const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3736     const int BitWidth = Op.getValueSizeInBits();
3737     const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3738
3739     // Get the reverse index (starting from 1): operand 1 indexes elements
3740     // from the little end, while the sign bits start at the big end.
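    // Illustrative: for an i64 with 40 sign bits split into i32 halves,
    // extracting element 1 (the high half) gives rIndex = 0 and
    // min(40 - 0*32, 32) = 32 sign bits, while extracting element 0 (the low
    // half) gives rIndex = 1 and max(min(40 - 32, 32), 0) = 8 sign bits.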
3741     const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3742
3743     // If the sign portion ends in our element, the subtraction gives the
3744     // correct result; otherwise it yields a negative or out-of-range value.
3745     return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3746   }
3747   case ISD::INSERT_VECTOR_ELT: {
3748     SDValue InVec = Op.getOperand(0);
3749     SDValue InVal = Op.getOperand(1);
3750     SDValue EltNo = Op.getOperand(2);
3751
3752     ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3753     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3754       // If we know the element index, split the demand between the
3755       // source vector and the inserted element.
3756       unsigned EltIdx = CEltNo->getZExtValue();
3757
3758       // If we demand the inserted element then get its sign bits.
3759       Tmp = std::numeric_limits<unsigned>::max();
3760       if (DemandedElts[EltIdx]) {
3761         // TODO - handle implicit truncation of inserted elements.
3762         if (InVal.getScalarValueSizeInBits() != VTBits)
3763           break;
3764         Tmp = ComputeNumSignBits(InVal, Depth + 1);
3765       }
3766
3767       // If we demand the source vector then get its sign bits, and determine
3768       // the minimum.
3769       APInt VectorElts = DemandedElts;
3770       VectorElts.clearBit(EltIdx);
3771       if (!!VectorElts) {
3772         Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3773         Tmp = std::min(Tmp, Tmp2);
3774       }
3775     } else {
3776       // Unknown element index, so ignore DemandedElts and demand them all.
3777       Tmp = ComputeNumSignBits(InVec, Depth + 1);
3778       Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3779       Tmp = std::min(Tmp, Tmp2);
3780     }
3781     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3782     return Tmp;
3783   }
3784   case ISD::EXTRACT_VECTOR_ELT: {
3785     SDValue InVec = Op.getOperand(0);
3786     SDValue EltNo = Op.getOperand(1);
3787     EVT VecVT = InVec.getValueType();
3788     const unsigned BitWidth = Op.getValueSizeInBits();
3789     const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3790     const unsigned NumSrcElts = VecVT.getVectorNumElements();
3791
3792     // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
3793     // anything about sign bits. But if the sizes match we can derive knowledge
3794     // about sign bits from the vector operand.
3795     if (BitWidth != EltBitWidth)
3796       break;
3797
3798     // If we know the element index, just demand that vector element, else for
3799     // an unknown element index, ignore DemandedElts and demand them all.
3800     APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3801     ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3802     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3803       DemandedSrcElts =
3804           APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3805
3806     return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3807   }
3808   case ISD::EXTRACT_SUBVECTOR: {
3809     // If we know the element index, just demand those subvector elements,
3810     // otherwise demand them all.
3811     SDValue Src = Op.getOperand(0);
3812     ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3813     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3814     if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3815       // Offset the demanded elts by the subvector index.
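      // e.g. extracting a v4i32 subvector at index 4 from a v8i32 source maps
      // demanded result element i to source element i + 4, which is what the
      // shl of the demanded-elements mask below computes.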
3816       uint64_t Idx = SubIdx->getZExtValue();
3817       APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3818       return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3819     }
3820     return ComputeNumSignBits(Src, Depth + 1);
3821   }
3822   case ISD::CONCAT_VECTORS: {
3823     // Determine the minimum number of sign bits across all demanded
3824     // elts of the input vectors. Early out if the result is already 1.
3825     Tmp = std::numeric_limits<unsigned>::max();
3826     EVT SubVectorVT = Op.getOperand(0).getValueType();
3827     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3828     unsigned NumSubVectors = Op.getNumOperands();
3829     for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3830       APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3831       DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3832       if (!DemandedSub)
3833         continue;
3834       Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3835       Tmp = std::min(Tmp, Tmp2);
3836     }
3837     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3838     return Tmp;
3839   }
3840   case ISD::INSERT_SUBVECTOR: {
3841     // If we know the element index, demand any elements from the subvector and
3842     // the remainder from the src it's inserted into, otherwise demand them all.
3843     SDValue Src = Op.getOperand(0);
3844     SDValue Sub = Op.getOperand(1);
3845     auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
3846     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3847     if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
3848       Tmp = std::numeric_limits<unsigned>::max();
3849       uint64_t Idx = SubIdx->getZExtValue();
3850       APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3851       if (!!DemandedSubElts) {
3852         Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
3853         if (Tmp == 1) return 1; // early-out
3854       }
3855       APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
3856       APInt DemandedSrcElts = DemandedElts & ~SubMask;
3857       if (!!DemandedSrcElts) {
3858         Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3859         Tmp = std::min(Tmp, Tmp2);
3860       }
3861       assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3862       return Tmp;
3863     }
3864
3865     // Not able to determine the index, so just assume the worst case.
3866     Tmp = ComputeNumSignBits(Sub, Depth + 1);
3867     if (Tmp == 1) return 1; // early-out
3868     Tmp2 = ComputeNumSignBits(Src, Depth + 1);
3869     Tmp = std::min(Tmp, Tmp2);
3870     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3871     return Tmp;
3872   }
3873   }
3874
3875   // If we are looking at the loaded value of the SDNode.
3876   if (Op.getResNo() == 0) {
3877     // Handle LOADX separately here. The EXTLOAD case will fall through.
3878     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3879       unsigned ExtType = LD->getExtensionType();
3880       switch (ExtType) {
3881       default: break;
3882       case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
3883         Tmp = LD->getMemoryVT().getScalarSizeInBits();
3884         return VTBits - Tmp + 1;
3885       case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
3886         Tmp = LD->getMemoryVT().getScalarSizeInBits();
3887         return VTBits - Tmp;
3888       case ISD::NON_EXTLOAD:
3889         if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
3890           // We only need to handle vectors - computeKnownBits should handle
3891           // scalar cases.
3892           Type *CstTy = Cst->getType();
3893           if (CstTy->isVectorTy() &&
3894               (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
3895             Tmp = VTBits;
3896             for (unsigned i = 0; i != NumElts; ++i) {
3897               if (!DemandedElts[i])
3898                 continue;
3899               if (Constant *Elt = Cst->getAggregateElement(i)) {
3900                 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3901                   const APInt &Value = CInt->getValue();
3902                   Tmp = std::min(Tmp, Value.getNumSignBits());
3903                   continue;
3904                 }
3905                 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3906                   APInt Value = CFP->getValueAPF().bitcastToAPInt();
3907                   Tmp = std::min(Tmp, Value.getNumSignBits());
3908                   continue;
3909                 }
3910               }
3911               // Unknown type. Conservatively assume no bits match sign bit.
3912               return 1;
3913             }
3914             return Tmp;
3915           }
3916         }
3917         break;
3918       }
3919     }
3920   }
3921
3922   // Allow the target to implement this method for its nodes.
3923   if (Opcode >= ISD::BUILTIN_OP_END ||
3924       Opcode == ISD::INTRINSIC_WO_CHAIN ||
3925       Opcode == ISD::INTRINSIC_W_CHAIN ||
3926       Opcode == ISD::INTRINSIC_VOID) {
3927     unsigned NumBits =
3928         TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
3929     if (NumBits > 1)
3930       FirstAnswer = std::max(FirstAnswer, NumBits);
3931   }
3932
3933   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3934   // use this information.
3935   KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
3936
3937   APInt Mask;
3938   if (Known.isNonNegative()) {        // sign bit is 0
3939     Mask = Known.Zero;
3940   } else if (Known.isNegative()) {    // sign bit is 1
3941     Mask = Known.One;
3942   } else {
3943     // Nothing known.
3944     return FirstAnswer;
3945   }
3946
3947   // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3948   // the number of identical bits in the top of the input value.
3949   Mask = ~Mask;
3950   Mask <<= Mask.getBitWidth()-VTBits;
3951   // Return # leading zeros. We use 'min' here in case Val was zero before
3952   // shifting. We don't want to return '64' as for an i32 "0".
3953   return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3954 }
3955
3956 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3957   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3958       !isa<ConstantSDNode>(Op.getOperand(1)))
3959     return false;
3960
3961   if (Op.getOpcode() == ISD::OR &&
3962       !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
3963     return false;
3964
3965   return true;
3966 }
3967
3968 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
3969   // If we're told that NaNs won't happen, assume they won't.
3970   if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
3971     return true;
3972
3973   if (Depth == 6)
3974     return false; // Limit search depth.
3975
3976   // TODO: Handle vectors.
3977   // If the value is a constant, we can obviously see if it is a NaN or not.
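  // For example, a ConstantFP of 1.0 is trivially never NaN, and a quiet NaN
  // constant still satisfies a query that only asks about signaling NaNs
  // (SNaN == true).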
3978   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
3979     return !C->getValueAPF().isNaN() ||
3980            (SNaN && !C->getValueAPF().isSignaling());
3981   }
3982
3983   unsigned Opcode = Op.getOpcode();
3984   switch (Opcode) {
3985   case ISD::FADD:
3986   case ISD::FSUB:
3987   case ISD::FMUL:
3988   case ISD::FDIV:
3989   case ISD::FREM:
3990   case ISD::FSIN:
3991   case ISD::FCOS: {
3992     if (SNaN)
3993       return true;
3994     // TODO: Need isKnownNeverInfinity
3995     return false;
3996   }
3997   case ISD::FCANONICALIZE:
3998   case ISD::FEXP:
3999   case ISD::FEXP2:
4000   case ISD::FTRUNC:
4001   case ISD::FFLOOR:
4002   case ISD::FCEIL:
4003   case ISD::FROUND:
4004   case ISD::FRINT:
4005   case ISD::FNEARBYINT: {
4006     if (SNaN)
4007       return true;
4008     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4009   }
4010   case ISD::FABS:
4011   case ISD::FNEG:
4012   case ISD::FCOPYSIGN: {
4013     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4014   }
4015   case ISD::SELECT:
4016     return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4017            isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4018   case ISD::FP_EXTEND:
4019   case ISD::FP_ROUND: {
4020     if (SNaN)
4021       return true;
4022     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4023   }
4024   case ISD::SINT_TO_FP:
4025   case ISD::UINT_TO_FP:
4026     return true;
4027   case ISD::FMA:
4028   case ISD::FMAD: {
4029     if (SNaN)
4030       return true;
4031     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4032            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4033            isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4034   }
4035   case ISD::FSQRT: // Need to know the operand is known non-negative.
4036   case ISD::FLOG:
4037   case ISD::FLOG2:
4038   case ISD::FLOG10:
4039   case ISD::FPOWI:
4040   case ISD::FPOW: {
4041     if (SNaN)
4042       return true;
4043     // TODO: Refine on operand
4044     return false;
4045   }
4046   case ISD::FMINNUM:
4047   case ISD::FMAXNUM: {
4048     // Only one needs to be known not-NaN, since it will be returned if the
4049     // other ends up being one.
4050     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4051            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4052   }
4053   case ISD::FMINNUM_IEEE:
4054   case ISD::FMAXNUM_IEEE: {
4055     if (SNaN)
4056       return true;
4057     // This can return a NaN if either operand is an sNaN, or if both operands
4058     // are NaN.
4059     return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4060             isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4061            (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4062             isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4063   }
4064   case ISD::FMINIMUM:
4065   case ISD::FMAXIMUM: {
4066     // TODO: Does this quiet or return the original NaN as-is?
4067     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4068            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4069   }
4070   case ISD::EXTRACT_VECTOR_ELT: {
4071     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4072   }
4073   default:
4074     if (Opcode >= ISD::BUILTIN_OP_END ||
4075         Opcode == ISD::INTRINSIC_WO_CHAIN ||
4076         Opcode == ISD::INTRINSIC_W_CHAIN ||
4077         Opcode == ISD::INTRINSIC_VOID) {
4078       return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4079     }
4080
4081     return false;
4082   }
4083 }
4084
4085 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4086   assert(Op.getValueType().isFloatingPoint() &&
4087          "Floating point type expected");
4088
4089   // If the value is a constant, we can obviously see if it is a zero or not.
4090   // TODO: Add BuildVector support.
4091   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4092     return !C->isZero();
4093   return false;
4094 }
4095
4096 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4097   assert(!Op.getValueType().isFloatingPoint() &&
4098          "Floating point types unsupported - use isKnownNeverZeroFloat");
4099
4100   // If the value is a constant, we can obviously see if it is a zero or not.
4101   if (ISD::matchUnaryPredicate(
4102           Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4103     return true;
4104
4105   // TODO: Recognize more cases here.
4106   switch (Op.getOpcode()) {
4107   default: break;
4108   case ISD::OR:
4109     if (isKnownNeverZero(Op.getOperand(1)) ||
4110         isKnownNeverZero(Op.getOperand(0)))
4111       return true;
4112     break;
4113   }
4114
4115   return false;
4116 }
4117
4118 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4119   // Check the obvious case.
4120   if (A == B) return true;
4121
4122   // Check for negative and positive zero.
4123   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4124     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4125       if (CA->isZero() && CB->isZero()) return true;
4126
4127   // Otherwise they may not be equal.
4128   return false;
4129 }
4130
4131 // FIXME: unify with llvm::haveNoCommonBitsSet.
4132 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4133 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4134   assert(A.getValueType() == B.getValueType() &&
4135          "Values must have the same type");
4136   return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4137 }
4138
4139 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4140                                 ArrayRef<SDValue> Ops,
4141                                 SelectionDAG &DAG) {
4142   int NumOps = Ops.size();
4143   assert(NumOps != 0 && "Can't build an empty vector!");
4144   assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4145          "Incorrect element count in BUILD_VECTOR!");
4146
4147   // BUILD_VECTOR of UNDEFs is UNDEF.
4148   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4149     return DAG.getUNDEF(VT);
4150
4151   // BUILD_VECTOR of sequential extracts from the same vector + type is an identity.
4152   SDValue IdentitySrc;
4153   bool IsIdentity = true;
4154   for (int i = 0; i != NumOps; ++i) {
4155     if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4156         Ops[i].getOperand(0).getValueType() != VT ||
4157         (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4158         !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4159         cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4160       IsIdentity = false;
4161       break;
4162     }
4163     IdentitySrc = Ops[i].getOperand(0);
4164   }
4165   if (IsIdentity)
4166     return IdentitySrc;
4167
4168   return SDValue();
4169 }
4170
4171 /// Try to simplify vector concatenation to an input value, undef, or build
4172 /// vector.
4173 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4174                                   ArrayRef<SDValue> Ops,
4175                                   SelectionDAG &DAG) {
4176   assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4177   assert(llvm::all_of(Ops,
4178                       [Ops](SDValue Op) {
4179                         return Ops[0].getValueType() == Op.getValueType();
4180                       }) &&
4181          "Concatenation of vectors with inconsistent value types!");
4182   assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
4183              VT.getVectorNumElements() &&
4184          "Incorrect element count in vector concatenation!");
4185
4186   if (Ops.size() == 1)
4187     return Ops[0];
4188
4189   // Concat of UNDEFs is UNDEF.
4190   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4191     return DAG.getUNDEF(VT);
4192
4193   // Scan the operands and look for extract operations from a single source
4194   // that correspond to insertion at the same location via this concatenation:
4195   // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4196   SDValue IdentitySrc;
4197   bool IsIdentity = true;
4198   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4199     SDValue Op = Ops[i];
4200     unsigned IdentityIndex = i * Op.getValueType().getVectorNumElements();
4201     if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4202         Op.getOperand(0).getValueType() != VT ||
4203         (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4204         !isa<ConstantSDNode>(Op.getOperand(1)) ||
4205         Op.getConstantOperandVal(1) != IdentityIndex) {
4206       IsIdentity = false;
4207       break;
4208     }
4209     assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4210            "Unexpected identity source vector for concat of extracts");
4211     IdentitySrc = Op.getOperand(0);
4212   }
4213   if (IsIdentity) {
4214     assert(IdentitySrc && "Failed to set source vector of extracts");
4215     return IdentitySrc;
4216   }
4217
4218   // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
4219   // simplified to one big BUILD_VECTOR.
4220   // FIXME: Add support for SCALAR_TO_VECTOR as well.
4221   EVT SVT = VT.getScalarType();
4222   SmallVector<SDValue, 16> Elts;
4223   for (SDValue Op : Ops) {
4224     EVT OpVT = Op.getValueType();
4225     if (Op.isUndef())
4226       Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4227     else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4228       Elts.append(Op->op_begin(), Op->op_end());
4229     else
4230       return SDValue();
4231   }
4232
4233   // BUILD_VECTOR requires all inputs to be of the same type; find the
4234   // maximum type and extend them all.
4235   for (SDValue Op : Elts)
4236     SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4237
4238   if (SVT.bitsGT(VT.getScalarType()))
4239     for (SDValue &Op : Elts)
4240       Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4241                ? DAG.getZExtOrTrunc(Op, DL, SVT)
4242                : DAG.getSExtOrTrunc(Op, DL, SVT);
4243
4244   SDValue V = DAG.getBuildVector(VT, DL, Elts);
4245   NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4246   return V;
4247 }
4248
4249 /// Gets or creates the specified node.
4250 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4251   FoldingSetNodeID ID;
4252   AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4253   void *IP = nullptr;
4254   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4255     return SDValue(E, 0);
4256
4257   auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4258                               getVTList(VT));
4259   CSEMap.InsertNode(N, IP);
4260
4261   InsertNode(N);
4262   SDValue V = SDValue(N, 0);
4263   NewSDValueDbgMsg(V, "Creating new node: ", this);
4264   return V;
4265 }
4266
4267 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4268                               SDValue Operand, const SDNodeFlags Flags) {
4269   // Constant fold unary operations with an integer constant operand. Even
4270   // opaque constants will be folded, because the folding of unary operations
4271   // doesn't create new constants with different values. Nevertheless, the
4272   // opaque flag is preserved during folding to prevent future folding with
4273   // other constants.
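  // For example (illustrative), requesting a SIGN_EXTEND of the i8 constant
  // -1 to i32 folds directly to the i32 constant 0xFFFFFFFF below instead of
  // creating a new SIGN_EXTEND node.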
4274 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) { 4275 const APInt &Val = C->getAPIntValue(); 4276 switch (Opcode) { 4277 default: break; 4278 case ISD::SIGN_EXTEND: 4279 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, 4280 C->isTargetOpcode(), C->isOpaque()); 4281 case ISD::TRUNCATE: 4282 if (C->isOpaque()) 4283 break; 4284 LLVM_FALLTHROUGH; 4285 case ISD::ANY_EXTEND: 4286 case ISD::ZERO_EXTEND: 4287 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, 4288 C->isTargetOpcode(), C->isOpaque()); 4289 case ISD::UINT_TO_FP: 4290 case ISD::SINT_TO_FP: { 4291 APFloat apf(EVTToAPFloatSemantics(VT), 4292 APInt::getNullValue(VT.getSizeInBits())); 4293 (void)apf.convertFromAPInt(Val, 4294 Opcode==ISD::SINT_TO_FP, 4295 APFloat::rmNearestTiesToEven); 4296 return getConstantFP(apf, DL, VT); 4297 } 4298 case ISD::BITCAST: 4299 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) 4300 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); 4301 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) 4302 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); 4303 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) 4304 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); 4305 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) 4306 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); 4307 break; 4308 case ISD::ABS: 4309 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), 4310 C->isOpaque()); 4311 case ISD::BITREVERSE: 4312 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), 4313 C->isOpaque()); 4314 case ISD::BSWAP: 4315 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), 4316 C->isOpaque()); 4317 case ISD::CTPOP: 4318 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(), 4319 C->isOpaque()); 4320 case ISD::CTLZ: 4321 case ISD::CTLZ_ZERO_UNDEF: 4322 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(), 4323 C->isOpaque()); 4324 case ISD::CTTZ: 4325 case ISD::CTTZ_ZERO_UNDEF: 4326 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(), 4327 C->isOpaque()); 4328 case ISD::FP16_TO_FP: { 4329 bool Ignored; 4330 APFloat FPV(APFloat::IEEEhalf(), 4331 (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); 4332 4333 // This can return overflow, underflow, or inexact; we don't care. 4334 // FIXME need to be more flexible about rounding mode. 4335 (void)FPV.convert(EVTToAPFloatSemantics(VT), 4336 APFloat::rmNearestTiesToEven, &Ignored); 4337 return getConstantFP(FPV, DL, VT); 4338 } 4339 } 4340 } 4341 4342 // Constant fold unary operations with a floating point constant operand. 
4343 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) { 4344 APFloat V = C->getValueAPF(); // make copy 4345 switch (Opcode) { 4346 case ISD::FNEG: 4347 V.changeSign(); 4348 return getConstantFP(V, DL, VT); 4349 case ISD::FABS: 4350 V.clearSign(); 4351 return getConstantFP(V, DL, VT); 4352 case ISD::FCEIL: { 4353 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); 4354 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4355 return getConstantFP(V, DL, VT); 4356 break; 4357 } 4358 case ISD::FTRUNC: { 4359 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); 4360 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4361 return getConstantFP(V, DL, VT); 4362 break; 4363 } 4364 case ISD::FFLOOR: { 4365 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); 4366 if (fs == APFloat::opOK || fs == APFloat::opInexact) 4367 return getConstantFP(V, DL, VT); 4368 break; 4369 } 4370 case ISD::FP_EXTEND: { 4371 bool ignored; 4372 // This can return overflow, underflow, or inexact; we don't care. 4373 // FIXME need to be more flexible about rounding mode. 4374 (void)V.convert(EVTToAPFloatSemantics(VT), 4375 APFloat::rmNearestTiesToEven, &ignored); 4376 return getConstantFP(V, DL, VT); 4377 } 4378 case ISD::FP_TO_SINT: 4379 case ISD::FP_TO_UINT: { 4380 bool ignored; 4381 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); 4382 // FIXME need to be more flexible about rounding mode. 4383 APFloat::opStatus s = 4384 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); 4385 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual 4386 break; 4387 return getConstant(IntVal, DL, VT); 4388 } 4389 case ISD::BITCAST: 4390 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) 4391 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4392 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) 4393 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT); 4394 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) 4395 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); 4396 break; 4397 case ISD::FP_TO_FP16: { 4398 bool Ignored; 4399 // This can return overflow, underflow, or inexact; we don't care. 4400 // FIXME need to be more flexible about rounding mode. 4401 (void)V.convert(APFloat::IEEEhalf(), 4402 APFloat::rmNearestTiesToEven, &Ignored); 4403 return getConstant(V.bitcastToAPInt(), DL, VT); 4404 } 4405 } 4406 } 4407 4408 // Constant fold unary operations with a vector integer or float operand. 4409 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) { 4410 if (BV->isConstant()) { 4411 switch (Opcode) { 4412 default: 4413 // FIXME: Entirely reasonable to perform folding of other unary 4414 // operations here as the need arises. 
4415         break;
4416       case ISD::FNEG:
4417       case ISD::FABS:
4418       case ISD::FCEIL:
4419       case ISD::FTRUNC:
4420       case ISD::FFLOOR:
4421       case ISD::FP_EXTEND:
4422       case ISD::FP_TO_SINT:
4423       case ISD::FP_TO_UINT:
4424       case ISD::TRUNCATE:
4425       case ISD::ANY_EXTEND:
4426       case ISD::ZERO_EXTEND:
4427       case ISD::SIGN_EXTEND:
4428       case ISD::UINT_TO_FP:
4429       case ISD::SINT_TO_FP:
4430       case ISD::ABS:
4431       case ISD::BITREVERSE:
4432       case ISD::BSWAP:
4433       case ISD::CTLZ:
4434       case ISD::CTLZ_ZERO_UNDEF:
4435       case ISD::CTTZ:
4436       case ISD::CTTZ_ZERO_UNDEF:
4437       case ISD::CTPOP: {
4438         SDValue Ops = { Operand };
4439         if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4440           return Fold;
4441       }
4442       }
4443     }
4444   }
4445
4446   unsigned OpOpcode = Operand.getNode()->getOpcode();
4447   switch (Opcode) {
4448   case ISD::TokenFactor:
4449   case ISD::MERGE_VALUES:
4450   case ISD::CONCAT_VECTORS:
4451     return Operand;         // Factor, merge or concat of one node? No need.
4452   case ISD::BUILD_VECTOR: {
4453     // Attempt to simplify BUILD_VECTOR.
4454     SDValue Ops[] = {Operand};
4455     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4456       return V;
4457     break;
4458   }
4459   case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4460   case ISD::FP_EXTEND:
4461     assert(VT.isFloatingPoint() &&
4462            Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4463     if (Operand.getValueType() == VT) return Operand;  // noop conversion.
4464     assert((!VT.isVector() ||
4465             VT.getVectorNumElements() ==
4466                 Operand.getValueType().getVectorNumElements()) &&
4467            "Vector element count mismatch!");
4468     assert(Operand.getValueType().bitsLT(VT) &&
4469            "Invalid fpext node, dst < src!");
4470     if (Operand.isUndef())
4471       return getUNDEF(VT);
4472     break;
4473   case ISD::FP_TO_SINT:
4474   case ISD::FP_TO_UINT:
4475     if (Operand.isUndef())
4476       return getUNDEF(VT);
4477     break;
4478   case ISD::SINT_TO_FP:
4479   case ISD::UINT_TO_FP:
4480     // [us]itofp(undef) = 0, because the result value is bounded.
4481     if (Operand.isUndef())
4482       return getConstantFP(0.0, DL, VT);
4483     break;
4484   case ISD::SIGN_EXTEND:
4485     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4486            "Invalid SIGN_EXTEND!");
4487     assert(VT.isVector() == Operand.getValueType().isVector() &&
4488            "SIGN_EXTEND result type should be vector iff the operand "
4489            "type is vector!");
4490     if (Operand.getValueType() == VT) return Operand;   // noop extension
4491     assert((!VT.isVector() ||
4492             VT.getVectorNumElements() ==
4493                 Operand.getValueType().getVectorNumElements()) &&
4494            "Vector element count mismatch!");
4495     assert(Operand.getValueType().bitsLT(VT) &&
4496            "Invalid sext node, dst < src!");
4497     if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4498       return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4499     else if (OpOpcode == ISD::UNDEF)
4500       // sext(undef) = 0, because the top bits will all be the same.
4501       return getConstant(0, DL, VT);
4502     break;
4503   case ISD::ZERO_EXTEND:
4504     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4505            "Invalid ZERO_EXTEND!");
4506     assert(VT.isVector() == Operand.getValueType().isVector() &&
4507            "ZERO_EXTEND result type should be vector iff the operand "
4508            "type is vector!");
4509     if (Operand.getValueType() == VT) return Operand;   // noop extension
4510     assert((!VT.isVector() ||
4511             VT.getVectorNumElements() ==
4512                 Operand.getValueType().getVectorNumElements()) &&
4513            "Vector element count mismatch!");
4514     assert(Operand.getValueType().bitsLT(VT) &&
4515            "Invalid zext node, dst < src!");
4516     if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4517       return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4518     else if (OpOpcode == ISD::UNDEF)
4519       // zext(undef) = 0, because the top bits will be zero.
4520       return getConstant(0, DL, VT);
4521     break;
4522   case ISD::ANY_EXTEND:
4523     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4524            "Invalid ANY_EXTEND!");
4525     assert(VT.isVector() == Operand.getValueType().isVector() &&
4526            "ANY_EXTEND result type should be vector iff the operand "
4527            "type is vector!");
4528     if (Operand.getValueType() == VT) return Operand;   // noop extension
4529     assert((!VT.isVector() ||
4530             VT.getVectorNumElements() ==
4531                 Operand.getValueType().getVectorNumElements()) &&
4532            "Vector element count mismatch!");
4533     assert(Operand.getValueType().bitsLT(VT) &&
4534            "Invalid anyext node, dst < src!");
4535
4536     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4537         OpOpcode == ISD::ANY_EXTEND)
4538       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
4539       return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4540     else if (OpOpcode == ISD::UNDEF)
4541       return getUNDEF(VT);
4542
4543     // (ext (trunc x)) -> x
4544     if (OpOpcode == ISD::TRUNCATE) {
4545       SDValue OpOp = Operand.getOperand(0);
4546       if (OpOp.getValueType() == VT) {
4547         transferDbgValues(Operand, OpOp);
4548         return OpOp;
4549       }
4550     }
4551     break;
4552   case ISD::TRUNCATE:
4553     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4554            "Invalid TRUNCATE!");
4555     assert(VT.isVector() == Operand.getValueType().isVector() &&
4556            "TRUNCATE result type should be vector iff the operand "
4557            "type is vector!");
4558     if (Operand.getValueType() == VT) return Operand;   // noop truncate
4559     assert((!VT.isVector() ||
4560             VT.getVectorNumElements() ==
4561                 Operand.getValueType().getVectorNumElements()) &&
4562            "Vector element count mismatch!");
4563     assert(Operand.getValueType().bitsGT(VT) &&
4564            "Invalid truncate node, src < dst!");
4565     if (OpOpcode == ISD::TRUNCATE)
4566       return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4567     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4568         OpOpcode == ISD::ANY_EXTEND) {
4569       // If the source is smaller than the dest, we still need an extend.
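      // Illustrative cases for (trunc (ext X)):
      //   trunc(zext i8 to i64) to i16  -> zext i8 to i16   (still extends)
      //   trunc(zext i32 to i64) to i16 -> trunc i32 to i16 (still truncates)
      //   trunc(zext i16 to i64) to i16 -> X                (exact match)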
4570 if (Operand.getOperand(0).getValueType().getScalarType() 4571 .bitsLT(VT.getScalarType())) 4572 return getNode(OpOpcode, DL, VT, Operand.getOperand(0)); 4573 if (Operand.getOperand(0).getValueType().bitsGT(VT)) 4574 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0)); 4575 return Operand.getOperand(0); 4576 } 4577 if (OpOpcode == ISD::UNDEF) 4578 return getUNDEF(VT); 4579 break; 4580 case ISD::ANY_EXTEND_VECTOR_INREG: 4581 case ISD::ZERO_EXTEND_VECTOR_INREG: 4582 case ISD::SIGN_EXTEND_VECTOR_INREG: 4583 assert(VT.isVector() && "This DAG node is restricted to vector types."); 4584 assert(Operand.getValueType().bitsLE(VT) && 4585 "The input must be the same size or smaller than the result."); 4586 assert(VT.getVectorNumElements() < 4587 Operand.getValueType().getVectorNumElements() && 4588 "The destination vector type must have fewer lanes than the input."); 4589 break; 4590 case ISD::ABS: 4591 assert(VT.isInteger() && VT == Operand.getValueType() && 4592 "Invalid ABS!"); 4593 if (OpOpcode == ISD::UNDEF) 4594 return getUNDEF(VT); 4595 break; 4596 case ISD::BSWAP: 4597 assert(VT.isInteger() && VT == Operand.getValueType() && 4598 "Invalid BSWAP!"); 4599 assert((VT.getScalarSizeInBits() % 16 == 0) && 4600 "BSWAP types must be a multiple of 16 bits!"); 4601 if (OpOpcode == ISD::UNDEF) 4602 return getUNDEF(VT); 4603 break; 4604 case ISD::BITREVERSE: 4605 assert(VT.isInteger() && VT == Operand.getValueType() && 4606 "Invalid BITREVERSE!"); 4607 if (OpOpcode == ISD::UNDEF) 4608 return getUNDEF(VT); 4609 break; 4610 case ISD::BITCAST: 4611 // Basic sanity checking. 4612 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && 4613 "Cannot BITCAST between types of different sizes!"); 4614 if (VT == Operand.getValueType()) return Operand; // noop conversion. 4615 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) 4616 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); 4617 if (OpOpcode == ISD::UNDEF) 4618 return getUNDEF(VT); 4619 break; 4620 case ISD::SCALAR_TO_VECTOR: 4621 assert(VT.isVector() && !Operand.getValueType().isVector() && 4622 (VT.getVectorElementType() == Operand.getValueType() || 4623 (VT.getVectorElementType().isInteger() && 4624 Operand.getValueType().isInteger() && 4625 VT.getVectorElementType().bitsLE(Operand.getValueType()))) && 4626 "Illegal SCALAR_TO_VECTOR node!"); 4627 if (OpOpcode == ISD::UNDEF) 4628 return getUNDEF(VT); 4629 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. 4630 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && 4631 isa<ConstantSDNode>(Operand.getOperand(1)) && 4632 Operand.getConstantOperandVal(1) == 0 && 4633 Operand.getOperand(0).getValueType() == VT) 4634 return Operand.getOperand(0); 4635 break; 4636 case ISD::FNEG: 4637 // Negation of an unknown bag of bits is still completely undefined. 
4638 if (OpOpcode == ISD::UNDEF) 4639 return getUNDEF(VT); 4640 4641 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0 4642 if ((getTarget().Options.UnsafeFPMath || Flags.hasNoSignedZeros()) && 4643 OpOpcode == ISD::FSUB) 4644 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1), 4645 Operand.getOperand(0), Flags); 4646 if (OpOpcode == ISD::FNEG) // --X -> X 4647 return Operand.getOperand(0); 4648 break; 4649 case ISD::FABS: 4650 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) 4651 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0)); 4652 break; 4653 } 4654 4655 SDNode *N; 4656 SDVTList VTs = getVTList(VT); 4657 SDValue Ops[] = {Operand}; 4658 if (VT != MVT::Glue) { // Don't CSE flag producing nodes 4659 FoldingSetNodeID ID; 4660 AddNodeIDNode(ID, Opcode, VTs, Ops); 4661 void *IP = nullptr; 4662 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 4663 E->intersectFlagsWith(Flags); 4664 return SDValue(E, 0); 4665 } 4666 4667 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4668 N->setFlags(Flags); 4669 createOperands(N, Ops); 4670 CSEMap.InsertNode(N, IP); 4671 } else { 4672 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 4673 createOperands(N, Ops); 4674 } 4675 4676 InsertNode(N); 4677 SDValue V = SDValue(N, 0); 4678 NewSDValueDbgMsg(V, "Creating new node: ", this); 4679 return V; 4680 } 4681 4682 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1, 4683 const APInt &C2) { 4684 switch (Opcode) { 4685 case ISD::ADD: return std::make_pair(C1 + C2, true); 4686 case ISD::SUB: return std::make_pair(C1 - C2, true); 4687 case ISD::MUL: return std::make_pair(C1 * C2, true); 4688 case ISD::AND: return std::make_pair(C1 & C2, true); 4689 case ISD::OR: return std::make_pair(C1 | C2, true); 4690 case ISD::XOR: return std::make_pair(C1 ^ C2, true); 4691 case ISD::SHL: return std::make_pair(C1 << C2, true); 4692 case ISD::SRL: return std::make_pair(C1.lshr(C2), true); 4693 case ISD::SRA: return std::make_pair(C1.ashr(C2), true); 4694 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true); 4695 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true); 4696 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true); 4697 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true); 4698 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true); 4699 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? 
C1 : C2, true); 4700 case ISD::SADDSAT: return std::make_pair(C1.sadd_sat(C2), true); 4701 case ISD::UADDSAT: return std::make_pair(C1.uadd_sat(C2), true); 4702 case ISD::SSUBSAT: return std::make_pair(C1.ssub_sat(C2), true); 4703 case ISD::USUBSAT: return std::make_pair(C1.usub_sat(C2), true); 4704 case ISD::UDIV: 4705 if (!C2.getBoolValue()) 4706 break; 4707 return std::make_pair(C1.udiv(C2), true); 4708 case ISD::UREM: 4709 if (!C2.getBoolValue()) 4710 break; 4711 return std::make_pair(C1.urem(C2), true); 4712 case ISD::SDIV: 4713 if (!C2.getBoolValue()) 4714 break; 4715 return std::make_pair(C1.sdiv(C2), true); 4716 case ISD::SREM: 4717 if (!C2.getBoolValue()) 4718 break; 4719 return std::make_pair(C1.srem(C2), true); 4720 } 4721 return std::make_pair(APInt(1, 0), false); 4722 } 4723 4724 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4725 EVT VT, const ConstantSDNode *C1, 4726 const ConstantSDNode *C2) { 4727 if (C1->isOpaque() || C2->isOpaque()) 4728 return SDValue(); 4729 4730 std::pair<APInt, bool> Folded = FoldValue(Opcode, C1->getAPIntValue(), 4731 C2->getAPIntValue()); 4732 if (!Folded.second) 4733 return SDValue(); 4734 return getConstant(Folded.first, DL, VT); 4735 } 4736 4737 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, 4738 const GlobalAddressSDNode *GA, 4739 const SDNode *N2) { 4740 if (GA->getOpcode() != ISD::GlobalAddress) 4741 return SDValue(); 4742 if (!TLI->isOffsetFoldingLegal(GA)) 4743 return SDValue(); 4744 auto *C2 = dyn_cast<ConstantSDNode>(N2); 4745 if (!C2) 4746 return SDValue(); 4747 int64_t Offset = C2->getSExtValue(); 4748 switch (Opcode) { 4749 case ISD::ADD: break; 4750 case ISD::SUB: Offset = -uint64_t(Offset); break; 4751 default: return SDValue(); 4752 } 4753 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, 4754 GA->getOffset() + uint64_t(Offset)); 4755 } 4756 4757 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { 4758 switch (Opcode) { 4759 case ISD::SDIV: 4760 case ISD::UDIV: 4761 case ISD::SREM: 4762 case ISD::UREM: { 4763 // If a divisor is zero/undef or any element of a divisor vector is 4764 // zero/undef, the whole op is undef. 4765 assert(Ops.size() == 2 && "Div/rem should have 2 operands"); 4766 SDValue Divisor = Ops[1]; 4767 if (Divisor.isUndef() || isNullConstant(Divisor)) 4768 return true; 4769 4770 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && 4771 llvm::any_of(Divisor->op_values(), 4772 [](SDValue V) { return V.isUndef() || 4773 isNullConstant(V); }); 4774 // TODO: Handle signed overflow. 4775 } 4776 // TODO: Handle oversized shifts. 4777 default: 4778 return false; 4779 } 4780 } 4781 4782 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, 4783 EVT VT, SDNode *N1, SDNode *N2) { 4784 // If the opcode is a target-specific ISD node, there's nothing we can 4785 // do here and the operand rules may not line up with the below, so 4786 // bail early. 4787 if (Opcode >= ISD::BUILTIN_OP_END) 4788 return SDValue(); 4789 4790 if (isUndef(Opcode, {SDValue(N1, 0), SDValue(N2, 0)})) 4791 return getUNDEF(VT); 4792 4793 // Handle the case of two scalars. 
4794 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
4795 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
4796 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, C1, C2);
4797 assert((!Folded || !VT.isVector()) &&
4798 "Can't fold vector ops with scalar operands");
4799 return Folded;
4800 }
4801 }
4802 
4803 // fold (add Sym, c) -> Sym+c
4804 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
4805 return FoldSymbolOffset(Opcode, VT, GA, N2);
4806 if (TLI->isCommutativeBinOp(Opcode))
4807 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
4808 return FoldSymbolOffset(Opcode, VT, GA, N1);
4809 
4810 // For vectors, extract each constant element and fold them individually.
4811 // Either input may be an undef value.
4812 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
4813 if (!BV1 && !N1->isUndef())
4814 return SDValue();
4815 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
4816 if (!BV2 && !N2->isUndef())
4817 return SDValue();
4818 // If both operands are undef, that's handled the same way as scalars.
4819 if (!BV1 && !BV2)
4820 return SDValue();
4821 
4822 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
4823 "Vector binop with different number of elements in operands?");
4824 
4825 EVT SVT = VT.getScalarType();
4826 EVT LegalSVT = SVT;
4827 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4828 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4829 if (LegalSVT.bitsLT(SVT))
4830 return SDValue();
4831 }
4832 SmallVector<SDValue, 4> Outputs;
4833 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
4834 for (unsigned I = 0; I != NumOps; ++I) {
4835 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
4836 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
4837 if (SVT.isInteger()) {
4838 if (V1->getValueType(0).bitsGT(SVT))
4839 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4840 if (V2->getValueType(0).bitsGT(SVT))
4841 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4842 }
4843 
4844 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4845 return SDValue();
4846 
4847 // Fold one vector element.
4848 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4849 if (LegalSVT != SVT)
4850 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4851 
4852 // Scalar folding only succeeded if the result is a constant or UNDEF.
4853 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4854 ScalarResult.getOpcode() != ISD::ConstantFP)
4855 return SDValue();
4856 Outputs.push_back(ScalarResult);
4857 }
4858 
4859 assert(VT.getVectorNumElements() == Outputs.size() &&
4860 "Vector size mismatch!");
4861 
4862 // We may have a vector type but a scalar result. Create a splat.
4863 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4864 
4865 // Build a big vector out of the scalar elements we generated.
4866 return getBuildVector(VT, SDLoc(), Outputs);
4867 }
4868 
4869 // TODO: Merge with FoldConstantArithmetic
4870 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
4871 const SDLoc &DL, EVT VT,
4872 ArrayRef<SDValue> Ops,
4873 const SDNodeFlags Flags) {
4874 // If the opcode is a target-specific ISD node, there's nothing we can
4875 // do here and the operand rules may not line up with the below, so
4876 // bail early.
4877 if (Opcode >= ISD::BUILTIN_OP_END)
4878 return SDValue();
4879 
4880 if (isUndef(Opcode, Ops))
4881 return getUNDEF(VT);
4882 
4883 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
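// Rough example of the per-lane folding done below (assuming plain
// BUILD_VECTOR constant operands):
//   (add <2 x i32> <i32 1, i32 2>, <2 x i32> <i32 3, i32 4>)
//     --> <2 x i32> <i32 4, i32 6>
// Undef lanes are materialized as scalar UNDEFs and fold by the scalar rules.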
4884 if (!VT.isVector())
4885 return SDValue();
4886 
4887 unsigned NumElts = VT.getVectorNumElements();
4888 
4889 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
4890 return !Op.getValueType().isVector() ||
4891 Op.getValueType().getVectorNumElements() == NumElts;
4892 };
4893 
4894 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
4895 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
4896 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
4897 (BV && BV->isConstant());
4898 };
4899 
4900 // All operands must be vector types with the same number of elements as
4901 // the result type and must be either UNDEF or a build vector of constant
4902 // or UNDEF scalars.
4903 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
4904 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
4905 return SDValue();
4906 
4907 // If we are comparing vectors, then the result needs to be an i1 boolean
4908 // that is then sign-extended back to the legal result type.
4909 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
4910 
4911 // Find a legal integer scalar type for constant promotion and
4912 // ensure that its scalar size is at least as large as the source.
4913 EVT LegalSVT = VT.getScalarType();
4914 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4915 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4916 if (LegalSVT.bitsLT(VT.getScalarType()))
4917 return SDValue();
4918 }
4919 
4920 // Constant fold each scalar lane separately.
4921 SmallVector<SDValue, 4> ScalarResults;
4922 for (unsigned i = 0; i != NumElts; i++) {
4923 SmallVector<SDValue, 4> ScalarOps;
4924 for (SDValue Op : Ops) {
4925 EVT InSVT = Op.getValueType().getScalarType();
4926 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
4927 if (!InBV) {
4928 // We've checked that this is UNDEF or a constant of some kind.
4929 if (Op.isUndef())
4930 ScalarOps.push_back(getUNDEF(InSVT));
4931 else
4932 ScalarOps.push_back(Op);
4933 continue;
4934 }
4935 
4936 SDValue ScalarOp = InBV->getOperand(i);
4937 EVT ScalarVT = ScalarOp.getValueType();
4938 
4939 // Build vector (integer) scalar operands may need implicit
4940 // truncation - do this before constant folding.
4941 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
4942 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
4943 
4944 ScalarOps.push_back(ScalarOp);
4945 }
4946 
4947 // Constant fold the scalar operands.
4948 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
4949 
4950 // Legalize the (integer) scalar constant if necessary.
4951 if (LegalSVT != SVT)
4952 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4953 
4954 // Scalar folding only succeeded if the result is a constant or UNDEF.
4955 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4956 ScalarResult.getOpcode() != ISD::ConstantFP)
4957 return SDValue();
4958 ScalarResults.push_back(ScalarResult);
4959 }
4960 
4961 SDValue V = getBuildVector(VT, DL, ScalarResults);
4962 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
4963 return V;
4964 }
4965 
4966 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
4967 EVT VT, SDValue N1, SDValue N2) {
4968 // TODO: We don't do any constant folding for strict FP opcodes here, but we
4969 // should. That will require dealing with a potentially non-default
4970 // rounding mode, checking the "opStatus" return value from the APFloat
4971 // math calculations, and possibly other variations.
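// A concrete (non-strict) case that is folded below, using APFloat with the
// default round-to-nearest-ties-to-even mode:
//   (fadd f32 1.0, f32 2.0) --> f32 3.0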
4972 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode()); 4973 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode()); 4974 if (N1CFP && N2CFP) { 4975 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF(); 4976 switch (Opcode) { 4977 case ISD::FADD: 4978 C1.add(C2, APFloat::rmNearestTiesToEven); 4979 return getConstantFP(C1, DL, VT); 4980 case ISD::FSUB: 4981 C1.subtract(C2, APFloat::rmNearestTiesToEven); 4982 return getConstantFP(C1, DL, VT); 4983 case ISD::FMUL: 4984 C1.multiply(C2, APFloat::rmNearestTiesToEven); 4985 return getConstantFP(C1, DL, VT); 4986 case ISD::FDIV: 4987 C1.divide(C2, APFloat::rmNearestTiesToEven); 4988 return getConstantFP(C1, DL, VT); 4989 case ISD::FREM: 4990 C1.mod(C2); 4991 return getConstantFP(C1, DL, VT); 4992 case ISD::FCOPYSIGN: 4993 C1.copySign(C2); 4994 return getConstantFP(C1, DL, VT); 4995 default: break; 4996 } 4997 } 4998 if (N1CFP && Opcode == ISD::FP_ROUND) { 4999 APFloat C1 = N1CFP->getValueAPF(); // make copy 5000 bool Unused; 5001 // This can return overflow, underflow, or inexact; we don't care. 5002 // FIXME need to be more flexible about rounding mode. 5003 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, 5004 &Unused); 5005 return getConstantFP(C1, DL, VT); 5006 } 5007 5008 switch (Opcode) { 5009 case ISD::FADD: 5010 case ISD::FSUB: 5011 case ISD::FMUL: 5012 case ISD::FDIV: 5013 case ISD::FREM: 5014 // If both operands are undef, the result is undef. If 1 operand is undef, 5015 // the result is NaN. This should match the behavior of the IR optimizer. 5016 if (N1.isUndef() && N2.isUndef()) 5017 return getUNDEF(VT); 5018 if (N1.isUndef() || N2.isUndef()) 5019 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); 5020 } 5021 return SDValue(); 5022 } 5023 5024 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5025 SDValue N1, SDValue N2, const SDNodeFlags Flags) { 5026 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5027 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 5028 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5029 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5030 5031 // Canonicalize constant to RHS if commutative. 5032 if (TLI->isCommutativeBinOp(Opcode)) { 5033 if (N1C && !N2C) { 5034 std::swap(N1C, N2C); 5035 std::swap(N1, N2); 5036 } else if (N1CFP && !N2CFP) { 5037 std::swap(N1CFP, N2CFP); 5038 std::swap(N1, N2); 5039 } 5040 } 5041 5042 switch (Opcode) { 5043 default: break; 5044 case ISD::TokenFactor: 5045 assert(VT == MVT::Other && N1.getValueType() == MVT::Other && 5046 N2.getValueType() == MVT::Other && "Invalid token factor!"); 5047 // Fold trivial token factors. 5048 if (N1.getOpcode() == ISD::EntryToken) return N2; 5049 if (N2.getOpcode() == ISD::EntryToken) return N1; 5050 if (N1 == N2) return N1; 5051 break; 5052 case ISD::BUILD_VECTOR: { 5053 // Attempt to simplify BUILD_VECTOR. 5054 SDValue Ops[] = {N1, N2}; 5055 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5056 return V; 5057 break; 5058 } 5059 case ISD::CONCAT_VECTORS: { 5060 SDValue Ops[] = {N1, N2}; 5061 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5062 return V; 5063 break; 5064 } 5065 case ISD::AND: 5066 assert(VT.isInteger() && "This operator does not apply to FP types!"); 5067 assert(N1.getValueType() == N2.getValueType() && 5068 N1.getValueType() == VT && "Binary operator types must match!"); 5069 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's 5070 // worth handling here. 
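// e.g. (and i64:x, 0) --> 0 and (and i64:x, -1) --> x; both shapes commonly
// appear when an i64 AND is split into two i32 halves during legalization.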
5071 if (N2C && N2C->isNullValue())
5072 return N2;
5073 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
5074 return N1;
5075 break;
5076 case ISD::OR:
5077 case ISD::XOR:
5078 case ISD::ADD:
5079 case ISD::SUB:
5080 assert(VT.isInteger() && "This operator does not apply to FP types!");
5081 assert(N1.getValueType() == N2.getValueType() &&
5082 N1.getValueType() == VT && "Binary operator types must match!");
5083 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5084 // it's worth handling here.
5085 if (N2C && N2C->isNullValue())
5086 return N1;
5087 break;
5088 case ISD::UDIV:
5089 case ISD::UREM:
5090 case ISD::MULHU:
5091 case ISD::MULHS:
5092 case ISD::MUL:
5093 case ISD::SDIV:
5094 case ISD::SREM:
5095 case ISD::SMIN:
5096 case ISD::SMAX:
5097 case ISD::UMIN:
5098 case ISD::UMAX:
5099 case ISD::SADDSAT:
5100 case ISD::SSUBSAT:
5101 case ISD::UADDSAT:
5102 case ISD::USUBSAT:
5103 assert(VT.isInteger() && "This operator does not apply to FP types!");
5104 assert(N1.getValueType() == N2.getValueType() &&
5105 N1.getValueType() == VT && "Binary operator types must match!");
5106 break;
5107 case ISD::FADD:
5108 case ISD::FSUB:
5109 case ISD::FMUL:
5110 case ISD::FDIV:
5111 case ISD::FREM:
5112 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5113 assert(N1.getValueType() == N2.getValueType() &&
5114 N1.getValueType() == VT && "Binary operator types must match!");
5115 if (SDValue V = simplifyFPBinop(Opcode, N1, N2))
5116 return V;
5117 break;
5118 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
5119 assert(N1.getValueType() == VT &&
5120 N1.getValueType().isFloatingPoint() &&
5121 N2.getValueType().isFloatingPoint() &&
5122 "Invalid FCOPYSIGN!");
5123 break;
5124 case ISD::SHL:
5125 case ISD::SRA:
5126 case ISD::SRL:
5127 if (SDValue V = simplifyShift(N1, N2))
5128 return V;
5129 LLVM_FALLTHROUGH;
5130 case ISD::ROTL:
5131 case ISD::ROTR:
5132 assert(VT == N1.getValueType() &&
5133 "Shift operators' return type must be the same as their first arg");
5134 assert(VT.isInteger() && N2.getValueType().isInteger() &&
5135 "Shifts only work on integers");
5136 assert((!VT.isVector() || VT == N2.getValueType()) &&
5137 "Vector shift amounts must have the same type as their first arg");
5138 // Verify that the shift amount VT is big enough to hold valid shift
5139 // amounts. This catches things like trying to shift an i1024 value by an
5140 // i8, which is easy to fall into in generic code that uses
5141 // TLI.getShiftAmountTy().
5142 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
5143 "Invalid use of small shift amount with oversized value!");
5144 
5145 // Always fold shifts of i1 values so the code generator doesn't need to
5146 // handle them. Since we know the size of the shift has to be less than the
5147 // size of the value, the shift/rotate count is guaranteed to be zero.
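// For instance, (shl i1 %x, %c) is only well defined when %c is zero, so
// the folds below can return %x unchanged.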
5148 if (VT == MVT::i1) 5149 return N1; 5150 if (N2C && N2C->isNullValue()) 5151 return N1; 5152 break; 5153 case ISD::FP_ROUND_INREG: { 5154 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5155 assert(VT == N1.getValueType() && "Not an inreg round!"); 5156 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() && 5157 "Cannot FP_ROUND_INREG integer types"); 5158 assert(EVT.isVector() == VT.isVector() && 5159 "FP_ROUND_INREG type should be vector iff the operand " 5160 "type is vector!"); 5161 assert((!EVT.isVector() || 5162 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 5163 "Vector element counts must match in FP_ROUND_INREG"); 5164 assert(EVT.bitsLE(VT) && "Not rounding down!"); 5165 (void)EVT; 5166 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding. 5167 break; 5168 } 5169 case ISD::FP_ROUND: 5170 assert(VT.isFloatingPoint() && 5171 N1.getValueType().isFloatingPoint() && 5172 VT.bitsLE(N1.getValueType()) && 5173 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && 5174 "Invalid FP_ROUND!"); 5175 if (N1.getValueType() == VT) return N1; // noop conversion. 5176 break; 5177 case ISD::AssertSext: 5178 case ISD::AssertZext: { 5179 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5180 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5181 assert(VT.isInteger() && EVT.isInteger() && 5182 "Cannot *_EXTEND_INREG FP types"); 5183 assert(!EVT.isVector() && 5184 "AssertSExt/AssertZExt type should be the vector element type " 5185 "rather than the vector type!"); 5186 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!"); 5187 if (VT.getScalarType() == EVT) return N1; // noop assertion. 5188 break; 5189 } 5190 case ISD::SIGN_EXTEND_INREG: { 5191 EVT EVT = cast<VTSDNode>(N2)->getVT(); 5192 assert(VT == N1.getValueType() && "Not an inreg extend!"); 5193 assert(VT.isInteger() && EVT.isInteger() && 5194 "Cannot *_EXTEND_INREG FP types"); 5195 assert(EVT.isVector() == VT.isVector() && 5196 "SIGN_EXTEND_INREG type should be vector iff the operand " 5197 "type is vector!"); 5198 assert((!EVT.isVector() || 5199 EVT.getVectorNumElements() == VT.getVectorNumElements()) && 5200 "Vector element counts must match in SIGN_EXTEND_INREG"); 5201 assert(EVT.bitsLE(VT) && "Not extending!"); 5202 if (EVT == VT) return N1; // Not actually extending 5203 5204 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { 5205 unsigned FromBits = EVT.getScalarSizeInBits(); 5206 Val <<= Val.getBitWidth() - FromBits; 5207 Val.ashrInPlace(Val.getBitWidth() - FromBits); 5208 return getConstant(Val, DL, ConstantVT); 5209 }; 5210 5211 if (N1C) { 5212 const APInt &Val = N1C->getAPIntValue(); 5213 return SignExtendInReg(Val, VT); 5214 } 5215 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { 5216 SmallVector<SDValue, 8> Ops; 5217 llvm::EVT OpVT = N1.getOperand(0).getValueType(); 5218 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5219 SDValue Op = N1.getOperand(i); 5220 if (Op.isUndef()) { 5221 Ops.push_back(getUNDEF(OpVT)); 5222 continue; 5223 } 5224 ConstantSDNode *C = cast<ConstantSDNode>(Op); 5225 APInt Val = C->getAPIntValue(); 5226 Ops.push_back(SignExtendInReg(Val, OpVT)); 5227 } 5228 return getBuildVector(VT, DL, Ops); 5229 } 5230 break; 5231 } 5232 case ISD::EXTRACT_VECTOR_ELT: 5233 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() && 5234 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \ 5235 element type of the vector."); 5236 5237 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. 
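// e.g. (extract_vector_elt v4i32:undef, 1) --> i32 undef; the
// out-of-bounds index case just below folds the same way.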
5238 if (N1.isUndef()) 5239 return getUNDEF(VT); 5240 5241 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF 5242 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) 5243 return getUNDEF(VT); 5244 5245 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is 5246 // expanding copies of large vectors from registers. 5247 if (N2C && 5248 N1.getOpcode() == ISD::CONCAT_VECTORS && 5249 N1.getNumOperands() > 0) { 5250 unsigned Factor = 5251 N1.getOperand(0).getValueType().getVectorNumElements(); 5252 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, 5253 N1.getOperand(N2C->getZExtValue() / Factor), 5254 getConstant(N2C->getZExtValue() % Factor, DL, 5255 N2.getValueType())); 5256 } 5257 5258 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is 5259 // expanding large vector constants. 5260 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { 5261 SDValue Elt = N1.getOperand(N2C->getZExtValue()); 5262 5263 if (VT != Elt.getValueType()) 5264 // If the vector element type is not legal, the BUILD_VECTOR operands 5265 // are promoted and implicitly truncated, and the result implicitly 5266 // extended. Make that explicit here. 5267 Elt = getAnyExtOrTrunc(Elt, DL, VT); 5268 5269 return Elt; 5270 } 5271 5272 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector 5273 // operations are lowered to scalars. 5274 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { 5275 // If the indices are the same, return the inserted element else 5276 // if the indices are known different, extract the element from 5277 // the original vector. 5278 SDValue N1Op2 = N1.getOperand(2); 5279 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); 5280 5281 if (N1Op2C && N2C) { 5282 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { 5283 if (VT == N1.getOperand(1).getValueType()) 5284 return N1.getOperand(1); 5285 else 5286 return getSExtOrTrunc(N1.getOperand(1), DL, VT); 5287 } 5288 5289 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); 5290 } 5291 } 5292 5293 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed 5294 // when vector types are scalarized and v1iX is legal. 5295 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx) 5296 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && 5297 N1.getValueType().getVectorNumElements() == 1) { 5298 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), 5299 N1.getOperand(1)); 5300 } 5301 break; 5302 case ISD::EXTRACT_ELEMENT: 5303 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); 5304 assert(!N1.getValueType().isVector() && !VT.isVector() && 5305 (N1.getValueType().isInteger() == VT.isInteger()) && 5306 N1.getValueType() != VT && 5307 "Wrong types for EXTRACT_ELEMENT!"); 5308 5309 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding 5310 // 64-bit integers into 32-bit parts. Instead of building the extract of 5311 // the BUILD_PAIR, only to have legalize rip it apart, just do it now. 5312 if (N1.getOpcode() == ISD::BUILD_PAIR) 5313 return N1.getOperand(N2C->getZExtValue()); 5314 5315 // EXTRACT_ELEMENT of a constant int is also very common. 
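// Worked example: extracting element 1 (the high part) of the i64 constant
// 0x0000000500000003 shifts right by 32 bits and truncates, yielding i32 5.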
5316 if (N1C) {
5317 unsigned ElementSize = VT.getSizeInBits();
5318 unsigned Shift = ElementSize * N2C->getZExtValue();
5319 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
5320 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
5321 }
5322 break;
5323 case ISD::EXTRACT_SUBVECTOR:
5324 if (VT.isSimple() && N1.getValueType().isSimple()) {
5325 assert(VT.isVector() && N1.getValueType().isVector() &&
5326 "Extract subvector VTs must be vectors!");
5327 assert(VT.getVectorElementType() ==
5328 N1.getValueType().getVectorElementType() &&
5329 "Extract subvector VTs must have the same element type!");
5330 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
5331 "Extract subvector must be from larger vector to smaller vector!");
5332 
5333 if (N2C) {
5334 assert((VT.getVectorNumElements() + N2C->getZExtValue()
5335 <= N1.getValueType().getVectorNumElements())
5336 && "Extract subvector overflow!");
5337 }
5338 
5339 // Trivial extraction.
5340 if (VT.getSimpleVT() == N1.getSimpleValueType())
5341 return N1;
5342 
5343 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5344 if (N1.isUndef())
5345 return getUNDEF(VT);
5346 
5347 // EXTRACT_SUBVECTOR of CONCAT_VECTORS can be simplified if the pieces of
5348 // the concat have the same type as the extract.
5349 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
5350 N1.getNumOperands() > 0 &&
5351 VT == N1.getOperand(0).getValueType()) {
5352 unsigned Factor = VT.getVectorNumElements();
5353 return N1.getOperand(N2C->getZExtValue() / Factor);
5354 }
5355 
5356 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5357 // during shuffle legalization.
5358 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5359 VT == N1.getOperand(1).getValueType())
5360 return N1.getOperand(1);
5361 }
5362 break;
5363 }
5364 
5365 // Perform trivial constant folding.
5366 if (SDValue SV =
5367 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
5368 return SV;
5369 
5370 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
5371 return V;
5372 
5373 // Canonicalize an UNDEF to the RHS, even over a constant.
5374 if (N1.isUndef()) {
5375 if (TLI->isCommutativeBinOp(Opcode)) {
5376 std::swap(N1, N2);
5377 } else {
5378 switch (Opcode) {
5379 case ISD::FP_ROUND_INREG:
5380 case ISD::SIGN_EXTEND_INREG:
5381 case ISD::SUB:
5382 return getUNDEF(VT); // fold op(undef, arg2) -> undef
5383 case ISD::UDIV:
5384 case ISD::SDIV:
5385 case ISD::UREM:
5386 case ISD::SREM:
5387 case ISD::SSUBSAT:
5388 case ISD::USUBSAT:
5389 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
5390 }
5391 }
5392 }
5393 
5394 // Fold a bunch of operators when the RHS is undef.
5395 if (N2.isUndef()) {
5396 switch (Opcode) {
5397 case ISD::XOR:
5398 if (N1.isUndef())
5399 // Handle undef ^ undef -> 0 special case. This is a common
5400 // idiom (misuse).
5401 return getConstant(0, DL, VT);
5402 LLVM_FALLTHROUGH;
5403 case ISD::ADD:
5404 case ISD::SUB:
5405 case ISD::UDIV:
5406 case ISD::SDIV:
5407 case ISD::UREM:
5408 case ISD::SREM:
5409 return getUNDEF(VT); // fold op(arg1, undef) -> undef
5410 case ISD::MUL:
5411 case ISD::AND:
5412 case ISD::SSUBSAT:
5413 case ISD::USUBSAT:
5414 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
5415 case ISD::OR:
5416 case ISD::SADDSAT:
5417 case ISD::UADDSAT:
5418 return getAllOnesConstant(DL, VT);
5419 }
5420 }
5421 
5422 // Memoize this node if possible.
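// CSE sketch: non-glue nodes are uniqued through CSEMap, so asking for the
// same (Opcode, VT, N1, N2) twice hands back the existing node with its
// flags intersected instead of allocating a duplicate.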
5423 SDNode *N; 5424 SDVTList VTs = getVTList(VT); 5425 SDValue Ops[] = {N1, N2}; 5426 if (VT != MVT::Glue) { 5427 FoldingSetNodeID ID; 5428 AddNodeIDNode(ID, Opcode, VTs, Ops); 5429 void *IP = nullptr; 5430 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { 5431 E->intersectFlagsWith(Flags); 5432 return SDValue(E, 0); 5433 } 5434 5435 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5436 N->setFlags(Flags); 5437 createOperands(N, Ops); 5438 CSEMap.InsertNode(N, IP); 5439 } else { 5440 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5441 createOperands(N, Ops); 5442 } 5443 5444 InsertNode(N); 5445 SDValue V = SDValue(N, 0); 5446 NewSDValueDbgMsg(V, "Creating new node: ", this); 5447 return V; 5448 } 5449 5450 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, 5451 SDValue N1, SDValue N2, SDValue N3, 5452 const SDNodeFlags Flags) { 5453 // Perform various simplifications. 5454 switch (Opcode) { 5455 case ISD::FMA: { 5456 assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); 5457 assert(N1.getValueType() == VT && N2.getValueType() == VT && 5458 N3.getValueType() == VT && "FMA types must match!"); 5459 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5460 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); 5461 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); 5462 if (N1CFP && N2CFP && N3CFP) { 5463 APFloat V1 = N1CFP->getValueAPF(); 5464 const APFloat &V2 = N2CFP->getValueAPF(); 5465 const APFloat &V3 = N3CFP->getValueAPF(); 5466 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); 5467 return getConstantFP(V1, DL, VT); 5468 } 5469 break; 5470 } 5471 case ISD::BUILD_VECTOR: { 5472 // Attempt to simplify BUILD_VECTOR. 5473 SDValue Ops[] = {N1, N2, N3}; 5474 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) 5475 return V; 5476 break; 5477 } 5478 case ISD::CONCAT_VECTORS: { 5479 SDValue Ops[] = {N1, N2, N3}; 5480 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) 5481 return V; 5482 break; 5483 } 5484 case ISD::SETCC: { 5485 assert(VT.isInteger() && "SETCC result type must be an integer!"); 5486 assert(N1.getValueType() == N2.getValueType() && 5487 "SETCC operands must have the same type!"); 5488 assert(VT.isVector() == N1.getValueType().isVector() && 5489 "SETCC type should be vector iff the operand type is vector!"); 5490 assert((!VT.isVector() || 5491 VT.getVectorNumElements() == N1.getValueType().getVectorNumElements()) && 5492 "SETCC vector element counts must match!"); 5493 // Use FoldSetCC to simplify SETCC's. 5494 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) 5495 return V; 5496 // Vector constant folding. 5497 SDValue Ops[] = {N1, N2, N3}; 5498 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) { 5499 NewSDValueDbgMsg(V, "New node vector constant folding: ", this); 5500 return V; 5501 } 5502 break; 5503 } 5504 case ISD::SELECT: 5505 case ISD::VSELECT: 5506 if (SDValue V = simplifySelect(N1, N2, N3)) 5507 return V; 5508 break; 5509 case ISD::VECTOR_SHUFFLE: 5510 llvm_unreachable("should use getVectorShuffle constructor!"); 5511 case ISD::INSERT_VECTOR_ELT: { 5512 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); 5513 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF 5514 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) 5515 return getUNDEF(VT); 5516 break; 5517 } 5518 case ISD::INSERT_SUBVECTOR: { 5519 // Inserting undef into undef is still undef. 
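// e.g. (insert_subvector v4i32:undef, v2i32:undef, 0) --> v4i32 undef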
5520 if (N1.isUndef() && N2.isUndef())
5521 return getUNDEF(VT);
5522 SDValue Index = N3;
5523 if (VT.isSimple() && N1.getValueType().isSimple()
5524 && N2.getValueType().isSimple()) {
5525 assert(VT.isVector() && N1.getValueType().isVector() &&
5526 N2.getValueType().isVector() &&
5527 "Insert subvector VTs must be vectors");
5528 assert(VT == N1.getValueType() &&
5529 "Dest and insert subvector source types must match!");
5530 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
5531 "Insert subvector must be from smaller vector to larger vector!");
5532 if (isa<ConstantSDNode>(Index)) {
5533 assert((N2.getValueType().getVectorNumElements() +
5534 cast<ConstantSDNode>(Index)->getZExtValue()
5535 <= VT.getVectorNumElements())
5536 && "Insert subvector overflow!");
5537 }
5538 
5539 // Trivial insertion.
5540 if (VT.getSimpleVT() == N2.getSimpleValueType())
5541 return N2;
5542 
5543 // If this is an insert of an extracted vector into an undef vector, we
5544 // can just use the input to the extract.
5545 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5546 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
5547 return N2.getOperand(0);
5548 }
5549 break;
5550 }
5551 case ISD::BITCAST:
5552 // Fold bit_convert nodes from a type to themselves.
5553 if (N1.getValueType() == VT)
5554 return N1;
5555 break;
5556 }
5557 
5558 // Memoize node if it doesn't produce a flag.
5559 SDNode *N;
5560 SDVTList VTs = getVTList(VT);
5561 SDValue Ops[] = {N1, N2, N3};
5562 if (VT != MVT::Glue) {
5563 FoldingSetNodeID ID;
5564 AddNodeIDNode(ID, Opcode, VTs, Ops);
5565 void *IP = nullptr;
5566 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5567 E->intersectFlagsWith(Flags);
5568 return SDValue(E, 0);
5569 }
5570 
5571 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5572 N->setFlags(Flags);
5573 createOperands(N, Ops);
5574 CSEMap.InsertNode(N, IP);
5575 } else {
5576 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5577 createOperands(N, Ops);
5578 }
5579 
5580 InsertNode(N);
5581 SDValue V = SDValue(N, 0);
5582 NewSDValueDbgMsg(V, "Creating new node: ", this);
5583 return V;
5584 }
5585 
5586 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5587 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5588 SDValue Ops[] = { N1, N2, N3, N4 };
5589 return getNode(Opcode, DL, VT, Ops);
5590 }
5591 
5592 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5593 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5594 SDValue N5) {
5595 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5596 return getNode(Opcode, DL, VT, Ops);
5597 }
5598 
5599 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
5600 /// the incoming stack arguments to be loaded from the stack.
5601 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
5602 SmallVector<SDValue, 8> ArgChains;
5603 
5604 // Include the original chain at the beginning of the list. When this is
5605 // used by target LowerCall hooks, this helps legalize find the
5606 // CALLSEQ_BEGIN node.
5607 ArgChains.push_back(Chain);
5608 
5609 // Add a chain value for each stack argument.
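// Incoming stack arguments live in fixed stack objects, which are given
// negative frame indices; that is what the FI->getIndex() < 0 test below
// keys on.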
5610 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 5611 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 5612 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 5613 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 5614 if (FI->getIndex() < 0) 5615 ArgChains.push_back(SDValue(L, 1)); 5616 5617 // Build a tokenfactor for all the chains. 5618 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); 5619 } 5620 5621 /// getMemsetValue - Vectorized representation of the memset value 5622 /// operand. 5623 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 5624 const SDLoc &dl) { 5625 assert(!Value.isUndef()); 5626 5627 unsigned NumBits = VT.getScalarSizeInBits(); 5628 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 5629 assert(C->getAPIntValue().getBitWidth() == 8); 5630 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 5631 if (VT.isInteger()) { 5632 bool IsOpaque = VT.getSizeInBits() > 64 || 5633 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); 5634 return DAG.getConstant(Val, dl, VT, false, IsOpaque); 5635 } 5636 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, 5637 VT); 5638 } 5639 5640 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); 5641 EVT IntVT = VT.getScalarType(); 5642 if (!IntVT.isInteger()) 5643 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); 5644 5645 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); 5646 if (NumBits > 8) { 5647 // Use a multiplication with 0x010101... to extend the input to the 5648 // required length. 5649 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 5650 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, 5651 DAG.getConstant(Magic, dl, IntVT)); 5652 } 5653 5654 if (VT != Value.getValueType() && !VT.isInteger()) 5655 Value = DAG.getBitcast(VT.getScalarType(), Value); 5656 if (VT != Value.getValueType()) 5657 Value = DAG.getSplatBuildVector(VT, dl, Value); 5658 5659 return Value; 5660 } 5661 5662 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 5663 /// used when a memcpy is turned into a memset when the source is a constant 5664 /// string ptr. 5665 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, 5666 const TargetLowering &TLI, 5667 const ConstantDataArraySlice &Slice) { 5668 // Handle vector with all elements zero. 5669 if (Slice.Array == nullptr) { 5670 if (VT.isInteger()) 5671 return DAG.getConstant(0, dl, VT); 5672 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) 5673 return DAG.getConstantFP(0.0, dl, VT); 5674 else if (VT.isVector()) { 5675 unsigned NumElts = VT.getVectorNumElements(); 5676 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? 
MVT::i32 : MVT::i64; 5677 return DAG.getNode(ISD::BITCAST, dl, VT, 5678 DAG.getConstant(0, dl, 5679 EVT::getVectorVT(*DAG.getContext(), 5680 EltVT, NumElts))); 5681 } else 5682 llvm_unreachable("Expected type!"); 5683 } 5684 5685 assert(!VT.isVector() && "Can't handle vector type here!"); 5686 unsigned NumVTBits = VT.getSizeInBits(); 5687 unsigned NumVTBytes = NumVTBits / 8; 5688 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); 5689 5690 APInt Val(NumVTBits, 0); 5691 if (DAG.getDataLayout().isLittleEndian()) { 5692 for (unsigned i = 0; i != NumBytes; ++i) 5693 Val |= (uint64_t)(unsigned char)Slice[i] << i*8; 5694 } else { 5695 for (unsigned i = 0; i != NumBytes; ++i) 5696 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; 5697 } 5698 5699 // If the "cost" of materializing the integer immediate is less than the cost 5700 // of a load, then it is cost effective to turn the load into the immediate. 5701 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 5702 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) 5703 return DAG.getConstant(Val, dl, VT); 5704 return SDValue(nullptr, 0); 5705 } 5706 5707 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset, 5708 const SDLoc &DL) { 5709 EVT VT = Base.getValueType(); 5710 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT)); 5711 } 5712 5713 /// Returns true if memcpy source is constant data. 5714 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { 5715 uint64_t SrcDelta = 0; 5716 GlobalAddressSDNode *G = nullptr; 5717 if (Src.getOpcode() == ISD::GlobalAddress) 5718 G = cast<GlobalAddressSDNode>(Src); 5719 else if (Src.getOpcode() == ISD::ADD && 5720 Src.getOperand(0).getOpcode() == ISD::GlobalAddress && 5721 Src.getOperand(1).getOpcode() == ISD::Constant) { 5722 G = cast<GlobalAddressSDNode>(Src.getOperand(0)); 5723 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue(); 5724 } 5725 if (!G) 5726 return false; 5727 5728 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, 5729 SrcDelta + G->getOffset()); 5730 } 5731 5732 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { 5733 // On Darwin, -Os means optimize for size without hurting performance, so 5734 // only really optimize for size when -Oz (MinSize) is used. 5735 if (MF.getTarget().getTargetTriple().isOSDarwin()) 5736 return MF.getFunction().hasMinSize(); 5737 return MF.getFunction().hasOptSize(); 5738 } 5739 5740 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, 5741 SmallVector<SDValue, 32> &OutChains, unsigned From, 5742 unsigned To, SmallVector<SDValue, 16> &OutLoadChains, 5743 SmallVector<SDValue, 16> &OutStoreChains) { 5744 assert(OutLoadChains.size() && "Missing loads in memcpy inlining"); 5745 assert(OutStoreChains.size() && "Missing stores in memcpy inlining"); 5746 SmallVector<SDValue, 16> GluedLoadChains; 5747 for (unsigned i = From; i < To; ++i) { 5748 OutChains.push_back(OutLoadChains[i]); 5749 GluedLoadChains.push_back(OutLoadChains[i]); 5750 } 5751 5752 // Chain for all loads. 
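// A single TokenFactor makes every store in this group depend on all of the
// group's loads at once: the loads stay free to schedule in parallel, yet
// all of them are ordered before the stores.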
5753 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 5754 GluedLoadChains); 5755 5756 for (unsigned i = From; i < To; ++i) { 5757 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]); 5758 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(), 5759 ST->getBasePtr(), ST->getMemoryVT(), 5760 ST->getMemOperand()); 5761 OutChains.push_back(NewStore); 5762 } 5763 } 5764 5765 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5766 SDValue Chain, SDValue Dst, SDValue Src, 5767 uint64_t Size, unsigned Align, 5768 bool isVol, bool AlwaysInline, 5769 MachinePointerInfo DstPtrInfo, 5770 MachinePointerInfo SrcPtrInfo) { 5771 // Turn a memcpy of undef to nop. 5772 if (Src.isUndef()) 5773 return Chain; 5774 5775 // Expand memcpy to a series of load and store ops if the size operand falls 5776 // below a certain threshold. 5777 // TODO: In the AlwaysInline case, if the size is big then generate a loop 5778 // rather than maybe a humongous number of loads and stores. 5779 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5780 const DataLayout &DL = DAG.getDataLayout(); 5781 LLVMContext &C = *DAG.getContext(); 5782 std::vector<EVT> MemOps; 5783 bool DstAlignCanChange = false; 5784 MachineFunction &MF = DAG.getMachineFunction(); 5785 MachineFrameInfo &MFI = MF.getFrameInfo(); 5786 bool OptSize = shouldLowerMemFuncForSize(MF); 5787 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5788 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5789 DstAlignCanChange = true; 5790 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5791 if (Align > SrcAlign) 5792 SrcAlign = Align; 5793 ConstantDataArraySlice Slice; 5794 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice); 5795 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; 5796 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 5797 5798 if (!TLI.findOptimalMemOpLowering(MemOps, Limit, Size, 5799 (DstAlignCanChange ? 0 : Align), 5800 (isZeroConstant ? 0 : SrcAlign), 5801 false, false, CopyFromConstant, true, 5802 DstPtrInfo.getAddrSpace(), 5803 SrcPtrInfo.getAddrSpace(), 5804 MF.getFunction().getAttributes())) 5805 return SDValue(); 5806 5807 if (DstAlignCanChange) { 5808 Type *Ty = MemOps[0].getTypeForEVT(C); 5809 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5810 5811 // Don't promote to an alignment that would require dynamic stack 5812 // realignment. 5813 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 5814 if (!TRI->needsStackRealignment(MF)) 5815 while (NewAlign > Align && 5816 DL.exceedsNaturalStackAlignment(NewAlign)) 5817 NewAlign /= 2; 5818 5819 if (NewAlign > Align) { 5820 // Give the stack frame object a larger alignment if needed. 5821 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5822 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5823 Align = NewAlign; 5824 } 5825 } 5826 5827 MachineMemOperand::Flags MMOFlags = 5828 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 5829 SmallVector<SDValue, 16> OutLoadChains; 5830 SmallVector<SDValue, 16> OutStoreChains; 5831 SmallVector<SDValue, 32> OutChains; 5832 unsigned NumMemOps = MemOps.size(); 5833 uint64_t SrcOff = 0, DstOff = 0; 5834 for (unsigned i = 0; i != NumMemOps; ++i) { 5835 EVT VT = MemOps[i]; 5836 unsigned VTSize = VT.getSizeInBits() / 8; 5837 SDValue Value, Store; 5838 5839 if (VTSize > Size) { 5840 // Issuing an unaligned load / store pair that overlaps with the previous 5841 // pair. Adjust the offset accordingly. 
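// e.g. a 7-byte copy lowered as two i32 operations: after the first 4
// bytes, Size is 3 but VTSize is 4, so the second pair is pulled back one
// byte to cover offsets 3..6, overlapping the first pair.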
5842 assert(i == NumMemOps-1 && i != 0);
5843 SrcOff -= VTSize - Size;
5844 DstOff -= VTSize - Size;
5845 }
5846 
5847 if (CopyFromConstant &&
5848 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
5849 // It's unlikely a store of a vector immediate can be done in a single
5850 // instruction. It would require a load from a constant pool first.
5851 // We only handle zero vectors here.
5852 // FIXME: Handle other cases where store of vector immediate is done in
5853 // a single instruction.
5854 ConstantDataArraySlice SubSlice;
5855 if (SrcOff < Slice.Length) {
5856 SubSlice = Slice;
5857 SubSlice.move(SrcOff);
5858 } else {
5859 // This is an out-of-bounds access and hence UB. Pretend we read zero.
5860 SubSlice.Array = nullptr;
5861 SubSlice.Offset = 0;
5862 SubSlice.Length = VTSize;
5863 }
5864 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
5865 if (Value.getNode()) {
5866 Store = DAG.getStore(Chain, dl, Value,
5867 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5868 DstPtrInfo.getWithOffset(DstOff), Align,
5869 MMOFlags);
5870 OutChains.push_back(Store);
5871 }
5872 }
5873 
5874 if (!Store.getNode()) {
5875 // The type might not be legal for the target. This should only happen
5876 // if the type is smaller than a legal type, as on PPC, so the right
5877 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
5878 // to Load/Store if NVT==VT.
5879 // FIXME: does the case above also need this?
5880 EVT NVT = TLI.getTypeToTransformTo(C, VT);
5881 assert(NVT.bitsGE(VT));
5882 
5883 bool isDereferenceable =
5884 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5885 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5886 if (isDereferenceable)
5887 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5888 
5889 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
5890 DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5891 SrcPtrInfo.getWithOffset(SrcOff), VT,
5892 MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
5893 OutLoadChains.push_back(Value.getValue(1));
5894 
5895 Store = DAG.getTruncStore(
5896 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5897 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
5898 OutStoreChains.push_back(Store);
5899 }
5900 SrcOff += VTSize;
5901 DstOff += VTSize;
5902 Size -= VTSize;
5903 }
5904 
5905 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
5906 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
5907 unsigned NumLdStInMemcpy = OutStoreChains.size();
5908 
5909 if (NumLdStInMemcpy) {
5910 // A memcpy of constant data may have been converted to a memset above,
5911 // in which case there are only stores and no loads. In the absence of
5912 // loads, there is nothing to gang up.
5913 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
5914 // If the target does not care, just leave the chains ungrouped.
5915 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
5916 OutChains.push_back(OutLoadChains[i]);
5917 OutChains.push_back(OutStoreChains[i]);
5918 }
5919 } else {
5920 // Ld/st count is less than or equal to the limit set by the target.
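// Worked example for the else branch: with 10 load/store pairs and a glue
// limit of 4, the loop below glues pairs [6,10) and then [2,6), and the
// residual code glues the remaining [0,2).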
5921 if (NumLdStInMemcpy <= GluedLdStLimit) { 5922 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 5923 NumLdStInMemcpy, OutLoadChains, 5924 OutStoreChains); 5925 } else { 5926 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit; 5927 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit; 5928 unsigned GlueIter = 0; 5929 5930 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) { 5931 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit; 5932 unsigned IndexTo = NumLdStInMemcpy - GlueIter; 5933 5934 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo, 5935 OutLoadChains, OutStoreChains); 5936 GlueIter += GluedLdStLimit; 5937 } 5938 5939 // Residual ld/st. 5940 if (RemainingLdStInMemcpy) { 5941 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, 5942 RemainingLdStInMemcpy, OutLoadChains, 5943 OutStoreChains); 5944 } 5945 } 5946 } 5947 } 5948 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 5949 } 5950 5951 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, 5952 SDValue Chain, SDValue Dst, SDValue Src, 5953 uint64_t Size, unsigned Align, 5954 bool isVol, bool AlwaysInline, 5955 MachinePointerInfo DstPtrInfo, 5956 MachinePointerInfo SrcPtrInfo) { 5957 // Turn a memmove of undef to nop. 5958 if (Src.isUndef()) 5959 return Chain; 5960 5961 // Expand memmove to a series of load and store ops if the size operand falls 5962 // below a certain threshold. 5963 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5964 const DataLayout &DL = DAG.getDataLayout(); 5965 LLVMContext &C = *DAG.getContext(); 5966 std::vector<EVT> MemOps; 5967 bool DstAlignCanChange = false; 5968 MachineFunction &MF = DAG.getMachineFunction(); 5969 MachineFrameInfo &MFI = MF.getFrameInfo(); 5970 bool OptSize = shouldLowerMemFuncForSize(MF); 5971 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 5972 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 5973 DstAlignCanChange = true; 5974 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 5975 if (Align > SrcAlign) 5976 SrcAlign = Align; 5977 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 5978 5979 if (!TLI.findOptimalMemOpLowering(MemOps, Limit, Size, 5980 (DstAlignCanChange ? 0 : Align), SrcAlign, 5981 false, false, false, false, 5982 DstPtrInfo.getAddrSpace(), 5983 SrcPtrInfo.getAddrSpace(), 5984 MF.getFunction().getAttributes())) 5985 return SDValue(); 5986 5987 if (DstAlignCanChange) { 5988 Type *Ty = MemOps[0].getTypeForEVT(C); 5989 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty); 5990 if (NewAlign > Align) { 5991 // Give the stack frame object a larger alignment if needed. 5992 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 5993 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 5994 Align = NewAlign; 5995 } 5996 } 5997 5998 MachineMemOperand::Flags MMOFlags = 5999 isVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 6000 uint64_t SrcOff = 0, DstOff = 0; 6001 SmallVector<SDValue, 8> LoadValues; 6002 SmallVector<SDValue, 8> LoadChains; 6003 SmallVector<SDValue, 8> OutChains; 6004 unsigned NumMemOps = MemOps.size(); 6005 for (unsigned i = 0; i < NumMemOps; i++) { 6006 EVT VT = MemOps[i]; 6007 unsigned VTSize = VT.getSizeInBits() / 8; 6008 SDValue Value; 6009 6010 bool isDereferenceable = 6011 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); 6012 MachineMemOperand::Flags SrcMMOFlags = MMOFlags; 6013 if (isDereferenceable) 6014 SrcMMOFlags |= MachineMemOperand::MODereferenceable; 6015 6016 Value = 6017 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl), 6018 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags); 6019 LoadValues.push_back(Value); 6020 LoadChains.push_back(Value.getValue(1)); 6021 SrcOff += VTSize; 6022 } 6023 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 6024 OutChains.clear(); 6025 for (unsigned i = 0; i < NumMemOps; i++) { 6026 EVT VT = MemOps[i]; 6027 unsigned VTSize = VT.getSizeInBits() / 8; 6028 SDValue Store; 6029 6030 Store = DAG.getStore(Chain, dl, LoadValues[i], 6031 DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6032 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags); 6033 OutChains.push_back(Store); 6034 DstOff += VTSize; 6035 } 6036 6037 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6038 } 6039 6040 /// Lower the call to 'memset' intrinsic function into a series of store 6041 /// operations. 6042 /// 6043 /// \param DAG Selection DAG where lowered code is placed. 6044 /// \param dl Link to corresponding IR location. 6045 /// \param Chain Control flow dependency. 6046 /// \param Dst Pointer to destination memory location. 6047 /// \param Src Value of byte to write into the memory. 6048 /// \param Size Number of bytes to write. 6049 /// \param Align Alignment of the destination in bytes. 6050 /// \param isVol True if destination is volatile. 6051 /// \param DstPtrInfo IR information on the memory pointer. 6052 /// \returns New head in the control flow, if lowering was successful, empty 6053 /// SDValue otherwise. 6054 /// 6055 /// The function tries to replace 'llvm.memset' intrinsic with several store 6056 /// operations and value calculation code. This is usually profitable for small 6057 /// memory size. 6058 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, 6059 SDValue Chain, SDValue Dst, SDValue Src, 6060 uint64_t Size, unsigned Align, bool isVol, 6061 MachinePointerInfo DstPtrInfo) { 6062 // Turn a memset of undef to nop. 6063 if (Src.isUndef()) 6064 return Chain; 6065 6066 // Expand memset to a series of load/store ops if the size operand 6067 // falls below a certain threshold. 6068 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6069 std::vector<EVT> MemOps; 6070 bool DstAlignCanChange = false; 6071 MachineFunction &MF = DAG.getMachineFunction(); 6072 MachineFrameInfo &MFI = MF.getFrameInfo(); 6073 bool OptSize = shouldLowerMemFuncForSize(MF); 6074 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 6075 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) 6076 DstAlignCanChange = true; 6077 bool IsZeroVal = 6078 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 6079 if (!TLI.findOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize), 6080 Size, (DstAlignCanChange ? 
0 : Align), 0, 6081 true, IsZeroVal, false, true, 6082 DstPtrInfo.getAddrSpace(), ~0u, 6083 MF.getFunction().getAttributes())) 6084 return SDValue(); 6085 6086 if (DstAlignCanChange) { 6087 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 6088 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty); 6089 if (NewAlign > Align) { 6090 // Give the stack frame object a larger alignment if needed. 6091 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign) 6092 MFI.setObjectAlignment(FI->getIndex(), NewAlign); 6093 Align = NewAlign; 6094 } 6095 } 6096 6097 SmallVector<SDValue, 8> OutChains; 6098 uint64_t DstOff = 0; 6099 unsigned NumMemOps = MemOps.size(); 6100 6101 // Find the largest store and generate the bit pattern for it. 6102 EVT LargestVT = MemOps[0]; 6103 for (unsigned i = 1; i < NumMemOps; i++) 6104 if (MemOps[i].bitsGT(LargestVT)) 6105 LargestVT = MemOps[i]; 6106 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 6107 6108 for (unsigned i = 0; i < NumMemOps; i++) { 6109 EVT VT = MemOps[i]; 6110 unsigned VTSize = VT.getSizeInBits() / 8; 6111 if (VTSize > Size) { 6112 // Issuing an unaligned load / store pair that overlaps with the previous 6113 // pair. Adjust the offset accordingly. 6114 assert(i == NumMemOps-1 && i != 0); 6115 DstOff -= VTSize - Size; 6116 } 6117 6118 // If this store is smaller than the largest store see whether we can get 6119 // the smaller value for free with a truncate. 6120 SDValue Value = MemSetValue; 6121 if (VT.bitsLT(LargestVT)) { 6122 if (!LargestVT.isVector() && !VT.isVector() && 6123 TLI.isTruncateFree(LargestVT, VT)) 6124 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 6125 else 6126 Value = getMemsetValue(Src, VT, DAG, dl); 6127 } 6128 assert(Value.getValueType() == VT && "Value with wrong type."); 6129 SDValue Store = DAG.getStore( 6130 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl), 6131 DstPtrInfo.getWithOffset(DstOff), Align, 6132 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone); 6133 OutChains.push_back(Store); 6134 DstOff += VT.getSizeInBits() / 8; 6135 Size -= VTSize; 6136 } 6137 6138 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 6139 } 6140 6141 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, 6142 unsigned AS) { 6143 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all 6144 // pointer operands can be losslessly bitcasted to pointers of address space 0 6145 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) { 6146 report_fatal_error("cannot lower memory intrinsic in address space " + 6147 Twine(AS)); 6148 } 6149 } 6150 6151 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, 6152 SDValue Src, SDValue Size, unsigned Align, 6153 bool isVol, bool AlwaysInline, bool isTailCall, 6154 MachinePointerInfo DstPtrInfo, 6155 MachinePointerInfo SrcPtrInfo) { 6156 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6157 6158 // Check to see if we should lower the memcpy to loads and stores first. 6159 // For cases within the target-specified limits, this is the best choice. 6160 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6161 if (ConstantSize) { 6162 // Memcpy with size zero? Just return the original chain. 
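// (The expansion below only runs for a nonzero constant size; a variable
// size falls through to the target hook or, failing that, the libcall.)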
6163 if (ConstantSize->isNullValue()) 6164 return Chain; 6165 6166 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6167 ConstantSize->getZExtValue(),Align, 6168 isVol, false, DstPtrInfo, SrcPtrInfo); 6169 if (Result.getNode()) 6170 return Result; 6171 } 6172 6173 // Then check to see if we should lower the memcpy with target-specific 6174 // code. If the target chooses to do this, this is the next best. 6175 if (TSI) { 6176 SDValue Result = TSI->EmitTargetCodeForMemcpy( 6177 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline, 6178 DstPtrInfo, SrcPtrInfo); 6179 if (Result.getNode()) 6180 return Result; 6181 } 6182 6183 // If we really need inline code and the target declined to provide it, 6184 // use a (potentially long) sequence of loads and stores. 6185 if (AlwaysInline) { 6186 assert(ConstantSize && "AlwaysInline requires a constant size!"); 6187 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 6188 ConstantSize->getZExtValue(), Align, isVol, 6189 true, DstPtrInfo, SrcPtrInfo); 6190 } 6191 6192 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6193 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6194 6195 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 6196 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 6197 // respect volatile, so they may do things like read or write memory 6198 // beyond the given memory regions. But fixing this isn't easy, and most 6199 // people don't care. 6200 6201 // Emit a library call. 6202 TargetLowering::ArgListTy Args; 6203 TargetLowering::ArgListEntry Entry; 6204 Entry.Ty = Type::getInt8PtrTy(*getContext()); 6205 Entry.Node = Dst; Args.push_back(Entry); 6206 Entry.Node = Src; Args.push_back(Entry); 6207 6208 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6209 Entry.Node = Size; Args.push_back(Entry); 6210 // FIXME: pass in SDLoc 6211 TargetLowering::CallLoweringInfo CLI(*this); 6212 CLI.setDebugLoc(dl) 6213 .setChain(Chain) 6214 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), 6215 Dst.getValueType().getTypeForEVT(*getContext()), 6216 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 6217 TLI->getPointerTy(getDataLayout())), 6218 std::move(Args)) 6219 .setDiscardResult() 6220 .setTailCall(isTailCall); 6221 6222 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 6223 return CallResult.second; 6224 } 6225 6226 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, 6227 SDValue Dst, unsigned DstAlign, 6228 SDValue Src, unsigned SrcAlign, 6229 SDValue Size, Type *SizeTy, 6230 unsigned ElemSz, bool isTailCall, 6231 MachinePointerInfo DstPtrInfo, 6232 MachinePointerInfo SrcPtrInfo) { 6233 // Emit a library call. 
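// The callee built below comes from the
// __llvm_memcpy_element_unordered_atomic_N family (N = element size in
// bytes), looked up via RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz).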
6234 TargetLowering::ArgListTy Args; 6235 TargetLowering::ArgListEntry Entry; 6236 Entry.Ty = getDataLayout().getIntPtrType(*getContext()); 6237 Entry.Node = Dst; 6238 Args.push_back(Entry); 6239 6240 Entry.Node = Src; 6241 Args.push_back(Entry); 6242 6243 Entry.Ty = SizeTy; 6244 Entry.Node = Size; 6245 Args.push_back(Entry); 6246 6247 RTLIB::Libcall LibraryCall = 6248 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); 6249 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 6250 report_fatal_error("Unsupported element size"); 6251 6252 TargetLowering::CallLoweringInfo CLI(*this); 6253 CLI.setDebugLoc(dl) 6254 .setChain(Chain) 6255 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), 6256 Type::getVoidTy(*getContext()), 6257 getExternalSymbol(TLI->getLibcallName(LibraryCall), 6258 TLI->getPointerTy(getDataLayout())), 6259 std::move(Args)) 6260 .setDiscardResult() 6261 .setTailCall(isTailCall); 6262 6263 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); 6264 return CallResult.second; 6265 } 6266 6267 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, 6268 SDValue Src, SDValue Size, unsigned Align, 6269 bool isVol, bool isTailCall, 6270 MachinePointerInfo DstPtrInfo, 6271 MachinePointerInfo SrcPtrInfo) { 6272 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 6273 6274 // Check to see if we should lower the memmove to loads and stores first. 6275 // For cases within the target-specified limits, this is the best choice. 6276 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 6277 if (ConstantSize) { 6278 // Memmove with size zero? Just return the original chain. 6279 if (ConstantSize->isNullValue()) 6280 return Chain; 6281 6282 SDValue Result = 6283 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 6284 ConstantSize->getZExtValue(), Align, isVol, 6285 false, DstPtrInfo, SrcPtrInfo); 6286 if (Result.getNode()) 6287 return Result; 6288 } 6289 6290 // Then check to see if we should lower the memmove with target-specific 6291 // code. If the target chooses to do this, this is the next best. 6292 if (TSI) { 6293 SDValue Result = TSI->EmitTargetCodeForMemmove( 6294 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo); 6295 if (Result.getNode()) 6296 return Result; 6297 } 6298 6299 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); 6300 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); 6301 6302 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 6303 // not be safe. See memcpy above for more details. 6304 6305 // Emit a library call. 
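// The resulting call mirrors C's memmove(dst, src, n): two i8* pointer
// arguments followed by an intptr-sized length, assembled just below.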
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = Type::getInt8PtrTy(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);

  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
                                       SDValue Dst, unsigned DstAlign,
                                       SDValue Src, unsigned SrcAlign,
                                       SDValue Size, Type *SizeTy,
                                       unsigned ElemSz, bool isTailCall,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Node = Src;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, unsigned Align,
                                bool isVol, bool isTailCall,
                                MachinePointerInfo DstPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memset to stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memset with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
        getMemsetStores(*this, dl, Chain, Dst, Src,
                        ConstantSize->getZExtValue(), Align, isVol, DstPtrInfo);

    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memset with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemset(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());

  // Emit a library call.
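  // Note: the C memset prototype takes its fill value as an 'int'; the
  // argument type used below simply follows Src's value type.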
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
  Args.push_back(Entry);
  Entry.Node = Src;
  Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
  Args.push_back(Entry);
  Entry.Node = Size;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Args.push_back(Entry);

  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
                                      SDValue Dst, unsigned DstAlign,
                                      SDValue Value, SDValue Size, Type *SizeTy,
                                      unsigned ElemSz, bool isTailCall,
                                      MachinePointerInfo DstPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Ty = Type::getInt8Ty(*getContext());
  Entry.Node = Value;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO) {
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  AddNodeIDNode(ID, Opcode, VTList, Ops);
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                    VTList, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
                                       EVT MemVT, SDVTList VTs, SDValue Chain,
                                       SDValue Ptr, SDValue Cmp, SDValue Swp,
                                       MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}
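
// This overload covers the read-modify-write atomics and atomic stores;
// ATOMIC_STORE is special in that it produces only a chain result.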
SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDValue Chain, SDValue Ptr, SDValue Val,
                                MachineMemOperand *MMO) {
  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
          Opcode == ISD::ATOMIC_LOAD_SUB ||
          Opcode == ISD::ATOMIC_LOAD_AND ||
          Opcode == ISD::ATOMIC_LOAD_CLR ||
          Opcode == ISD::ATOMIC_LOAD_OR ||
          Opcode == ISD::ATOMIC_LOAD_XOR ||
          Opcode == ISD::ATOMIC_LOAD_NAND ||
          Opcode == ISD::ATOMIC_LOAD_MIN ||
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
          Opcode == ISD::ATOMIC_LOAD_FADD ||
          Opcode == ISD::ATOMIC_LOAD_FSUB ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");

  EVT VT = Val.getValueType();

  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other)
                                             : getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Val};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                EVT VT, SDValue Chain, SDValue Ptr,
                                MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
  if (Ops.size() == 1)
    return Ops[0];

  SmallVector<EVT, 4> VTs;
  VTs.reserve(Ops.size());
  for (unsigned i = 0; i < Ops.size(); ++i)
    VTs.push_back(Ops[i].getValueType());
  return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
}

SDValue SelectionDAG::getMemIntrinsicNode(
    unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
    EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
    MachineMemOperand::Flags Flags, unsigned Size) {
  if (Align == 0) // Ensure that codegen never sees alignment 0
    Align = getEVTAlignment(MemVT);

  if (!Size)
    Size = MemVT.getStoreSize();

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);

  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}

SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
                                          SDVTList VTList,
                                          ArrayRef<SDValue> Ops, EVT MemVT,
                                          MachineMemOperand *MMO) {
  assert((Opcode == ISD::INTRINSIC_VOID ||
          Opcode == ISD::INTRINSIC_W_CHAIN ||
          Opcode == ISD::PREFETCH ||
          Opcode == ISD::LIFETIME_START ||
          Opcode == ISD::LIFETIME_END ||
          ((int)Opcode <= std::numeric_limits<int>::max() &&
           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
         "Opcode is not a memory-accessing opcode!");

  // Memoize the node unless it returns a flag.
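  // Nodes that produce glue are pinned to their consumers, so they are never
  // entered into the CSE map.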
  MemIntrinsicSDNode *N;
  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
        Opcode, dl.getIROrder(), VTList, MemVT, MMO));
    ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      return SDValue(E, 0);
    }

    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(),
                                      dl.getDebugLoc(), VTList, MemVT, MMO);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(),
                                      dl.getDebugLoc(), VTList, MemVT, MMO);
    createOperands(N, Ops);
  }
  InsertNode(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
                                      SDValue Chain, int FrameIndex,
                                      int64_t Size, int64_t Offset) {
  const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
  const auto VTs = getVTList(MVT::Other);
  SDValue Ops[2] = {
      Chain,
      getFrameIndex(FrameIndex,
                    getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
                    true)};

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, VTs, Ops);
  ID.AddInteger(FrameIndex);
  ID.AddInteger(Size);
  ID.AddInteger(Offset);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
      Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
                                           SelectionDAG &DAG, SDValue Ptr,
                                           int64_t Offset = 0) {
  // If this is FI+Offset, we can model it.
  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
    return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                             FI->getIndex(), Offset);

  // If this is (FI+Offset1)+Offset2, we can model it.
  if (Ptr.getOpcode() != ISD::ADD ||
      !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
      !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
    return Info;

  int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
  return MachinePointerInfo::getFixedStack(
      DAG.getMachineFunction(), FI,
      Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
                                           SelectionDAG &DAG, SDValue Ptr,
                                           SDValue OffsetOp) {
  // If the 'Offset' value isn't a constant (or undef), we can't handle this.
  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
    return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
  if (OffsetOp.isUndef())
    return InferPointerInfo(Info, DAG, Ptr);
  return Info;
}

SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset,
                              MachinePointerInfo PtrInfo, EVT MemVT,
                              unsigned Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);
  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
  // clients.
  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
  return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}

SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset, EVT MemVT,
                              MachineMemOperand *MMO) {
  if (VT == MemVT) {
    ExtType = ISD::NON_EXTLOAD;
  } else if (ExtType == ISD::NON_EXTLOAD) {
    assert(VT == MemVT && "Non-extending load from different memory type!");
  } else {
    // Extending load.
    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
           "Should only be an extending load, not truncating!");
    assert(VT.isInteger() == MemVT.isInteger() &&
           "Cannot convert from FP to Int or Int -> FP!");
    assert(VT.isVector() == MemVT.isVector() &&
           "Cannot use an ext load to convert to or from a vector!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
           "Cannot use an ext load to change the number of vector elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
  SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
                         : getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Offset};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                  ExtType, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachinePointerInfo PtrInfo,
                              unsigned Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 unsigned Alignment,
                                 MachineMemOperand::Flags MMOFlags,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
                 MemVT, Alignment, MMOFlags, AAInfo);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
                                     SDValue Base, SDValue Offset,
                                     ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
  // Don't propagate the invariant or dereferenceable flags.
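  // The pre/post-incremented address of an indexed load may fall outside the
  // region for which those guarantees were established.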
  auto MMOFlags =
      LD->getMemOperand()->getFlags() &
      ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
                 LD->getAAInfo());
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachinePointerInfo PtrInfo,
                               unsigned Alignment,
                               MachineMemOperand::Flags MMOFlags,
                               const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(Val.getValueType());

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
  return getStore(Chain, dl, Val, Ptr, MMO);
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachineMemOperand *MMO) {
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = {Chain, Val, Ptr, Undef};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, false, VT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, MachinePointerInfo PtrInfo,
                                    EVT SVT, unsigned Alignment,
                                    MachineMemOperand::Flags MMOFlags,
                                    const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(SVT);

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, EVT SVT,
                                    MachineMemOperand *MMO) {
  EVT VT = Val.getValueType();

  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
  if (VT == SVT)
    return getStore(Chain, dl, Val, Ptr, MMO);
  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be a truncating store, not extending!");
  assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = {Chain, Val, Ptr, Undef};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, true, SVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = {ST->getChain(), ST->getValue(), Base, Offset};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   ST->isTruncatingStore(), ST->getMemoryVT(),
                                   ST->getMemOperand());
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                                    SDValue Ptr, SDValue Mask, SDValue PassThru,
                                    EVT MemVT, MachineMemOperand *MMO,
                                    ISD::LoadExtType ExtTy, bool isExpanding) {
  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Mask, PassThru};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
      dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                        ExtTy, isExpanding, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
node: ", this); 6979 return V; 6980 } 6981 6982 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, 6983 SDValue Val, SDValue Ptr, SDValue Mask, 6984 EVT MemVT, MachineMemOperand *MMO, 6985 bool IsTruncating, bool IsCompressing) { 6986 assert(Chain.getValueType() == MVT::Other && 6987 "Invalid chain type"); 6988 EVT VT = Val.getValueType(); 6989 SDVTList VTs = getVTList(MVT::Other); 6990 SDValue Ops[] = { Chain, Val, Ptr, Mask }; 6991 FoldingSetNodeID ID; 6992 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); 6993 ID.AddInteger(VT.getRawBits()); 6994 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( 6995 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO)); 6996 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 6997 void *IP = nullptr; 6998 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 6999 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); 7000 return SDValue(E, 0); 7001 } 7002 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, 7003 IsTruncating, IsCompressing, MemVT, MMO); 7004 createOperands(N, Ops); 7005 7006 CSEMap.InsertNode(N, IP); 7007 InsertNode(N); 7008 SDValue V(N, 0); 7009 NewSDValueDbgMsg(V, "Creating new node: ", this); 7010 return V; 7011 } 7012 7013 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl, 7014 ArrayRef<SDValue> Ops, 7015 MachineMemOperand *MMO) { 7016 assert(Ops.size() == 6 && "Incompatible number of operands"); 7017 7018 FoldingSetNodeID ID; 7019 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); 7020 ID.AddInteger(VT.getRawBits()); 7021 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( 7022 dl.getIROrder(), VTs, VT, MMO)); 7023 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7024 void *IP = nullptr; 7025 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7026 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); 7027 return SDValue(E, 0); 7028 } 7029 7030 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), 7031 VTs, VT, MMO); 7032 createOperands(N, Ops); 7033 7034 assert(N->getPassThru().getValueType() == N->getValueType(0) && 7035 "Incompatible type of the PassThru value in MaskedGatherSDNode"); 7036 assert(N->getMask().getValueType().getVectorNumElements() == 7037 N->getValueType(0).getVectorNumElements() && 7038 "Vector width mismatch between mask and data"); 7039 assert(N->getIndex().getValueType().getVectorNumElements() >= 7040 N->getValueType(0).getVectorNumElements() && 7041 "Vector width mismatch between index and data"); 7042 assert(isa<ConstantSDNode>(N->getScale()) && 7043 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && 7044 "Scale should be a constant power of 2"); 7045 7046 CSEMap.InsertNode(N, IP); 7047 InsertNode(N); 7048 SDValue V(N, 0); 7049 NewSDValueDbgMsg(V, "Creating new node: ", this); 7050 return V; 7051 } 7052 7053 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl, 7054 ArrayRef<SDValue> Ops, 7055 MachineMemOperand *MMO) { 7056 assert(Ops.size() == 6 && "Incompatible number of operands"); 7057 7058 FoldingSetNodeID ID; 7059 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); 7060 ID.AddInteger(VT.getRawBits()); 7061 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( 7062 dl.getIROrder(), VTs, VT, MMO)); 7063 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 7064 void *IP = nullptr; 7065 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { 7066 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); 7067 return SDValue(E, 0); 7068 } 7069 auto *N = 
  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, VT, MMO);
  createOperands(N, Ops);

  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() >=
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
  // select undef, T, F --> T (if T is a constant), otherwise F
  // select ?, undef, F --> F
  // select ?, T, undef --> T
  if (Cond.isUndef())
    return isConstantValueOfAnyType(T) ? T : F;
  if (T.isUndef())
    return F;
  if (F.isUndef())
    return T;

  // select true, T, F --> T
  // select false, T, F --> F
  if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
    return CondC->isNullValue() ? F : T;

  // TODO: This should simplify VSELECT with constant condition using something
  // like this (but check boolean contents to be complete?):
  // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
  //   return T;
  // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
  //   return F;

  // select ?, T, T --> T
  if (T == F)
    return T;

  return SDValue();
}

SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
  // shift undef, Y --> 0 (can always assume that the undef value is 0)
  if (X.isUndef())
    return getConstant(0, SDLoc(X.getNode()), X.getValueType());
  // shift X, undef --> undef (because it may shift by the bitwidth)
  if (Y.isUndef())
    return getUNDEF(X.getValueType());

  // shift 0, Y --> 0
  // shift X, 0 --> X
  if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
    return X;

  // shift X, C >= bitwidth(X) --> undef
  // All vector elements must be too big (or undef) to avoid partial undefs.
  auto isShiftTooBig = [X](ConstantSDNode *Val) {
    return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
  };
  if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
    return getUNDEF(X.getValueType());

  return SDValue();
}

// TODO: Use fast-math-flags to enable more simplifications.
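// For example, 'nsz' would also allow X + 0.0 --> X, and 'nnan' would allow
// X - X --> 0.0.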
SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y) {
  ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
  if (!YC)
    return SDValue();

  // X + -0.0 --> X
  if (Opcode == ISD::FADD)
    if (YC->getValueAPF().isNegZero())
      return X;

  // X - +0.0 --> X
  if (Opcode == ISD::FSUB)
    if (YC->getValueAPF().isPosZero())
      return X;

  // X * 1.0 --> X
  // X / 1.0 --> X
  if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
    if (YC->getValueAPF().isExactlyValue(1.0))
      return X;

  return SDValue();
}

SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
                               SDValue Ptr, SDValue SV, unsigned Align) {
  SDValue Ops[] = {Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32)};
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  unsigned NumOps = Ops.size();
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
  default: break;
  }

  switch (Opcode) {
  default: break;
  case ISD::BUILD_VECTOR:
    // Attempt to simplify BUILD_VECTOR.
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::CONCAT_VECTORS:
    if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::SELECT_CC:
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  case ISD::BR_CC:
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }

  // Memoize nodes.
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;

    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops);

#if 0
  switch (Opcode) {
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
  }
#endif

  // Memoize the node unless it returns a flag.
  SDNode *N;
  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
  }
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList) {
  return getNode(Opcode, DL, VTList, None);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1) {
  SDValue Ops[] = {N1};
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2) {
  SDValue Ops[] = {N1, N2};
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDValue Ops[] = {N1, N2, N3};
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = {N1, N2, N3, N4};
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = {N1, N2, N3, N4, N5};
  return getNode(Opcode, DL, VTList, Ops);
}

SDVTList SelectionDAG::getVTList(EVT VT) {
  return makeVTList(SDNode::getValueTypeList(VT), 1);
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
  FoldingSetNodeID ID;
  ID.AddInteger(2U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(2);
    Array[0] = VT1;
    Array[1] = VT2;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
  FoldingSetNodeID ID;
  ID.AddInteger(3U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(3);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
  FoldingSetNodeID ID;
  ID.AddInteger(4U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());
  ID.AddInteger(VT4.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(4);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Array[3] = VT4;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
  unsigned NumVTs = VTs.size();
  FoldingSetNodeID ID;
  ID.AddInteger(NumVTs);
  for (unsigned index = 0; index < NumVTs; index++) {
    ID.AddInteger(VTs[index].getRawBits());
  }

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(NumVTs);
    llvm::copy(VTs, Array);
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op == N->getOperand(0)) return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  N->OperandList[0].set(Op);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
    return N; // No operands changed, just return the input node.

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  if (N->OperandList[0] != Op1)
    N->OperandList[0].set(Op1);
  if (N->OperandList[1] != Op2)
    N->OperandList[1].set(Op2);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
  SDValue Ops[] = {Op1, Op2, Op3};
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4) {
  SDValue Ops[] = {Op1, Op2, Op3, Op4};
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4, SDValue Op5) {
  SDValue Ops[] = {Op1, Op2, Op3, Op4, Op5};
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  assert(N->getNumOperands() == NumOps &&
         "Update with wrong number of operands");

  // If no operands changed just return the input node.
  if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
    return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  for (unsigned i = 0; i != NumOps; ++i)
    if (N->OperandList[i] != Ops[i])
      N->OperandList[i].set(Ops[i]);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
  // Unlike the code in MorphNodeTo that does this, we don't need to
  // watch for dead nodes here.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
    SDUse &Use = *I++;
    Use.set(SDValue());
  }
}

void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
                                  ArrayRef<MachineMemOperand *> NewMemRefs) {
  if (NewMemRefs.empty()) {
    N->clearMemRefs();
    return;
  }

  // Check if we can avoid allocating by storing a single reference directly.
  if (NewMemRefs.size() == 1) {
    N->MemRefs = NewMemRefs[0];
    N->NumMemRefs = 1;
    return;
  }

  MachineMemOperand **MemRefsBuffer =
      Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
  llvm::copy(NewMemRefs, MemRefsBuffer);
  N->MemRefs = MemRefsBuffer;
  N->NumMemRefs = static_cast<int>(NewMemRefs.size());
}

/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
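///
/// A typical use from a target's instruction selector (illustrative only):
///   CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));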
///
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {Op1};
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {Op1, Op2};
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1, SDValue Op2,
                                   SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {Op1, Op2, Op3};
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = {Op1, Op2};
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
  SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
  // Reset the NodeID to -1.
  New->setNodeId(-1);
  if (New != N) {
    ReplaceAllUsesWith(N, New);
    RemoveDeadNode(N);
  }
  return New;
}

/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that operation is associated with multiple lines.
/// This will make the debugger work better at -O0, where there is a higher
/// probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node.
/// If there is already a node of the specified opcode and operands, it
/// returns that node instead of the current one. Note that the SDLoc need
/// not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner or
/// the legalizer which maintain worklists that would need to be updated when
/// deleting things.
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
      return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list. Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  // For MachineNode, initialize the memory references information.
  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
    MN->clearMemRefs();

  // Swap for an appropriately sized array from the recycler.
  removeOperands(N);
  createOperands(N, Ops);

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SDNode *N : DeadNodeSet)
      if (N->use_empty())
        DeadNodes.push_back(N);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP); // Memoize the new node.
  return N;
}

SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
  unsigned OrigOpc = Node->getOpcode();
  unsigned NewOpc;
  switch (OrigOpc) {
  default:
    llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
  case ISD::STRICT_FADD:       NewOpc = ISD::FADD;       break;
  case ISD::STRICT_FSUB:       NewOpc = ISD::FSUB;       break;
  case ISD::STRICT_FMUL:       NewOpc = ISD::FMUL;       break;
  case ISD::STRICT_FDIV:       NewOpc = ISD::FDIV;       break;
  case ISD::STRICT_FREM:       NewOpc = ISD::FREM;       break;
  case ISD::STRICT_FMA:        NewOpc = ISD::FMA;        break;
  case ISD::STRICT_FSQRT:      NewOpc = ISD::FSQRT;      break;
  case ISD::STRICT_FPOW:       NewOpc = ISD::FPOW;       break;
  case ISD::STRICT_FPOWI:      NewOpc = ISD::FPOWI;      break;
  case ISD::STRICT_FSIN:       NewOpc = ISD::FSIN;       break;
  case ISD::STRICT_FCOS:       NewOpc = ISD::FCOS;       break;
  case ISD::STRICT_FEXP:       NewOpc = ISD::FEXP;       break;
  case ISD::STRICT_FEXP2:      NewOpc = ISD::FEXP2;      break;
  case ISD::STRICT_FLOG:       NewOpc = ISD::FLOG;       break;
  case ISD::STRICT_FLOG10:     NewOpc = ISD::FLOG10;     break;
  case ISD::STRICT_FLOG2:      NewOpc = ISD::FLOG2;      break;
  case ISD::STRICT_FRINT:      NewOpc = ISD::FRINT;      break;
  case ISD::STRICT_FNEARBYINT: NewOpc = ISD::FNEARBYINT; break;
  case ISD::STRICT_FMAXNUM:    NewOpc = ISD::FMAXNUM;    break;
  case ISD::STRICT_FMINNUM:    NewOpc = ISD::FMINNUM;    break;
  case ISD::STRICT_FCEIL:      NewOpc = ISD::FCEIL;      break;
  case ISD::STRICT_FFLOOR:     NewOpc = ISD::FFLOOR;     break;
  case ISD::STRICT_FROUND:     NewOpc = ISD::FROUND;     break;
  case ISD::STRICT_FTRUNC:     NewOpc = ISD::FTRUNC;     break;
  case ISD::STRICT_FP_ROUND:   NewOpc = ISD::FP_ROUND;   break;
  case ISD::STRICT_FP_EXTEND:  NewOpc = ISD::FP_EXTEND;  break;
  }

  assert(Node->getNumValues() == 2 && "Unexpected number of results!");

  // We're taking this node out of the chain, so we need to re-link things.
  SDValue InputChain = Node->getOperand(0);
  SDValue OutputChain = SDValue(Node, 1);
  ReplaceAllUsesOfValueWith(OutputChain, InputChain);

  SmallVector<SDValue, 3> Ops;
  for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
    Ops.push_back(Node->getOperand(i));

  SDVTList VTs = getVTList(Node->getValueType(0));
  SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);

  // MorphNodeTo can operate in two ways: if an existing node with the
  // specified operands exists, it can just return it. Otherwise, it
  // updates the node in place to have the requested operands.
  if (Res == Node) {
    // If we updated the node in place, reset the node ID. To the isel,
    // this should be just like a newly allocated machine node.
    Res->setNodeId(-1);
  } else {
    ReplaceAllUsesWith(Node, Res);
    RemoveDeadNode(Node);
  }

  return Res;
}

/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
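///
/// Illustrative use (the opcode and operands are stand-ins, not from this
/// file):
///   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
///                                          DL, VT, {Val, RegClassIdx});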
MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {Op1};
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {Op1, Op2};
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {Op1, Op2, Op3};
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = {Op1, Op2};
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = {Op1, Op2, Op3};
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = {Op1, Op2};
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = {Op1, Op2, Op3};
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            ArrayRef<EVT> ResultTys,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(ResultTys);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
                                            SDVTList VTs,
                                            ArrayRef<SDValue> Ops) {
  bool DoCSE = VTs.VTs[VTs.NumVTs - 1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = nullptr;

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, Ops);
    IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); 7907 } 7908 } 7909 7910 // Allocate a new MachineSDNode. 7911 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 7912 createOperands(N, Ops); 7913 7914 if (DoCSE) 7915 CSEMap.InsertNode(N, IP); 7916 7917 InsertNode(N); 7918 return N; 7919 } 7920 7921 /// getTargetExtractSubreg - A convenience function for creating 7922 /// TargetOpcode::EXTRACT_SUBREG nodes. 7923 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, 7924 SDValue Operand) { 7925 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 7926 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 7927 VT, Operand, SRIdxVal); 7928 return SDValue(Subreg, 0); 7929 } 7930 7931 /// getTargetInsertSubreg - A convenience function for creating 7932 /// TargetOpcode::INSERT_SUBREG nodes. 7933 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, 7934 SDValue Operand, SDValue Subreg) { 7935 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); 7936 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 7937 VT, Operand, Subreg, SRIdxVal); 7938 return SDValue(Result, 0); 7939 } 7940 7941 /// getNodeIfExists - Get the specified node if it's already available, or 7942 /// else return NULL. 7943 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, 7944 ArrayRef<SDValue> Ops, 7945 const SDNodeFlags Flags) { 7946 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { 7947 FoldingSetNodeID ID; 7948 AddNodeIDNode(ID, Opcode, VTList, Ops); 7949 void *IP = nullptr; 7950 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { 7951 E->intersectFlagsWith(Flags); 7952 return E; 7953 } 7954 } 7955 return nullptr; 7956 } 7957 7958 /// getDbgValue - Creates a SDDbgValue node. 
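///
/// A minimal usage sketch (illustrative only; Var, Expr, DL, and Order are
/// assumed to be in scope): attach a debug value to result 0 of node N and
/// then register it with the DAG:
///
///   SDDbgValue *DV = DAG.getDbgValue(Var, Expr, N.getNode(), /*R=*/0,
///                                    /*IsIndirect=*/false, DL, Order);
///   DAG.AddDbgValue(DV, N.getNode(), /*isParameter=*/false);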
7959 /// 7960 /// SDNode 7961 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, 7962 SDNode *N, unsigned R, bool IsIndirect, 7963 const DebugLoc &DL, unsigned O) { 7964 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7965 "Expected inlined-at fields to agree"); 7966 return new (DbgInfo->getAlloc()) 7967 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O); 7968 } 7969 7970 /// Constant 7971 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, 7972 DIExpression *Expr, 7973 const Value *C, 7974 const DebugLoc &DL, unsigned O) { 7975 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7976 "Expected inlined-at fields to agree"); 7977 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O); 7978 } 7979 7980 /// FrameIndex 7981 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, 7982 DIExpression *Expr, unsigned FI, 7983 bool IsIndirect, 7984 const DebugLoc &DL, 7985 unsigned O) { 7986 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7987 "Expected inlined-at fields to agree"); 7988 return new (DbgInfo->getAlloc()) 7989 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX); 7990 } 7991 7992 /// VReg 7993 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, 7994 DIExpression *Expr, 7995 unsigned VReg, bool IsIndirect, 7996 const DebugLoc &DL, unsigned O) { 7997 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && 7998 "Expected inlined-at fields to agree"); 7999 return new (DbgInfo->getAlloc()) 8000 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG); 8001 } 8002 8003 void SelectionDAG::transferDbgValues(SDValue From, SDValue To, 8004 unsigned OffsetInBits, unsigned SizeInBits, 8005 bool InvalidateDbg) { 8006 SDNode *FromNode = From.getNode(); 8007 SDNode *ToNode = To.getNode(); 8008 assert(FromNode && ToNode && "Can't modify dbg values"); 8009 8010 // PR35338 8011 // TODO: assert(From != To && "Redundant dbg value transfer"); 8012 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); 8013 if (From == To || FromNode == ToNode) 8014 return; 8015 8016 if (!FromNode->getHasDebugValue()) 8017 return; 8018 8019 SmallVector<SDDbgValue *, 2> ClonedDVs; 8020 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { 8021 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated()) 8022 continue; 8023 8024 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); 8025 8026 // Just transfer the dbg value attached to From. 8027 if (Dbg->getResNo() != From.getResNo()) 8028 continue; 8029 8030 DIVariable *Var = Dbg->getVariable(); 8031 auto *Expr = Dbg->getExpression(); 8032 // If a fragment is requested, update the expression. 8033 if (SizeInBits) { 8034 // When splitting a larger (e.g., sign-extended) value whose 8035 // lower bits are described with an SDDbgValue, do not attempt 8036 // to transfer the SDDbgValue to the upper bits. 8037 if (auto FI = Expr->getFragmentInfo()) 8038 if (OffsetInBits + SizeInBits > FI->SizeInBits) 8039 continue; 8040 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, 8041 SizeInBits); 8042 if (!Fragment) 8043 continue; 8044 Expr = *Fragment; 8045 } 8046 // Clone the SDDbgValue and move it to To. 
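    // (For example, when type legalization splits an i64 value into two i32
    // halves, the low half is transferred with OffsetInBits=0 and the high
    // half with OffsetInBits=32, SizeInBits=32 in both cases, so each clone
    // describes only its fragment of the original variable.)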
8047     SDDbgValue *Clone =
8048         getDbgValue(Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(),
8049                     Dbg->getDebugLoc(), Dbg->getOrder());
8050     ClonedDVs.push_back(Clone);
8051 
8052     if (InvalidateDbg) {
8053       // Invalidate value and indicate the SDDbgValue should not be emitted.
8054       Dbg->setIsInvalidated();
8055       Dbg->setIsEmitted();
8056     }
8057   }
8058 
8059   for (SDDbgValue *Dbg : ClonedDVs)
8060     AddDbgValue(Dbg, ToNode, false);
8061 }
8062 
8063 void SelectionDAG::salvageDebugInfo(SDNode &N) {
8064   if (!N.getHasDebugValue())
8065     return;
8066 
8067   SmallVector<SDDbgValue *, 2> ClonedDVs;
8068   for (auto DV : GetDbgValues(&N)) {
8069     if (DV->isInvalidated())
8070       continue;
8071     switch (N.getOpcode()) {
8072     default:
8073       break;
8074     case ISD::ADD:
8075       SDValue N0 = N.getOperand(0);
8076       SDValue N1 = N.getOperand(1);
8077       if (!isConstantIntBuildVectorOrConstantInt(N0) &&
8078           isConstantIntBuildVectorOrConstantInt(N1)) {
8079         uint64_t Offset = N.getConstantOperandVal(1);
8080         // Rewrite an ADD constant node into a DIExpression. Since we are
8081         // performing arithmetic to compute the variable's *value* in the
8082         // DIExpression, we need to mark the expression with a
8083         // DW_OP_stack_value.
8084         auto *DIExpr = DV->getExpression();
8085         DIExpr =
8086             DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset);
8087         SDDbgValue *Clone =
8088             getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
8089                         DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
8090         ClonedDVs.push_back(Clone);
8091         DV->setIsInvalidated();
8092         DV->setIsEmitted();
8093         LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
8094                    N0.getNode()->dumprFull(this);
8095                    dbgs() << " into " << *DIExpr << '\n');
8096       }
8097     }
8098   }
8099 
8100   for (SDDbgValue *Dbg : ClonedDVs)
8101     AddDbgValue(Dbg, Dbg->getSDNode(), false);
8102 }
8103 
8104 /// Creates an SDDbgLabel node.
8105 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
8106                                       const DebugLoc &DL, unsigned O) {
8107   assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
8108          "Expected inlined-at fields to agree");
8109   return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
8110 }
8111 
8112 namespace {
8113 
8114 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
8115 /// pointed to by a use iterator is deleted, increment the use iterator
8116 /// so that it doesn't dangle.
8117 ///
8118 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
8119   SDNode::use_iterator &UI;
8120   SDNode::use_iterator &UE;
8121 
8122   void NodeDeleted(SDNode *N, SDNode *E) override {
8123     // Increment the iterator as needed.
8124     while (UI != UE && N == *UI)
8125       ++UI;
8126   }
8127 
8128 public:
8129   RAUWUpdateListener(SelectionDAG &d,
8130                      SDNode::use_iterator &ui,
8131                      SDNode::use_iterator &ue)
8132     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
8133 };
8134 
8135 } // end anonymous namespace
8136 
8137 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8138 /// This can cause recursive merging of nodes in the DAG.
8139 ///
8140 /// This version assumes From has a single result value.
8141 ///
8142 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
8143   SDNode *From = FromN.getNode();
8144   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
8145          "Cannot replace with this method!");
8146   assert(From != To.getNode() && "Cannot replace uses of a value with itself");
8147 
8148   // Preserve Debug Values
8149   transferDbgValues(FromN, To);
8150 
8151   // Iterate over all the existing uses of From.
New uses will be added
8152   // to the beginning of the use list, which we avoid visiting.
8153   // This specifically avoids visiting uses of From that arise while the
8154   // replacement is happening, because any such uses would be the result
8155   // of CSE: If an existing node looks like From after one of its operands
8156   // is replaced by To, we don't want to replace all of its users with To
8157   // too. See PR3018 for more info.
8158   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8159   RAUWUpdateListener Listener(*this, UI, UE);
8160   while (UI != UE) {
8161     SDNode *User = *UI;
8162 
8163     // This node is about to morph, remove its old self from the CSE maps.
8164     RemoveNodeFromCSEMaps(User);
8165 
8166     // A user can appear in a use list multiple times, and when this
8167     // happens the uses are usually next to each other in the list.
8168     // To help reduce the number of CSE recomputations, process all
8169     // the uses of this user that we can find this way.
8170     do {
8171       SDUse &Use = UI.getUse();
8172       ++UI;
8173       Use.set(To);
8174       if (To->isDivergent() != From->isDivergent())
8175         updateDivergence(User);
8176     } while (UI != UE && *UI == User);
8177     // Now that we have modified User, add it back to the CSE maps. If it
8178     // already exists there, recursively merge the results together.
8179     AddModifiedNodeToCSEMaps(User);
8180   }
8181 
8182   // If we just RAUW'd the root, take note.
8183   if (FromN == getRoot())
8184     setRoot(To);
8185 }
8186 
8187 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8188 /// This can cause recursive merging of nodes in the DAG.
8189 ///
8190 /// This version assumes that for each value of From, there is a
8191 /// corresponding value in To in the same position with the same type.
8192 ///
8193 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
8194 #ifndef NDEBUG
8195   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8196     assert((!From->hasAnyUseOfValue(i) ||
8197             From->getValueType(i) == To->getValueType(i)) &&
8198            "Cannot use this version of ReplaceAllUsesWith!");
8199 #endif
8200 
8201   // Handle the trivial case.
8202   if (From == To)
8203     return;
8204 
8205   // Preserve Debug Info. Only do this if there's a use.
8206   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8207     if (From->hasAnyUseOfValue(i)) {
8208       assert((i < To->getNumValues()) && "Invalid To location");
8209       transferDbgValues(SDValue(From, i), SDValue(To, i));
8210     }
8211 
8212   // Iterate over just the existing users of From. See the comments in
8213   // the ReplaceAllUsesWith above.
8214   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8215   RAUWUpdateListener Listener(*this, UI, UE);
8216   while (UI != UE) {
8217     SDNode *User = *UI;
8218 
8219     // This node is about to morph, remove its old self from the CSE maps.
8220     RemoveNodeFromCSEMaps(User);
8221 
8222     // A user can appear in a use list multiple times, and when this
8223     // happens the uses are usually next to each other in the list.
8224     // To help reduce the number of CSE recomputations, process all
8225     // the uses of this user that we can find this way.
8226     do {
8227       SDUse &Use = UI.getUse();
8228       ++UI;
8229       Use.setNode(To);
8230       if (To->isDivergent() != From->isDivergent())
8231         updateDivergence(User);
8232     } while (UI != UE && *UI == User);
8233 
8234     // Now that we have modified User, add it back to the CSE maps. If it
8235     // already exists there, recursively merge the results together.
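    // (If such a merge happens, nodes can be deleted out from under us; the
    // RAUWUpdateListener above advances our use iterator past any node that
    // gets deleted during the merge.)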
8236 AddModifiedNodeToCSEMaps(User); 8237 } 8238 8239 // If we just RAUW'd the root, take note. 8240 if (From == getRoot().getNode()) 8241 setRoot(SDValue(To, getRoot().getResNo())); 8242 } 8243 8244 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. 8245 /// This can cause recursive merging of nodes in the DAG. 8246 /// 8247 /// This version can replace From with any result values. To must match the 8248 /// number and types of values returned by From. 8249 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { 8250 if (From->getNumValues() == 1) // Handle the simple case efficiently. 8251 return ReplaceAllUsesWith(SDValue(From, 0), To[0]); 8252 8253 // Preserve Debug Info. 8254 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) 8255 transferDbgValues(SDValue(From, i), To[i]); 8256 8257 // Iterate over just the existing users of From. See the comments in 8258 // the ReplaceAllUsesWith above. 8259 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); 8260 RAUWUpdateListener Listener(*this, UI, UE); 8261 while (UI != UE) { 8262 SDNode *User = *UI; 8263 8264 // This node is about to morph, remove its old self from the CSE maps. 8265 RemoveNodeFromCSEMaps(User); 8266 8267 // A user can appear in a use list multiple times, and when this happens the 8268 // uses are usually next to each other in the list. To help reduce the 8269 // number of CSE and divergence recomputations, process all the uses of this 8270 // user that we can find this way. 8271 bool To_IsDivergent = false; 8272 do { 8273 SDUse &Use = UI.getUse(); 8274 const SDValue &ToOp = To[Use.getResNo()]; 8275 ++UI; 8276 Use.set(ToOp); 8277 To_IsDivergent |= ToOp->isDivergent(); 8278 } while (UI != UE && *UI == User); 8279 8280 if (To_IsDivergent != From->isDivergent()) 8281 updateDivergence(User); 8282 8283 // Now that we have modified User, add it back to the CSE maps. If it 8284 // already exists there, recursively merge the results together. 8285 AddModifiedNodeToCSEMaps(User); 8286 } 8287 8288 // If we just RAUW'd the root, take note. 8289 if (From == getRoot().getNode()) 8290 setRoot(SDValue(To[getRoot().getResNo()])); 8291 } 8292 8293 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving 8294 /// uses of other values produced by From.getNode() alone. The Deleted 8295 /// vector is handled the same way as for ReplaceAllUsesWith. 8296 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){ 8297 // Handle the really simple, really trivial case efficiently. 8298 if (From == To) return; 8299 8300 // Handle the simple, trivial, case efficiently. 8301 if (From.getNode()->getNumValues() == 1) { 8302 ReplaceAllUsesWith(From, To); 8303 return; 8304 } 8305 8306 // Preserve Debug Info. 8307 transferDbgValues(From, To); 8308 8309 // Iterate over just the existing users of From. See the comments in 8310 // the ReplaceAllUsesWith above. 8311 SDNode::use_iterator UI = From.getNode()->use_begin(), 8312 UE = From.getNode()->use_end(); 8313 RAUWUpdateListener Listener(*this, UI, UE); 8314 while (UI != UE) { 8315 SDNode *User = *UI; 8316 bool UserRemovedFromCSEMaps = false; 8317 8318 // A user can appear in a use list multiple times, and when this 8319 // happens the uses are usually next to each other in the list. 8320 // To help reduce the number of CSE recomputations, process all 8321 // the uses of this user that we can find this way. 8322 do { 8323 SDUse &Use = UI.getUse(); 8324 8325 // Skip uses of different values from the same node. 
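      // (For example, when From is the value result of a load, uses of the
      // load's chain result must be left alone here.)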
8326       if (Use.getResNo() != From.getResNo()) {
8327         ++UI;
8328         continue;
8329       }
8330 
8331       // If this node hasn't been modified yet, it's still in the CSE maps,
8332       // so remove its old self from the CSE maps.
8333       if (!UserRemovedFromCSEMaps) {
8334         RemoveNodeFromCSEMaps(User);
8335         UserRemovedFromCSEMaps = true;
8336       }
8337 
8338       ++UI;
8339       Use.set(To);
8340       if (To->isDivergent() != From->isDivergent())
8341         updateDivergence(User);
8342     } while (UI != UE && *UI == User);
8343     // We are iterating over all uses of the From node, so if a use
8344     // doesn't use the specific value, no changes are made.
8345     if (!UserRemovedFromCSEMaps)
8346       continue;
8347 
8348     // Now that we have modified User, add it back to the CSE maps. If it
8349     // already exists there, recursively merge the results together.
8350     AddModifiedNodeToCSEMaps(User);
8351   }
8352 
8353   // If we just RAUW'd the root, take note.
8354   if (From == getRoot())
8355     setRoot(To);
8356 }
8357 
8358 namespace {
8359 
8360 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
8361 /// to record information about a use.
8362 struct UseMemo {
8363   SDNode *User;
8364   unsigned Index;
8365   SDUse *Use;
8366 };
8367 
8368 /// operator< - Sort Memos by User.
8369 bool operator<(const UseMemo &L, const UseMemo &R) {
8370   return (intptr_t)L.User < (intptr_t)R.User;
8371 }
8372 
8373 } // end anonymous namespace
8374 
8375 void SelectionDAG::updateDivergence(SDNode *N)
8376 {
8377   if (TLI->isSDNodeAlwaysUniform(N))
8378     return;
8379   bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
8380   for (auto &Op : N->ops()) {
8381     if (Op.Val.getValueType() != MVT::Other)
8382       IsDivergent |= Op.getNode()->isDivergent();
8383   }
8384   if (N->SDNodeBits.IsDivergent != IsDivergent) {
8385     N->SDNodeBits.IsDivergent = IsDivergent;
8386     for (auto U : N->uses()) {
8387       updateDivergence(U);
8388     }
8389   }
8390 }
8391 
8392 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
8393   DenseMap<SDNode *, unsigned> Degree;
8394   Order.reserve(AllNodes.size());
8395   for (auto &N : allnodes()) {
8396     unsigned NOps = N.getNumOperands();
8397     Degree[&N] = NOps;
8398     if (0 == NOps)
8399       Order.push_back(&N);
8400   }
8401   for (size_t I = 0; I != Order.size(); ++I) {
8402     SDNode *N = Order[I];
8403     for (auto U : N->uses()) {
8404       unsigned &UnsortedOps = Degree[U];
8405       if (0 == --UnsortedOps)
8406         Order.push_back(U);
8407     }
8408   }
8409 }
8410 
8411 #ifndef NDEBUG
8412 void SelectionDAG::VerifyDAGDivergence() {
8413   std::vector<SDNode *> TopoOrder;
8414   CreateTopologicalOrder(TopoOrder);
8415   const TargetLowering &TLI = getTargetLoweringInfo();
8416   DenseMap<const SDNode *, bool> DivergenceMap;
8417   for (auto &N : allnodes()) {
8418     DivergenceMap[&N] = false;
8419   }
8420   for (auto N : TopoOrder) {
8421     bool IsDivergent = DivergenceMap[N];
8422     bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
8423     for (auto &Op : N->ops()) {
8424       if (Op.Val.getValueType() != MVT::Other)
8425         IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
8426     }
8427     if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) {
8428       DivergenceMap[N] = true;
8429     }
8430   }
8431   for (auto &N : allnodes()) {
8432     (void)N;
8433     assert(DivergenceMap[&N] == N.isDivergent() &&
8434            "Divergence bit inconsistency detected\n");
8435   }
8436 }
8437 #endif
8438 
8439 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
8440 /// uses of other values produced by From.getNode() alone.
The same value
8441 /// may appear in both the From and To list. The Deleted vector is
8442 /// handled the same way as for ReplaceAllUsesWith.
8443 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
8444                                               const SDValue *To,
8445                                               unsigned Num) {
8446   // Handle the simple, trivial case efficiently.
8447   if (Num == 1)
8448     return ReplaceAllUsesOfValueWith(*From, *To);
8449 
8450   transferDbgValues(*From, *To);
8451 
8452   // Record all the existing uses up front; this makes it possible to cope
8453   // with new uses that are introduced while the replacement is in
8454   // progress.
8455   SmallVector<UseMemo, 4> Uses;
8456   for (unsigned i = 0; i != Num; ++i) {
8457     unsigned FromResNo = From[i].getResNo();
8458     SDNode *FromNode = From[i].getNode();
8459     for (SDNode::use_iterator UI = FromNode->use_begin(),
8460          E = FromNode->use_end(); UI != E; ++UI) {
8461       SDUse &Use = UI.getUse();
8462       if (Use.getResNo() == FromResNo) {
8463         UseMemo Memo = { *UI, i, &Use };
8464         Uses.push_back(Memo);
8465       }
8466     }
8467   }
8468 
8469   // Sort the uses, so that all the uses from a given User are together.
8470   llvm::sort(Uses);
8471 
8472   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8473        UseIndex != UseIndexEnd; ) {
8474     // We know that this user uses some value of From. If it is the right
8475     // value, update it.
8476     SDNode *User = Uses[UseIndex].User;
8477 
8478     // This node is about to morph, remove its old self from the CSE maps.
8479     RemoveNodeFromCSEMaps(User);
8480 
8481     // The Uses array is sorted, so all the uses for a given User
8482     // are next to each other in the list.
8483     // To help reduce the number of CSE recomputations, process all
8484     // the uses of this user that we can find this way.
8485     do {
8486       unsigned i = Uses[UseIndex].Index;
8487       SDUse &Use = *Uses[UseIndex].Use;
8488       ++UseIndex;
8489 
8490       Use.set(To[i]);
8491     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8492 
8493     // Now that we have modified User, add it back to the CSE maps. If it
8494     // already exists there, recursively merge the results together.
8495     AddModifiedNodeToCSEMaps(User);
8496   }
8497 }
8498 
8499 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
8500 /// based on their topological order. It returns the number of nodes assigned,
8501 /// which is one more than the maximum assigned id.
8502 unsigned SelectionDAG::AssignTopologicalOrder() {
8503   unsigned DAGSize = 0;
8504 
8505   // SortedPos tracks the progress of the algorithm. Nodes before it are
8506   // sorted, nodes after it are unsorted. When the algorithm completes
8507   // it is at the end of the list.
8508   allnodes_iterator SortedPos = allnodes_begin();
8509 
8510   // Visit all the nodes. Move nodes with no operands to the front of
8511   // the list immediately. Annotate nodes that do have operands with their
8512   // operand count. Before we do this, the Node Id fields of the nodes
8513   // may contain arbitrary values. After, the Node Id fields for nodes
8514   // before SortedPos will contain the topological sort index, and the
8515   // Node Id fields for nodes at SortedPos and after will contain the
8516   // count of outstanding operands.
8517   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
8518     SDNode *N = &*I++;
8519     checkForCycles(N, this);
8520     unsigned Degree = N->getNumOperands();
8521     if (Degree == 0) {
8522       // A node with no operands, add it to the result array immediately.
8523       N->setNodeId(DAGSize++);
8524       allnodes_iterator Q(N);
8525       if (Q != SortedPos)
8526         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
8527       assert(SortedPos != AllNodes.end() && "Overran node list");
8528       ++SortedPos;
8529     } else {
8530       // Temporarily use the Node Id as scratch space for the degree count.
8531       N->setNodeId(Degree);
8532     }
8533   }
8534 
8535   // Visit all the nodes. As we iterate, move nodes into sorted order,
8536   // such that by the time the end is reached all nodes will be sorted.
8537   for (SDNode &Node : allnodes()) {
8538     SDNode *N = &Node;
8539     checkForCycles(N, this);
8540     // N is in sorted position, so each of its users has one less operand
8541     // that needs to be sorted.
8542     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
8543          UI != UE; ++UI) {
8544       SDNode *P = *UI;
8545       unsigned Degree = P->getNodeId();
8546       assert(Degree != 0 && "Invalid node degree");
8547       --Degree;
8548       if (Degree == 0) {
8549         // All of P's operands are sorted, so P may be sorted now.
8550         P->setNodeId(DAGSize++);
8551         if (P->getIterator() != SortedPos)
8552           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
8553         assert(SortedPos != AllNodes.end() && "Overran node list");
8554         ++SortedPos;
8555       } else {
8556         // Update P's outstanding operand count.
8557         P->setNodeId(Degree);
8558       }
8559     }
8560     if (Node.getIterator() == SortedPos) {
8561 #ifndef NDEBUG
8562       allnodes_iterator I(N);
8563       SDNode *S = &*++I;
8564       dbgs() << "Overran sorted position:\n";
8565       S->dumprFull(this); dbgs() << "\n";
8566       dbgs() << "Checking if this is due to cycles\n";
8567       checkForCycles(this, true);
8568 #endif
8569       llvm_unreachable(nullptr);
8570     }
8571   }
8572 
8573   assert(SortedPos == AllNodes.end() &&
8574          "Topological sort incomplete!");
8575   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
8576          "First node in topological sort is not the entry token!");
8577   assert(AllNodes.front().getNodeId() == 0 &&
8578          "First node in topological sort has non-zero id!");
8579   assert(AllNodes.front().getNumOperands() == 0 &&
8580          "First node in topological sort has operands!");
8581   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
8582          "Last node in topological sort has unexpected id!");
8583   assert(AllNodes.back().use_empty() &&
8584          "Last node in topological sort has users!");
8585   assert(DAGSize == allnodes_size() && "Node count mismatch!");
8586   return DAGSize;
8587 }
8588 
8589 /// AddDbgValue - Add a dbg_value SDDbgValue. If SD is non-null that means the
8590 /// value is produced by SD.
8591 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
8592   if (SD) {
8593     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
8594     SD->setHasDebugValue(true);
8595   }
8596   DbgInfo->add(DB, SD, isParameter);
8597 }
8598 
8599 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
8600   DbgInfo->add(DB);
8601 }
8602 
8603 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
8604                                                    SDValue NewMemOp) {
8605   assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
8606   // The new memory operation must have the same position as the old load in
8607   // terms of memory dependency. Create a TokenFactor for the old load and new
8608   // memory operation and update uses of the old load's output chain to use that
8609   // TokenFactor.
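  //
  // A sketch of the rewiring, before and after:
  //
  //   chain users of OldLoad          chain users of OldLoad
  //            |                               |
  //        OldLoad:1          =>          TokenFactor
  //                                        /        \
  //                                  OldLoad:1   NewMemOp:1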
8610 SDValue OldChain = SDValue(OldLoad, 1); 8611 SDValue NewChain = SDValue(NewMemOp.getNode(), 1); 8612 if (!OldLoad->hasAnyUseOfValue(1)) 8613 return NewChain; 8614 8615 SDValue TokenFactor = 8616 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain); 8617 ReplaceAllUsesOfValueWith(OldChain, TokenFactor); 8618 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain); 8619 return TokenFactor; 8620 } 8621 8622 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op, 8623 Function **OutFunction) { 8624 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol"); 8625 8626 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 8627 auto *Module = MF->getFunction().getParent(); 8628 auto *Function = Module->getFunction(Symbol); 8629 8630 if (OutFunction != nullptr) 8631 *OutFunction = Function; 8632 8633 if (Function != nullptr) { 8634 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace()); 8635 return getGlobalAddress(Function, SDLoc(Op), PtrTy); 8636 } 8637 8638 std::string ErrorStr; 8639 raw_string_ostream ErrorFormatter(ErrorStr); 8640 8641 ErrorFormatter << "Undefined external symbol "; 8642 ErrorFormatter << '"' << Symbol << '"'; 8643 ErrorFormatter.flush(); 8644 8645 report_fatal_error(ErrorStr); 8646 } 8647 8648 //===----------------------------------------------------------------------===// 8649 // SDNode Class 8650 //===----------------------------------------------------------------------===// 8651 8652 bool llvm::isNullConstant(SDValue V) { 8653 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8654 return Const != nullptr && Const->isNullValue(); 8655 } 8656 8657 bool llvm::isNullFPConstant(SDValue V) { 8658 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); 8659 return Const != nullptr && Const->isZero() && !Const->isNegative(); 8660 } 8661 8662 bool llvm::isAllOnesConstant(SDValue V) { 8663 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8664 return Const != nullptr && Const->isAllOnesValue(); 8665 } 8666 8667 bool llvm::isOneConstant(SDValue V) { 8668 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); 8669 return Const != nullptr && Const->isOne(); 8670 } 8671 8672 SDValue llvm::peekThroughBitcasts(SDValue V) { 8673 while (V.getOpcode() == ISD::BITCAST) 8674 V = V.getOperand(0); 8675 return V; 8676 } 8677 8678 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { 8679 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) 8680 V = V.getOperand(0); 8681 return V; 8682 } 8683 8684 SDValue llvm::peekThroughExtractSubvectors(SDValue V) { 8685 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) 8686 V = V.getOperand(0); 8687 return V; 8688 } 8689 8690 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) { 8691 if (V.getOpcode() != ISD::XOR) 8692 return false; 8693 V = peekThroughBitcasts(V.getOperand(1)); 8694 unsigned NumBits = V.getScalarValueSizeInBits(); 8695 ConstantSDNode *C = 8696 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true); 8697 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits); 8698 } 8699 8700 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs, 8701 bool AllowTruncation) { 8702 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8703 return CN; 8704 8705 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8706 BitVector UndefElements; 8707 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements); 8708 8709 // BuildVectors can truncate their operands. 
Ignore that case here unless 8710 // AllowTruncation is set. 8711 if (CN && (UndefElements.none() || AllowUndefs)) { 8712 EVT CVT = CN->getValueType(0); 8713 EVT NSVT = N.getValueType().getScalarType(); 8714 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 8715 if (AllowTruncation || (CVT == NSVT)) 8716 return CN; 8717 } 8718 } 8719 8720 return nullptr; 8721 } 8722 8723 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts, 8724 bool AllowUndefs, 8725 bool AllowTruncation) { 8726 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) 8727 return CN; 8728 8729 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8730 BitVector UndefElements; 8731 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements); 8732 8733 // BuildVectors can truncate their operands. Ignore that case here unless 8734 // AllowTruncation is set. 8735 if (CN && (UndefElements.none() || AllowUndefs)) { 8736 EVT CVT = CN->getValueType(0); 8737 EVT NSVT = N.getValueType().getScalarType(); 8738 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); 8739 if (AllowTruncation || (CVT == NSVT)) 8740 return CN; 8741 } 8742 } 8743 8744 return nullptr; 8745 } 8746 8747 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) { 8748 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 8749 return CN; 8750 8751 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8752 BitVector UndefElements; 8753 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements); 8754 if (CN && (UndefElements.none() || AllowUndefs)) 8755 return CN; 8756 } 8757 8758 return nullptr; 8759 } 8760 8761 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, 8762 const APInt &DemandedElts, 8763 bool AllowUndefs) { 8764 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) 8765 return CN; 8766 8767 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { 8768 BitVector UndefElements; 8769 ConstantFPSDNode *CN = 8770 BV->getConstantFPSplatNode(DemandedElts, &UndefElements); 8771 if (CN && (UndefElements.none() || AllowUndefs)) 8772 return CN; 8773 } 8774 8775 return nullptr; 8776 } 8777 8778 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) { 8779 // TODO: may want to use peekThroughBitcast() here. 8780 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs); 8781 return C && C->isNullValue(); 8782 } 8783 8784 bool llvm::isOneOrOneSplat(SDValue N) { 8785 // TODO: may want to use peekThroughBitcast() here. 
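  // The bit-width check below is defensive: a constant whose own type is
  // narrower than N's scalar type (an implicitly truncated build vector
  // element) is not treated as a genuine splat of 1.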
8786 unsigned BitWidth = N.getScalarValueSizeInBits(); 8787 ConstantSDNode *C = isConstOrConstSplat(N); 8788 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth; 8789 } 8790 8791 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) { 8792 N = peekThroughBitcasts(N); 8793 unsigned BitWidth = N.getScalarValueSizeInBits(); 8794 ConstantSDNode *C = isConstOrConstSplat(N); 8795 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth; 8796 } 8797 8798 HandleSDNode::~HandleSDNode() { 8799 DropOperands(); 8800 } 8801 8802 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 8803 const DebugLoc &DL, 8804 const GlobalValue *GA, EVT VT, 8805 int64_t o, unsigned char TF) 8806 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 8807 TheGlobal = GA; 8808 } 8809 8810 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, 8811 EVT VT, unsigned SrcAS, 8812 unsigned DestAS) 8813 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), 8814 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} 8815 8816 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, 8817 SDVTList VTs, EVT memvt, MachineMemOperand *mmo) 8818 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 8819 MemSDNodeBits.IsVolatile = MMO->isVolatile(); 8820 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); 8821 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); 8822 MemSDNodeBits.IsInvariant = MMO->isInvariant(); 8823 8824 // We check here that the size of the memory operand fits within the size of 8825 // the MMO. This is because the MMO might indicate only a possible address 8826 // range instead of specifying the affected memory addresses precisely. 8827 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!"); 8828 } 8829 8830 /// Profile - Gather unique data for the node. 8831 /// 8832 void SDNode::Profile(FoldingSetNodeID &ID) const { 8833 AddNodeIDNode(ID, this); 8834 } 8835 8836 namespace { 8837 8838 struct EVTArray { 8839 std::vector<EVT> VTs; 8840 8841 EVTArray() { 8842 VTs.reserve(MVT::LAST_VALUETYPE); 8843 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 8844 VTs.push_back(MVT((MVT::SimpleValueType)i)); 8845 } 8846 }; 8847 8848 } // end anonymous namespace 8849 8850 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs; 8851 static ManagedStatic<EVTArray> SimpleVTArray; 8852 static ManagedStatic<sys::SmartMutex<true>> VTMutex; 8853 8854 /// getValueTypeList - Return a pointer to the specified value type. 8855 /// 8856 const EVT *SDNode::getValueTypeList(EVT VT) { 8857 if (VT.isExtended()) { 8858 sys::SmartScopedLock<true> Lock(*VTMutex); 8859 return &(*EVTs->insert(VT).first); 8860 } else { 8861 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && 8862 "Value type out of range!"); 8863 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy]; 8864 } 8865 } 8866 8867 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the 8868 /// indicated value. This method ignores uses of other values defined by this 8869 /// operation. 8870 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { 8871 assert(Value < getNumValues() && "Bad value!"); 8872 8873 // TODO: Only iterate over uses of a given value of the node 8874 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { 8875 if (UI.getUse().getResNo() == Value) { 8876 if (NUses == 0) 8877 return false; 8878 --NUses; 8879 } 8880 } 8881 8882 // Found exactly the right number of uses? 
8883   return NUses == 0;
8884 }
8885 
8886 /// hasAnyUseOfValue - Return true if there is any use of the indicated
8887 /// value. This method ignores uses of other values defined by this operation.
8888 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
8889   assert(Value < getNumValues() && "Bad value!");
8890 
8891   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
8892     if (UI.getUse().getResNo() == Value)
8893       return true;
8894 
8895   return false;
8896 }
8897 
8898 /// isOnlyUserOf - Return true if this node is the only user of N.
8899 bool SDNode::isOnlyUserOf(const SDNode *N) const {
8900   bool Seen = false;
8901   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
8902     SDNode *User = *I;
8903     if (User == this)
8904       Seen = true;
8905     else
8906       return false;
8907   }
8908 
8909   return Seen;
8910 }
8911 
8912 /// Return true if the only users of N are contained in Nodes.
8913 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
8914   bool Seen = false;
8915   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
8916     SDNode *User = *I;
8917     if (llvm::any_of(Nodes,
8918                      [&User](const SDNode *Node) { return User == Node; }))
8919       Seen = true;
8920     else
8921       return false;
8922   }
8923 
8924   return Seen;
8925 }
8926 
8927 /// isOperandOf - Return true if this value is an operand of N.
8928 bool SDValue::isOperandOf(const SDNode *N) const {
8929   return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; });
8930 }
8931 
8932 bool SDNode::isOperandOf(const SDNode *N) const {
8933   return any_of(N->op_values(),
8934                 [this](SDValue Op) { return this == Op.getNode(); });
8935 }
8936 
8937 /// reachesChainWithoutSideEffects - Return true if this operand (which must
8938 /// be a chain) reaches the specified operand without crossing any
8939 /// side-effecting instructions on any chain path. In practice, this looks
8940 /// through token factors and non-volatile loads. In order to remain efficient,
8941 /// this only looks a couple of nodes in; it does not do an exhaustive search.
8942 ///
8943 /// Note that we only need to examine chains when we're searching for
8944 /// side-effects; SelectionDAG requires that all side-effects are represented
8945 /// by chains, even if another operand would force a specific ordering. This
8946 /// constraint is necessary to allow transformations like splitting loads.
8947 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
8948                                              unsigned Depth) const {
8949   if (*this == Dest) return true;
8950 
8951   // Don't search too deeply; we just want to be able to see through
8952   // TokenFactor's etc.
8953   if (Depth == 0) return false;
8954 
8955   // If this is a token factor, all inputs to the TF happen in parallel.
8956   if (getOpcode() == ISD::TokenFactor) {
8957     // First, try a shallow search.
8958     if (is_contained((*this)->ops(), Dest)) {
8959       // We found the chain we want as an operand of this TokenFactor.
8960       // Essentially, we reach the chain without side-effects if we could
8961       // serialize the TokenFactor into a simple chain of operations with
8962       // Dest as the last operation. This is automatically true if the
8963       // chain has one use: there are no other ordering constraints.
8964       // If the chain has more than one use, we give up: some other
8965       // use of Dest might force a side-effect between Dest and the current
8966       // node.
8967 if (Dest.hasOneUse()) 8968 return true; 8969 } 8970 // Next, try a deep search: check whether every operand of the TokenFactor 8971 // reaches Dest. 8972 return llvm::all_of((*this)->ops(), [=](SDValue Op) { 8973 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); 8974 }); 8975 } 8976 8977 // Loads don't have side effects, look through them. 8978 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { 8979 if (!Ld->isVolatile()) 8980 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); 8981 } 8982 return false; 8983 } 8984 8985 bool SDNode::hasPredecessor(const SDNode *N) const { 8986 SmallPtrSet<const SDNode *, 32> Visited; 8987 SmallVector<const SDNode *, 16> Worklist; 8988 Worklist.push_back(this); 8989 return hasPredecessorHelper(N, Visited, Worklist); 8990 } 8991 8992 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { 8993 this->Flags.intersectWith(Flags); 8994 } 8995 8996 SDValue 8997 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, 8998 ArrayRef<ISD::NodeType> CandidateBinOps) { 8999 // The pattern must end in an extract from index 0. 9000 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || 9001 !isNullConstant(Extract->getOperand(1))) 9002 return SDValue(); 9003 9004 SDValue Op = Extract->getOperand(0); 9005 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements()); 9006 9007 // Match against one of the candidate binary ops. 9008 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) { 9009 return Op.getOpcode() == unsigned(BinOp); 9010 })) 9011 return SDValue(); 9012 9013 // At each stage, we're looking for something that looks like: 9014 // %s = shufflevector <8 x i32> %op, <8 x i32> undef, 9015 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, 9016 // i32 undef, i32 undef, i32 undef, i32 undef> 9017 // %a = binop <8 x i32> %op, %s 9018 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid, 9019 // we expect something like: 9020 // <4,5,6,7,u,u,u,u> 9021 // <2,3,u,u,u,u,u,u> 9022 // <1,u,u,u,u,u,u,u> 9023 unsigned CandidateBinOp = Op.getOpcode(); 9024 for (unsigned i = 0; i < Stages; ++i) { 9025 if (Op.getOpcode() != CandidateBinOp) 9026 return SDValue(); 9027 9028 SDValue Op0 = Op.getOperand(0); 9029 SDValue Op1 = Op.getOperand(1); 9030 9031 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0); 9032 if (Shuffle) { 9033 Op = Op1; 9034 } else { 9035 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1); 9036 Op = Op0; 9037 } 9038 9039 // The first operand of the shuffle should be the same as the other operand 9040 // of the binop. 9041 if (!Shuffle || Shuffle->getOperand(0) != Op) 9042 return SDValue(); 9043 9044 // Verify the shuffle has the expected (at this stage of the pyramid) mask. 9045 for (int Index = 0, MaskEnd = 1 << i; Index < MaskEnd; ++Index) 9046 if (Shuffle->getMaskElt(Index) != MaskEnd + Index) 9047 return SDValue(); 9048 } 9049 9050 BinOp = (ISD::NodeType)CandidateBinOp; 9051 return Op; 9052 } 9053 9054 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { 9055 assert(N->getNumValues() == 1 && 9056 "Can't unroll a vector with multiple results!"); 9057 9058 EVT VT = N->getValueType(0); 9059 unsigned NE = VT.getVectorNumElements(); 9060 EVT EltVT = VT.getVectorElementType(); 9061 SDLoc dl(N); 9062 9063 SmallVector<SDValue, 8> Scalars; 9064 SmallVector<SDValue, 4> Operands(N->getNumOperands()); 9065 9066 // If ResNE is 0, fully unroll the vector op. 
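  // For example, unrolling (add v4i32 %a, %b) with ResNE == 0 yields four
  // scalar i32 adds on extracted elements that are then recombined with a
  // BUILD_VECTOR; with ResNE == 8 the four extra lanes are filled with UNDEF.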
9067 if (ResNE == 0) 9068 ResNE = NE; 9069 else if (NE > ResNE) 9070 NE = ResNE; 9071 9072 unsigned i; 9073 for (i= 0; i != NE; ++i) { 9074 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { 9075 SDValue Operand = N->getOperand(j); 9076 EVT OperandVT = Operand.getValueType(); 9077 if (OperandVT.isVector()) { 9078 // A vector operand; extract a single element. 9079 EVT OperandEltVT = OperandVT.getVectorElementType(); 9080 Operands[j] = 9081 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand, 9082 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout()))); 9083 } else { 9084 // A scalar operand; just use it as is. 9085 Operands[j] = Operand; 9086 } 9087 } 9088 9089 switch (N->getOpcode()) { 9090 default: { 9091 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, 9092 N->getFlags())); 9093 break; 9094 } 9095 case ISD::VSELECT: 9096 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); 9097 break; 9098 case ISD::SHL: 9099 case ISD::SRA: 9100 case ISD::SRL: 9101 case ISD::ROTL: 9102 case ISD::ROTR: 9103 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 9104 getShiftAmountOperand(Operands[0].getValueType(), 9105 Operands[1]))); 9106 break; 9107 case ISD::SIGN_EXTEND_INREG: 9108 case ISD::FP_ROUND_INREG: { 9109 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 9110 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 9111 Operands[0], 9112 getValueType(ExtVT))); 9113 } 9114 } 9115 } 9116 9117 for (; i < ResNE; ++i) 9118 Scalars.push_back(getUNDEF(EltVT)); 9119 9120 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); 9121 return getBuildVector(VecVT, dl, Scalars); 9122 } 9123 9124 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( 9125 SDNode *N, unsigned ResNE) { 9126 unsigned Opcode = N->getOpcode(); 9127 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO || 9128 Opcode == ISD::USUBO || Opcode == ISD::SSUBO || 9129 Opcode == ISD::UMULO || Opcode == ISD::SMULO) && 9130 "Expected an overflow opcode"); 9131 9132 EVT ResVT = N->getValueType(0); 9133 EVT OvVT = N->getValueType(1); 9134 EVT ResEltVT = ResVT.getVectorElementType(); 9135 EVT OvEltVT = OvVT.getVectorElementType(); 9136 SDLoc dl(N); 9137 9138 // If ResNE is 0, fully unroll the vector op. 
9139 unsigned NE = ResVT.getVectorNumElements(); 9140 if (ResNE == 0) 9141 ResNE = NE; 9142 else if (NE > ResNE) 9143 NE = ResNE; 9144 9145 SmallVector<SDValue, 8> LHSScalars; 9146 SmallVector<SDValue, 8> RHSScalars; 9147 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); 9148 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); 9149 9150 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); 9151 SDVTList VTs = getVTList(ResEltVT, SVT); 9152 SmallVector<SDValue, 8> ResScalars; 9153 SmallVector<SDValue, 8> OvScalars; 9154 for (unsigned i = 0; i < NE; ++i) { 9155 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); 9156 SDValue Ov = 9157 getSelect(dl, OvEltVT, Res.getValue(1), 9158 getBoolConstant(true, dl, OvEltVT, ResVT), 9159 getConstant(0, dl, OvEltVT)); 9160 9161 ResScalars.push_back(Res); 9162 OvScalars.push_back(Ov); 9163 } 9164 9165 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); 9166 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); 9167 9168 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE); 9169 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE); 9170 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars), 9171 getBuildVector(NewOvVT, dl, OvScalars)); 9172 } 9173 9174 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, 9175 LoadSDNode *Base, 9176 unsigned Bytes, 9177 int Dist) const { 9178 if (LD->isVolatile() || Base->isVolatile()) 9179 return false; 9180 if (LD->isIndexed() || Base->isIndexed()) 9181 return false; 9182 if (LD->getChain() != Base->getChain()) 9183 return false; 9184 EVT VT = LD->getValueType(0); 9185 if (VT.getSizeInBits() / 8 != Bytes) 9186 return false; 9187 9188 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); 9189 auto LocDecomp = BaseIndexOffset::match(LD, *this); 9190 9191 int64_t Offset = 0; 9192 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset)) 9193 return (Dist * Bytes == Offset); 9194 return false; 9195 } 9196 9197 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if 9198 /// it cannot be inferred. 9199 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { 9200 // If this is a GlobalAddress + cst, return the alignment. 9201 const GlobalValue *GV; 9202 int64_t GVOffset = 0; 9203 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { 9204 unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType()); 9205 KnownBits Known(IdxWidth); 9206 llvm::computeKnownBits(GV, Known, getDataLayout()); 9207 unsigned AlignBits = Known.countMinTrailingZeros(); 9208 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; 9209 if (Align) 9210 return MinAlign(Align, GVOffset); 9211 } 9212 9213 // If this is a direct reference to a stack slot, use information about the 9214 // stack slot's alignment. 
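  // Both a plain FrameIndex and the (FrameIndex + constant) form are handled
  // below; e.g. a 16-byte-aligned stack slot accessed at offset 4 gives an
  // inferred alignment of MinAlign(16, 4) == 4.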
9215 int FrameIdx = INT_MIN; 9216 int64_t FrameOffset = 0; 9217 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) { 9218 FrameIdx = FI->getIndex(); 9219 } else if (isBaseWithConstantOffset(Ptr) && 9220 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 9221 // Handle FI+Cst 9222 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 9223 FrameOffset = Ptr.getConstantOperandVal(1); 9224 } 9225 9226 if (FrameIdx != INT_MIN) { 9227 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); 9228 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx), 9229 FrameOffset); 9230 return FIInfoAlign; 9231 } 9232 9233 return 0; 9234 } 9235 9236 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type 9237 /// which is split (or expanded) into two not necessarily identical pieces. 9238 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const { 9239 // Currently all types are split in half. 9240 EVT LoVT, HiVT; 9241 if (!VT.isVector()) 9242 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); 9243 else 9244 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext()); 9245 9246 return std::make_pair(LoVT, HiVT); 9247 } 9248 9249 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the 9250 /// low/high part. 9251 std::pair<SDValue, SDValue> 9252 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, 9253 const EVT &HiVT) { 9254 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <= 9255 N.getValueType().getVectorNumElements() && 9256 "More vector elements requested than available!"); 9257 SDValue Lo, Hi; 9258 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, 9259 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout()))); 9260 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, 9261 getConstant(LoVT.getVectorNumElements(), DL, 9262 TLI->getVectorIdxTy(getDataLayout()))); 9263 return std::make_pair(Lo, Hi); 9264 } 9265 9266 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR. 9267 SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) { 9268 EVT VT = N.getValueType(); 9269 EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(), 9270 NextPowerOf2(VT.getVectorNumElements())); 9271 return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N, 9272 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout()))); 9273 } 9274 9275 void SelectionDAG::ExtractVectorElements(SDValue Op, 9276 SmallVectorImpl<SDValue> &Args, 9277 unsigned Start, unsigned Count) { 9278 EVT VT = Op.getValueType(); 9279 if (Count == 0) 9280 Count = VT.getVectorNumElements(); 9281 9282 EVT EltVT = VT.getVectorElementType(); 9283 EVT IdxTy = TLI->getVectorIdxTy(getDataLayout()); 9284 SDLoc SL(Op); 9285 for (unsigned i = Start, e = Start + Count; i != e; ++i) { 9286 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, 9287 Op, getConstant(i, SL, IdxTy))); 9288 } 9289 } 9290 9291 // getAddressSpace - Return the address space this GlobalAddress belongs to. 
9292 unsigned GlobalAddressSDNode::getAddressSpace() const { 9293 return getGlobal()->getType()->getAddressSpace(); 9294 } 9295 9296 Type *ConstantPoolSDNode::getType() const { 9297 if (isMachineConstantPoolEntry()) 9298 return Val.MachineCPVal->getType(); 9299 return Val.ConstVal->getType(); 9300 } 9301 9302 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef, 9303 unsigned &SplatBitSize, 9304 bool &HasAnyUndefs, 9305 unsigned MinSplatBits, 9306 bool IsBigEndian) const { 9307 EVT VT = getValueType(0); 9308 assert(VT.isVector() && "Expected a vector type"); 9309 unsigned VecWidth = VT.getSizeInBits(); 9310 if (MinSplatBits > VecWidth) 9311 return false; 9312 9313 // FIXME: The widths are based on this node's type, but build vectors can 9314 // truncate their operands. 9315 SplatValue = APInt(VecWidth, 0); 9316 SplatUndef = APInt(VecWidth, 0); 9317 9318 // Get the bits. Bits with undefined values (when the corresponding element 9319 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared 9320 // in SplatValue. If any of the values are not constant, give up and return 9321 // false. 9322 unsigned int NumOps = getNumOperands(); 9323 assert(NumOps > 0 && "isConstantSplat has 0-size build vector"); 9324 unsigned EltWidth = VT.getScalarSizeInBits(); 9325 9326 for (unsigned j = 0; j < NumOps; ++j) { 9327 unsigned i = IsBigEndian ? NumOps - 1 - j : j; 9328 SDValue OpVal = getOperand(i); 9329 unsigned BitPos = j * EltWidth; 9330 9331 if (OpVal.isUndef()) 9332 SplatUndef.setBits(BitPos, BitPos + EltWidth); 9333 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal)) 9334 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos); 9335 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal)) 9336 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos); 9337 else 9338 return false; 9339 } 9340 9341 // The build_vector is all constants or undefs. Find the smallest element 9342 // size that splats the vector. 9343 HasAnyUndefs = (SplatUndef != 0); 9344 9345 // FIXME: This does not work for vectors with elements less than 8 bits. 9346 while (VecWidth > 8) { 9347 unsigned HalfSize = VecWidth / 2; 9348 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize); 9349 APInt LowValue = SplatValue.trunc(HalfSize); 9350 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize); 9351 APInt LowUndef = SplatUndef.trunc(HalfSize); 9352 9353 // If the two halves do not match (ignoring undef bits), stop here. 
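    // (For example, for a v4i8 constant <1, 1, 1, 1> the 32-bit pattern
    // 0x01010101 keeps matching its own halves down to 8 bits, so the loop
    // terminates with SplatBitSize == 8.)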
9354 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) || 9355 MinSplatBits > HalfSize) 9356 break; 9357 9358 SplatValue = HighValue | LowValue; 9359 SplatUndef = HighUndef & LowUndef; 9360 9361 VecWidth = HalfSize; 9362 } 9363 9364 SplatBitSize = VecWidth; 9365 return true; 9366 } 9367 9368 SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts, 9369 BitVector *UndefElements) const { 9370 if (UndefElements) { 9371 UndefElements->clear(); 9372 UndefElements->resize(getNumOperands()); 9373 } 9374 assert(getNumOperands() == DemandedElts.getBitWidth() && 9375 "Unexpected vector size"); 9376 if (!DemandedElts) 9377 return SDValue(); 9378 SDValue Splatted; 9379 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 9380 if (!DemandedElts[i]) 9381 continue; 9382 SDValue Op = getOperand(i); 9383 if (Op.isUndef()) { 9384 if (UndefElements) 9385 (*UndefElements)[i] = true; 9386 } else if (!Splatted) { 9387 Splatted = Op; 9388 } else if (Splatted != Op) { 9389 return SDValue(); 9390 } 9391 } 9392 9393 if (!Splatted) { 9394 unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros(); 9395 assert(getOperand(FirstDemandedIdx).isUndef() && 9396 "Can only have a splat without a constant for all undefs."); 9397 return getOperand(FirstDemandedIdx); 9398 } 9399 9400 return Splatted; 9401 } 9402 9403 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { 9404 APInt DemandedElts = APInt::getAllOnesValue(getNumOperands()); 9405 return getSplatValue(DemandedElts, UndefElements); 9406 } 9407 9408 ConstantSDNode * 9409 BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts, 9410 BitVector *UndefElements) const { 9411 return dyn_cast_or_null<ConstantSDNode>( 9412 getSplatValue(DemandedElts, UndefElements)); 9413 } 9414 9415 ConstantSDNode * 9416 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const { 9417 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements)); 9418 } 9419 9420 ConstantFPSDNode * 9421 BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts, 9422 BitVector *UndefElements) const { 9423 return dyn_cast_or_null<ConstantFPSDNode>( 9424 getSplatValue(DemandedElts, UndefElements)); 9425 } 9426 9427 ConstantFPSDNode * 9428 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const { 9429 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements)); 9430 } 9431 9432 int32_t 9433 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, 9434 uint32_t BitWidth) const { 9435 if (ConstantFPSDNode *CN = 9436 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) { 9437 bool IsExact; 9438 APSInt IntVal(BitWidth); 9439 const APFloat &APF = CN->getValueAPF(); 9440 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) != 9441 APFloat::opOK || 9442 !IsExact) 9443 return -1; 9444 9445 return IntVal.exactLogBase2(); 9446 } 9447 return -1; 9448 } 9449 9450 bool BuildVectorSDNode::isConstant() const { 9451 for (const SDValue &Op : op_values()) { 9452 unsigned Opc = Op.getOpcode(); 9453 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP) 9454 return false; 9455 } 9456 return true; 9457 } 9458 9459 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { 9460 // Find the first non-undef value in the shuffle mask. 
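  // For example, the mask <1, -1, 1, 1> is a splat of element 1: the first
  // non-undef entry fixes Idx = 1, and every later non-undef entry must
  // equal it.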
9461 unsigned i, e; 9462 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i) 9463 /* search */; 9464 9465 // If all elements are undefined, this shuffle can be considered a splat 9466 // (although it should eventually get simplified away completely). 9467 if (i == e) 9468 return true; 9469 9470 // Make sure all remaining elements are either undef or the same as the first 9471 // non-undef value. 9472 for (int Idx = Mask[i]; i != e; ++i) 9473 if (Mask[i] >= 0 && Mask[i] != Idx) 9474 return false; 9475 return true; 9476 } 9477 9478 // Returns the SDNode if it is a constant integer BuildVector 9479 // or constant integer. 9480 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) { 9481 if (isa<ConstantSDNode>(N)) 9482 return N.getNode(); 9483 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) 9484 return N.getNode(); 9485 // Treat a GlobalAddress supporting constant offset folding as a 9486 // constant integer. 9487 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N)) 9488 if (GA->getOpcode() == ISD::GlobalAddress && 9489 TLI->isOffsetFoldingLegal(GA)) 9490 return GA; 9491 return nullptr; 9492 } 9493 9494 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) { 9495 if (isa<ConstantFPSDNode>(N)) 9496 return N.getNode(); 9497 9498 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode())) 9499 return N.getNode(); 9500 9501 return nullptr; 9502 } 9503 9504 void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) { 9505 assert(!Node->OperandList && "Node already has operands"); 9506 assert(SDNode::getMaxNumOperands() >= Vals.size() && 9507 "too many operands to fit into SDNode"); 9508 SDUse *Ops = OperandRecycler.allocate( 9509 ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator); 9510 9511 bool IsDivergent = false; 9512 for (unsigned I = 0; I != Vals.size(); ++I) { 9513 Ops[I].setUser(Node); 9514 Ops[I].setInitial(Vals[I]); 9515 if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence. 9516 IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent(); 9517 } 9518 Node->NumOperands = Vals.size(); 9519 Node->OperandList = Ops; 9520 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA); 9521 if (!TLI->isSDNodeAlwaysUniform(Node)) 9522 Node->SDNodeBits.IsDivergent = IsDivergent; 9523 checkForCycles(Node); 9524 } 9525 9526 SDValue SelectionDAG::getTokenFactor(const SDLoc &DL, 9527 SmallVectorImpl<SDValue> &Vals) { 9528 size_t Limit = SDNode::getMaxNumOperands(); 9529 while (Vals.size() > Limit) { 9530 unsigned SliceIdx = Vals.size() - Limit; 9531 auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit); 9532 SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs); 9533 Vals.erase(Vals.begin() + SliceIdx, Vals.end()); 9534 Vals.emplace_back(NewTF); 9535 } 9536 return getNode(ISD::TokenFactor, DL, MVT::Other, Vals); 9537 } 9538 9539 #ifndef NDEBUG 9540 static void checkForCyclesHelper(const SDNode *N, 9541 SmallPtrSetImpl<const SDNode*> &Visited, 9542 SmallPtrSetImpl<const SDNode*> &Checked, 9543 const llvm::SelectionDAG *DAG) { 9544 // If this node has already been checked, don't check it again. 9545 if (Checked.count(N)) 9546 return; 9547 9548 // If a node has already been visited on this depth-first walk, reject it as 9549 // a cycle. 
9550 if (!Visited.insert(N).second) { 9551 errs() << "Detected cycle in SelectionDAG\n"; 9552 dbgs() << "Offending node:\n"; 9553 N->dumprFull(DAG); dbgs() << "\n"; 9554 abort(); 9555 } 9556 9557 for (const SDValue &Op : N->op_values()) 9558 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG); 9559 9560 Checked.insert(N); 9561 Visited.erase(N); 9562 } 9563 #endif 9564 9565 void llvm::checkForCycles(const llvm::SDNode *N, 9566 const llvm::SelectionDAG *DAG, 9567 bool force) { 9568 #ifndef NDEBUG 9569 bool check = force; 9570 #ifdef EXPENSIVE_CHECKS 9571 check = true; 9572 #endif // EXPENSIVE_CHECKS 9573 if (check) { 9574 assert(N && "Checking nonexistent SDNode"); 9575 SmallPtrSet<const SDNode*, 32> visited; 9576 SmallPtrSet<const SDNode*, 32> checked; 9577 checkForCyclesHelper(N, visited, checked, DAG); 9578 } 9579 #endif // !NDEBUG 9580 } 9581 9582 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) { 9583 checkForCycles(DAG->getRoot().getNode(), DAG, force); 9584 } 9585