1 //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the SelectionDAG class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/SelectionDAG.h"
14 #include "SDNodeDbgValue.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/APSInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/FoldingSet.h"
21 #include "llvm/ADT/None.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/ADT/Twine.h"
27 #include "llvm/Analysis/BlockFrequencyInfo.h"
28 #include "llvm/Analysis/MemoryLocation.h"
29 #include "llvm/Analysis/ProfileSummaryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/CodeGen/FunctionLoweringInfo.h"
32 #include "llvm/CodeGen/ISDOpcodes.h"
33 #include "llvm/CodeGen/MachineBasicBlock.h"
34 #include "llvm/CodeGen/MachineConstantPool.h"
35 #include "llvm/CodeGen/MachineFrameInfo.h"
36 #include "llvm/CodeGen/MachineFunction.h"
37 #include "llvm/CodeGen/MachineMemOperand.h"
38 #include "llvm/CodeGen/RuntimeLibcalls.h"
39 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
40 #include "llvm/CodeGen/SelectionDAGNodes.h"
41 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
42 #include "llvm/CodeGen/TargetFrameLowering.h"
43 #include "llvm/CodeGen/TargetLowering.h"
44 #include "llvm/CodeGen/TargetRegisterInfo.h"
45 #include "llvm/CodeGen/TargetSubtargetInfo.h"
46 #include "llvm/CodeGen/ValueTypes.h"
47 #include "llvm/IR/Constant.h"
48 #include "llvm/IR/Constants.h"
49 #include "llvm/IR/DataLayout.h"
50 #include "llvm/IR/DebugInfoMetadata.h"
51 #include "llvm/IR/DebugLoc.h"
52 #include "llvm/IR/DerivedTypes.h"
53 #include "llvm/IR/Function.h"
54 #include "llvm/IR/GlobalValue.h"
55 #include "llvm/IR/Metadata.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CodeGen.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/KnownBits.h"
64 #include "llvm/Support/MachineValueType.h"
65 #include "llvm/Support/ManagedStatic.h"
66 #include "llvm/Support/MathExtras.h"
67 #include "llvm/Support/Mutex.h"
68 #include "llvm/Support/raw_ostream.h"
69 #include "llvm/Target/TargetMachine.h"
70 #include "llvm/Target/TargetOptions.h"
71 #include "llvm/Transforms/Utils/SizeOpts.h"
72 #include <algorithm>
73 #include <cassert>
74 #include <cstdint>
75 #include <cstdlib>
76 #include <limits>
77 #include <set>
78 #include <string>
79 #include <utility>
80 #include <vector>
81 
82 using namespace llvm;
83 
84 /// makeVTList - Return an instance of the SDVTList struct initialized with the
85 /// specified members.
86 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
87   SDVTList Res = {VTs, NumVTs};
88   return Res;
89 }
90 
91 // Default null implementations of the callbacks.
92 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
93 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
94 void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}
95 
96 void SelectionDAG::DAGNodeDeletedListener::anchor() {}
97 
98 #define DEBUG_TYPE "selectiondag"
99 
100 static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
101        cl::Hidden, cl::init(true),
102        cl::desc("Gang up loads and stores generated by inlining of memcpy"));
103 
104 static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
105        cl::desc("Number limit for gluing ld/st of memcpy."),
106        cl::Hidden, cl::init(0));
107 
108 static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
109   LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
110 }
111 
112 //===----------------------------------------------------------------------===//
113 //                              ConstantFPSDNode Class
114 //===----------------------------------------------------------------------===//
115 
116 /// isExactlyValue - We don't rely on operator== working on double values, as
117 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
118 /// As such, this method can be used to do an exact bit-for-bit comparison of
119 /// two floating point values.
120 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
121   return getValueAPF().bitwiseIsEqual(V);
122 }
123 
124 bool ConstantFPSDNode::isValueValidForType(EVT VT,
125                                            const APFloat& Val) {
126   assert(VT.isFloatingPoint() && "Can only convert between FP types");
127 
128   // convert modifies in place, so make a copy.
129   APFloat Val2 = APFloat(Val);
130   bool losesInfo;
131   (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
132                       APFloat::rmNearestTiesToEven,
133                       &losesInfo);
134   return !losesInfo;
135 }
136 
137 //===----------------------------------------------------------------------===//
138 //                              ISD Namespace
139 //===----------------------------------------------------------------------===//
140 
141 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
142   if (N->getOpcode() == ISD::SPLAT_VECTOR) {
143     unsigned EltSize =
144         N->getValueType(0).getVectorElementType().getSizeInBits();
145     if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
146       SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
147       return true;
148     }
149   }
150 
151   auto *BV = dyn_cast<BuildVectorSDNode>(N);
152   if (!BV)
153     return false;
154 
155   APInt SplatUndef;
156   unsigned SplatBitSize;
157   bool HasUndefs;
158   unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
159   return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
160                              EltSize) &&
161          EltSize == SplatBitSize;
162 }
163 
164 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
165 // specializations of the more general isConstantSplatVector()?
166 
167 bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
168   // Look through a bit convert.
169   while (N->getOpcode() == ISD::BITCAST)
170     N = N->getOperand(0).getNode();
171 
172   if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
173     APInt SplatVal;
174     return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnesValue();
175   }
176 
177   if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
178 
179   unsigned i = 0, e = N->getNumOperands();
180 
181   // Skip over all of the undef values.
182   while (i != e && N->getOperand(i).isUndef())
183     ++i;
184 
185   // Do not accept an all-undef vector.
186   if (i == e) return false;
187 
188   // Do not accept build_vectors that aren't all constants or which have non-~0
189   // elements. We have to be a bit careful here, as the type of the constant
190   // may not be the same as the type of the vector elements due to type
191   // legalization (the elements are promoted to a legal type for the target and
192   // a vector of a type may be legal when the base element type is not).
193   // We only want to check enough bits to cover the vector elements, because
194   // we care if the resultant vector is all ones, not whether the individual
195   // constants are.
196   SDValue NotZero = N->getOperand(i);
197   unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
198   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
199     if (CN->getAPIntValue().countTrailingOnes() < EltSize)
200       return false;
201   } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
202     if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
203       return false;
204   } else
205     return false;
206 
207   // Okay, we have at least one ~0 value, check to see if the rest match or are
208   // undefs. Even with the above element type twiddling, this should be OK, as
209   // the same type legalization should have applied to all the elements.
210   for (++i; i != e; ++i)
211     if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
212       return false;
213   return true;
214 }
215 
216 bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
217   // Look through a bit convert.
218   while (N->getOpcode() == ISD::BITCAST)
219     N = N->getOperand(0).getNode();
220 
221   if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
222     APInt SplatVal;
223     return isConstantSplatVector(N, SplatVal) && SplatVal.isNullValue();
224   }
225 
226   if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
227 
228   bool IsAllUndef = true;
229   for (const SDValue &Op : N->op_values()) {
230     if (Op.isUndef())
231       continue;
232     IsAllUndef = false;
233     // Do not accept build_vectors that aren't all constants or which have non-0
234     // elements. We have to be a bit careful here, as the type of the constant
235     // may not be the same as the type of the vector elements due to type
236     // legalization (the elements are promoted to a legal type for the target
237     // and a vector of a type may be legal when the base element type is not).
238     // We only want to check enough bits to cover the vector elements, because
239     // we care if the resultant vector is all zeros, not whether the individual
240     // constants are.
241     unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
242     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
243       if (CN->getAPIntValue().countTrailingZeros() < EltSize)
244         return false;
245     } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
246       if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
247         return false;
248     } else
249       return false;
250   }
251 
252   // Do not accept an all-undef vector.
253   if (IsAllUndef)
254     return false;
255   return true;
256 }
257 
258 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
259   return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
260 }
261 
262 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
263   return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
264 }
265 
266 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
267   if (N->getOpcode() != ISD::BUILD_VECTOR)
268     return false;
269 
270   for (const SDValue &Op : N->op_values()) {
271     if (Op.isUndef())
272       continue;
273     if (!isa<ConstantSDNode>(Op))
274       return false;
275   }
276   return true;
277 }
278 
279 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
280   if (N->getOpcode() != ISD::BUILD_VECTOR)
281     return false;
282 
283   for (const SDValue &Op : N->op_values()) {
284     if (Op.isUndef())
285       continue;
286     if (!isa<ConstantFPSDNode>(Op))
287       return false;
288   }
289   return true;
290 }
291 
292 bool ISD::allOperandsUndef(const SDNode *N) {
293   // Return false if the node has no operands.
294   // This is "logically inconsistent" with the definition of "all" but
295   // is probably the desired behavior.
296   if (N->getNumOperands() == 0)
297     return false;
298   return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
299 }
300 
301 bool ISD::matchUnaryPredicate(SDValue Op,
302                               std::function<bool(ConstantSDNode *)> Match,
303                               bool AllowUndefs) {
304   // FIXME: Add support for scalar UNDEF cases?
305   if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
306     return Match(Cst);
307 
308   // FIXME: Add support for vector UNDEF cases?
309   if (ISD::BUILD_VECTOR != Op.getOpcode())
310     return false;
311 
312   EVT SVT = Op.getValueType().getScalarType();
313   for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
314     if (AllowUndefs && Op.getOperand(i).isUndef()) {
315       if (!Match(nullptr))
316         return false;
317       continue;
318     }
319 
320     auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
321     if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
322       return false;
323   }
324   return true;
325 }
326 
327 bool ISD::matchBinaryPredicate(
328     SDValue LHS, SDValue RHS,
329     std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
330     bool AllowUndefs, bool AllowTypeMismatch) {
331   if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
332     return false;
333 
334   // TODO: Add support for scalar UNDEF cases?
335   if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
336     if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
337       return Match(LHSCst, RHSCst);
338 
339   // TODO: Add support for vector UNDEF cases?
340   if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
341       ISD::BUILD_VECTOR != RHS.getOpcode())
342     return false;
343 
344   EVT SVT = LHS.getValueType().getScalarType();
345   for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
346     SDValue LHSOp = LHS.getOperand(i);
347     SDValue RHSOp = RHS.getOperand(i);
348     bool LHSUndef = AllowUndefs && LHSOp.isUndef();
349     bool RHSUndef = AllowUndefs && RHSOp.isUndef();
350     auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
351     auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
352     if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
353       return false;
354     if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
355                                LHSOp.getValueType() != RHSOp.getValueType()))
356       return false;
357     if (!Match(LHSCst, RHSCst))
358       return false;
359   }
360   return true;
361 }
362 
363 ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
364   switch (VecReduceOpcode) {
365   default:
366     llvm_unreachable("Expected VECREDUCE opcode");
367   case ISD::VECREDUCE_FADD:
368   case ISD::VECREDUCE_SEQ_FADD:
369     return ISD::FADD;
370   case ISD::VECREDUCE_FMUL:
371   case ISD::VECREDUCE_SEQ_FMUL:
372     return ISD::FMUL;
373   case ISD::VECREDUCE_ADD:
374     return ISD::ADD;
375   case ISD::VECREDUCE_MUL:
376     return ISD::MUL;
377   case ISD::VECREDUCE_AND:
378     return ISD::AND;
379   case ISD::VECREDUCE_OR:
380     return ISD::OR;
381   case ISD::VECREDUCE_XOR:
382     return ISD::XOR;
383   case ISD::VECREDUCE_SMAX:
384     return ISD::SMAX;
385   case ISD::VECREDUCE_SMIN:
386     return ISD::SMIN;
387   case ISD::VECREDUCE_UMAX:
388     return ISD::UMAX;
389   case ISD::VECREDUCE_UMIN:
390     return ISD::UMIN;
391   case ISD::VECREDUCE_FMAX:
392     return ISD::FMAXNUM;
393   case ISD::VECREDUCE_FMIN:
394     return ISD::FMINNUM;
395   }
396 }
397 
398 bool ISD::isVPOpcode(unsigned Opcode) {
399   switch (Opcode) {
400   default:
401     return false;
402 #define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...)                                   \
403   case ISD::SDOPC:                                                             \
404     return true;
405 #include "llvm/IR/VPIntrinsics.def"
406   }
407 }
408 
409 /// The operand position of the vector mask.
410 Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
411   switch (Opcode) {
412   default:
413     return None;
414 #define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, ...)        \
415   case ISD::SDOPC:                                                             \
416     return MASKPOS;
417 #include "llvm/IR/VPIntrinsics.def"
418   }
419 }
420 
421 /// The operand position of the explicit vector length parameter.
422 Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
423   switch (Opcode) {
424   default:
425     return None;
426 #define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS)     \
427   case ISD::SDOPC:                                                             \
428     return EVLPOS;
429 #include "llvm/IR/VPIntrinsics.def"
430   }
431 }
432 
433 ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
434   switch (ExtType) {
435   case ISD::EXTLOAD:
436     return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
437   case ISD::SEXTLOAD:
438     return ISD::SIGN_EXTEND;
439   case ISD::ZEXTLOAD:
440     return ISD::ZERO_EXTEND;
441   default:
442     break;
443   }
444 
445   llvm_unreachable("Invalid LoadExtType");
446 }
447 
448 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
449   // To perform this operation, we just need to swap the L and G bits of the
450   // operation.
451   unsigned OldL = (Operation >> 2) & 1;
452   unsigned OldG = (Operation >> 1) & 1;
453   return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
454                        (OldL << 1) |       // New G bit
455                        (OldG << 2));       // New L bit.
456 }
457 
458 static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
459   unsigned Operation = Op;
460   if (isIntegerLike)
461     Operation ^= 7;   // Flip L, G, E bits, but not U.
462   else
463     Operation ^= 15;  // Flip all of the condition bits.
464 
465   if (Operation > ISD::SETTRUE2)
466     Operation &= ~8;  // Don't let N and U bits get set.
467 
468   return ISD::CondCode(Operation);
469 }
470 
471 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
472   return getSetCCInverseImpl(Op, Type.isInteger());
473 }
474 
475 ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
476                                                bool isIntegerLike) {
477   return getSetCCInverseImpl(Op, isIntegerLike);
478 }
479 
480 /// For an integer comparison, return 1 if the comparison is a signed operation
481 /// and 2 if the result is an unsigned comparison. Return zero if the operation
482 /// does not depend on the sign of the input (setne and seteq).
483 static int isSignedOp(ISD::CondCode Opcode) {
484   switch (Opcode) {
485   default: llvm_unreachable("Illegal integer setcc operation!");
486   case ISD::SETEQ:
487   case ISD::SETNE: return 0;
488   case ISD::SETLT:
489   case ISD::SETLE:
490   case ISD::SETGT:
491   case ISD::SETGE: return 1;
492   case ISD::SETULT:
493   case ISD::SETULE:
494   case ISD::SETUGT:
495   case ISD::SETUGE: return 2;
496   }
497 }
498 
499 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
500                                        EVT Type) {
501   bool IsInteger = Type.isInteger();
502   if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
503     // Cannot fold a signed integer setcc with an unsigned integer setcc.
504     return ISD::SETCC_INVALID;
505 
506   unsigned Op = Op1 | Op2;  // Combine all of the condition bits.
507 
508   // If the N and U bits get set, then the resultant comparison DOES suddenly
509   // care about orderedness, and it is true when ordered.
510   if (Op > ISD::SETTRUE2)
511     Op &= ~16;     // Clear the U bit if the N bit is set.
512 
513   // Canonicalize illegal integer setcc's.
514   if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
515     Op = ISD::SETNE;
516 
517   return ISD::CondCode(Op);
518 }
519 
520 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
521                                         EVT Type) {
522   bool IsInteger = Type.isInteger();
523   if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
524     // Cannot fold a signed setcc with an unsigned setcc.
525     return ISD::SETCC_INVALID;
526 
527   // Combine all of the condition bits.
528   ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
529 
530   // Canonicalize illegal integer setcc's.
531   if (IsInteger) {
532     switch (Result) {
533     default: break;
534     case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
535     case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
536     case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
537     case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
538     case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
539     }
540   }
541 
542   return Result;
543 }
544 
545 //===----------------------------------------------------------------------===//
546 //                           SDNode Profile Support
547 //===----------------------------------------------------------------------===//
548 
549 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
550 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
551   ID.AddInteger(OpC);
552 }
553 
554 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
555 /// solely with their pointer.
556 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
557   ID.AddPointer(VTList.VTs);
558 }
559 
560 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
561 static void AddNodeIDOperands(FoldingSetNodeID &ID,
562                               ArrayRef<SDValue> Ops) {
563   for (auto& Op : Ops) {
564     ID.AddPointer(Op.getNode());
565     ID.AddInteger(Op.getResNo());
566   }
567 }
568 
569 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
570 static void AddNodeIDOperands(FoldingSetNodeID &ID,
571                               ArrayRef<SDUse> Ops) {
572   for (auto& Op : Ops) {
573     ID.AddPointer(Op.getNode());
574     ID.AddInteger(Op.getResNo());
575   }
576 }
577 
578 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
579                           SDVTList VTList, ArrayRef<SDValue> OpList) {
580   AddNodeIDOpcode(ID, OpC);
581   AddNodeIDValueTypes(ID, VTList);
582   AddNodeIDOperands(ID, OpList);
583 }
584 
585 /// If this is an SDNode with special info, add this info to the NodeID data.
586 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
587   switch (N->getOpcode()) {
588   case ISD::TargetExternalSymbol:
589   case ISD::ExternalSymbol:
590   case ISD::MCSymbol:
591     llvm_unreachable("Should only be used on nodes with operands");
592   default: break;  // Normal nodes don't need extra info.
593   case ISD::TargetConstant:
594   case ISD::Constant: {
595     const ConstantSDNode *C = cast<ConstantSDNode>(N);
596     ID.AddPointer(C->getConstantIntValue());
597     ID.AddBoolean(C->isOpaque());
598     break;
599   }
600   case ISD::TargetConstantFP:
601   case ISD::ConstantFP:
602     ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
603     break;
604   case ISD::TargetGlobalAddress:
605   case ISD::GlobalAddress:
606   case ISD::TargetGlobalTLSAddress:
607   case ISD::GlobalTLSAddress: {
608     const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
609     ID.AddPointer(GA->getGlobal());
610     ID.AddInteger(GA->getOffset());
611     ID.AddInteger(GA->getTargetFlags());
612     break;
613   }
614   case ISD::BasicBlock:
615     ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
616     break;
617   case ISD::Register:
618     ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
619     break;
620   case ISD::RegisterMask:
621     ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
622     break;
623   case ISD::SRCVALUE:
624     ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
625     break;
626   case ISD::FrameIndex:
627   case ISD::TargetFrameIndex:
628     ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
629     break;
630   case ISD::LIFETIME_START:
631   case ISD::LIFETIME_END:
632     if (cast<LifetimeSDNode>(N)->hasOffset()) {
633       ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
634       ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
635     }
636     break;
637   case ISD::PSEUDO_PROBE:
638     ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
639     ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
640     ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
641     break;
642   case ISD::JumpTable:
643   case ISD::TargetJumpTable:
644     ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
645     ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
646     break;
647   case ISD::ConstantPool:
648   case ISD::TargetConstantPool: {
649     const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
650     ID.AddInteger(CP->getAlign().value());
651     ID.AddInteger(CP->getOffset());
652     if (CP->isMachineConstantPoolEntry())
653       CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
654     else
655       ID.AddPointer(CP->getConstVal());
656     ID.AddInteger(CP->getTargetFlags());
657     break;
658   }
659   case ISD::TargetIndex: {
660     const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
661     ID.AddInteger(TI->getIndex());
662     ID.AddInteger(TI->getOffset());
663     ID.AddInteger(TI->getTargetFlags());
664     break;
665   }
666   case ISD::LOAD: {
667     const LoadSDNode *LD = cast<LoadSDNode>(N);
668     ID.AddInteger(LD->getMemoryVT().getRawBits());
669     ID.AddInteger(LD->getRawSubclassData());
670     ID.AddInteger(LD->getPointerInfo().getAddrSpace());
671     break;
672   }
673   case ISD::STORE: {
674     const StoreSDNode *ST = cast<StoreSDNode>(N);
675     ID.AddInteger(ST->getMemoryVT().getRawBits());
676     ID.AddInteger(ST->getRawSubclassData());
677     ID.AddInteger(ST->getPointerInfo().getAddrSpace());
678     break;
679   }
680   case ISD::MLOAD: {
681     const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
682     ID.AddInteger(MLD->getMemoryVT().getRawBits());
683     ID.AddInteger(MLD->getRawSubclassData());
684     ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
685     break;
686   }
687   case ISD::MSTORE: {
688     const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
689     ID.AddInteger(MST->getMemoryVT().getRawBits());
690     ID.AddInteger(MST->getRawSubclassData());
691     ID.AddInteger(MST->getPointerInfo().getAddrSpace());
692     break;
693   }
694   case ISD::MGATHER: {
695     const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
696     ID.AddInteger(MG->getMemoryVT().getRawBits());
697     ID.AddInteger(MG->getRawSubclassData());
698     ID.AddInteger(MG->getPointerInfo().getAddrSpace());
699     break;
700   }
701   case ISD::MSCATTER: {
702     const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
703     ID.AddInteger(MS->getMemoryVT().getRawBits());
704     ID.AddInteger(MS->getRawSubclassData());
705     ID.AddInteger(MS->getPointerInfo().getAddrSpace());
706     break;
707   }
708   case ISD::ATOMIC_CMP_SWAP:
709   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
710   case ISD::ATOMIC_SWAP:
711   case ISD::ATOMIC_LOAD_ADD:
712   case ISD::ATOMIC_LOAD_SUB:
713   case ISD::ATOMIC_LOAD_AND:
714   case ISD::ATOMIC_LOAD_CLR:
715   case ISD::ATOMIC_LOAD_OR:
716   case ISD::ATOMIC_LOAD_XOR:
717   case ISD::ATOMIC_LOAD_NAND:
718   case ISD::ATOMIC_LOAD_MIN:
719   case ISD::ATOMIC_LOAD_MAX:
720   case ISD::ATOMIC_LOAD_UMIN:
721   case ISD::ATOMIC_LOAD_UMAX:
722   case ISD::ATOMIC_LOAD:
723   case ISD::ATOMIC_STORE: {
724     const AtomicSDNode *AT = cast<AtomicSDNode>(N);
725     ID.AddInteger(AT->getMemoryVT().getRawBits());
726     ID.AddInteger(AT->getRawSubclassData());
727     ID.AddInteger(AT->getPointerInfo().getAddrSpace());
728     break;
729   }
730   case ISD::PREFETCH: {
731     const MemSDNode *PF = cast<MemSDNode>(N);
732     ID.AddInteger(PF->getPointerInfo().getAddrSpace());
733     break;
734   }
735   case ISD::VECTOR_SHUFFLE: {
736     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
737     for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
738          i != e; ++i)
739       ID.AddInteger(SVN->getMaskElt(i));
740     break;
741   }
742   case ISD::TargetBlockAddress:
743   case ISD::BlockAddress: {
744     const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
745     ID.AddPointer(BA->getBlockAddress());
746     ID.AddInteger(BA->getOffset());
747     ID.AddInteger(BA->getTargetFlags());
748     break;
749   }
750   } // end switch (N->getOpcode())
751 
752   // Target specific memory nodes could also have address spaces to check.
753   if (N->isTargetMemoryOpcode())
754     ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
755 }
756 
757 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
758 /// data.
759 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
760   AddNodeIDOpcode(ID, N->getOpcode());
761   // Add the return value info.
762   AddNodeIDValueTypes(ID, N->getVTList());
763   // Add the operand info.
764   AddNodeIDOperands(ID, N->ops());
765 
766   // Handle SDNode leafs with special info.
767   AddNodeIDCustom(ID, N);
768 }
769 
770 //===----------------------------------------------------------------------===//
771 //                              SelectionDAG Class
772 //===----------------------------------------------------------------------===//
773 
774 /// doNotCSE - Return true if CSE should not be performed for this node.
775 static bool doNotCSE(SDNode *N) {
776   if (N->getValueType(0) == MVT::Glue)
777     return true; // Never CSE anything that produces a flag.
778 
779   switch (N->getOpcode()) {
780   default: break;
781   case ISD::HANDLENODE:
782   case ISD::EH_LABEL:
783     return true;   // Never CSE these nodes.
784   }
785 
786   // Check that remaining values produced are not flags.
787   for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
788     if (N->getValueType(i) == MVT::Glue)
789       return true; // Never CSE anything that produces a flag.
790 
791   return false;
792 }
793 
794 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
795 /// SelectionDAG.
796 void SelectionDAG::RemoveDeadNodes() {
797   // Create a dummy node (which is not added to allnodes), that adds a reference
798   // to the root node, preventing it from being deleted.
799   HandleSDNode Dummy(getRoot());
800 
801   SmallVector<SDNode*, 128> DeadNodes;
802 
803   // Add all obviously-dead nodes to the DeadNodes worklist.
804   for (SDNode &Node : allnodes())
805     if (Node.use_empty())
806       DeadNodes.push_back(&Node);
807 
808   RemoveDeadNodes(DeadNodes);
809 
810   // If the root changed (e.g. it was a dead load, update the root).
811   setRoot(Dummy.getValue());
812 }
813 
814 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
815 /// given list, and any nodes that become unreachable as a result.
816 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
817 
818   // Process the worklist, deleting the nodes and adding their uses to the
819   // worklist.
820   while (!DeadNodes.empty()) {
821     SDNode *N = DeadNodes.pop_back_val();
822     // Skip to next node if we've already managed to delete the node. This could
823     // happen if replacing a node causes a node previously added to the node to
824     // be deleted.
825     if (N->getOpcode() == ISD::DELETED_NODE)
826       continue;
827 
828     for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
829       DUL->NodeDeleted(N, nullptr);
830 
831     // Take the node out of the appropriate CSE map.
832     RemoveNodeFromCSEMaps(N);
833 
834     // Next, brutally remove the operand list.  This is safe to do, as there are
835     // no cycles in the graph.
836     for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
837       SDUse &Use = *I++;
838       SDNode *Operand = Use.getNode();
839       Use.set(SDValue());
840 
841       // Now that we removed this operand, see if there are no uses of it left.
842       if (Operand->use_empty())
843         DeadNodes.push_back(Operand);
844     }
845 
846     DeallocateNode(N);
847   }
848 }
849 
850 void SelectionDAG::RemoveDeadNode(SDNode *N){
851   SmallVector<SDNode*, 16> DeadNodes(1, N);
852 
853   // Create a dummy node that adds a reference to the root node, preventing
854   // it from being deleted.  (This matters if the root is an operand of the
855   // dead node.)
856   HandleSDNode Dummy(getRoot());
857 
858   RemoveDeadNodes(DeadNodes);
859 }
860 
861 void SelectionDAG::DeleteNode(SDNode *N) {
862   // First take this out of the appropriate CSE map.
863   RemoveNodeFromCSEMaps(N);
864 
865   // Finally, remove uses due to operands of this node, remove from the
866   // AllNodes list, and delete the node.
867   DeleteNodeNotInCSEMaps(N);
868 }
869 
870 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
871   assert(N->getIterator() != AllNodes.begin() &&
872          "Cannot delete the entry node!");
873   assert(N->use_empty() && "Cannot delete a node that is not dead!");
874 
875   // Drop all of the operands and decrement used node's use counts.
876   N->DropOperands();
877 
878   DeallocateNode(N);
879 }
880 
881 void SDDbgInfo::erase(const SDNode *Node) {
882   DbgValMapType::iterator I = DbgValMap.find(Node);
883   if (I == DbgValMap.end())
884     return;
885   for (auto &Val: I->second)
886     Val->setIsInvalidated();
887   DbgValMap.erase(I);
888 }
889 
890 void SelectionDAG::DeallocateNode(SDNode *N) {
891   // If we have operands, deallocate them.
892   removeOperands(N);
893 
894   NodeAllocator.Deallocate(AllNodes.remove(N));
895 
896   // Set the opcode to DELETED_NODE to help catch bugs when node
897   // memory is reallocated.
898   // FIXME: There are places in SDag that have grown a dependency on the opcode
899   // value in the released node.
900   __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
901   N->NodeType = ISD::DELETED_NODE;
902 
903   // If any of the SDDbgValue nodes refer to this SDNode, invalidate
904   // them and forget about that node.
905   DbgInfo->erase(N);
906 }
907 
908 #ifndef NDEBUG
909 /// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
910 static void VerifySDNode(SDNode *N) {
911   switch (N->getOpcode()) {
912   default:
913     break;
914   case ISD::BUILD_PAIR: {
915     EVT VT = N->getValueType(0);
916     assert(N->getNumValues() == 1 && "Too many results!");
917     assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
918            "Wrong return type!");
919     assert(N->getNumOperands() == 2 && "Wrong number of operands!");
920     assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
921            "Mismatched operand types!");
922     assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
923            "Wrong operand type!");
924     assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
925            "Wrong return type size");
926     break;
927   }
928   case ISD::BUILD_VECTOR: {
929     assert(N->getNumValues() == 1 && "Too many results!");
930     assert(N->getValueType(0).isVector() && "Wrong return type!");
931     assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
932            "Wrong number of operands!");
933     EVT EltVT = N->getValueType(0).getVectorElementType();
934     for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
935       assert((I->getValueType() == EltVT ||
936              (EltVT.isInteger() && I->getValueType().isInteger() &&
937               EltVT.bitsLE(I->getValueType()))) &&
938             "Wrong operand type!");
939       assert(I->getValueType() == N->getOperand(0).getValueType() &&
940              "Operands must all have the same type");
941     }
942     break;
943   }
944   }
945 }
946 #endif // NDEBUG
947 
948 /// Insert a newly allocated node into the DAG.
949 ///
950 /// Handles insertion into the all nodes list and CSE map, as well as
951 /// verification and other common operations when a new node is allocated.
952 void SelectionDAG::InsertNode(SDNode *N) {
953   AllNodes.push_back(N);
954 #ifndef NDEBUG
955   N->PersistentId = NextPersistentId++;
956   VerifySDNode(N);
957 #endif
958   for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
959     DUL->NodeInserted(N);
960 }
961 
962 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
963 /// correspond to it.  This is useful when we're about to delete or repurpose
964 /// the node.  We don't want future request for structurally identical nodes
965 /// to return N anymore.
966 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
967   bool Erased = false;
968   switch (N->getOpcode()) {
969   case ISD::HANDLENODE: return false;  // noop.
970   case ISD::CONDCODE:
971     assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
972            "Cond code doesn't exist!");
973     Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
974     CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
975     break;
976   case ISD::ExternalSymbol:
977     Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
978     break;
979   case ISD::TargetExternalSymbol: {
980     ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
981     Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
982         ESN->getSymbol(), ESN->getTargetFlags()));
983     break;
984   }
985   case ISD::MCSymbol: {
986     auto *MCSN = cast<MCSymbolSDNode>(N);
987     Erased = MCSymbols.erase(MCSN->getMCSymbol());
988     break;
989   }
990   case ISD::VALUETYPE: {
991     EVT VT = cast<VTSDNode>(N)->getVT();
992     if (VT.isExtended()) {
993       Erased = ExtendedValueTypeNodes.erase(VT);
994     } else {
995       Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
996       ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
997     }
998     break;
999   }
1000   default:
1001     // Remove it from the CSE Map.
1002     assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
1003     assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
1004     Erased = CSEMap.RemoveNode(N);
1005     break;
1006   }
1007 #ifndef NDEBUG
1008   // Verify that the node was actually in one of the CSE maps, unless it has a
1009   // flag result (which cannot be CSE'd) or is one of the special cases that are
1010   // not subject to CSE.
1011   if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
1012       !N->isMachineOpcode() && !doNotCSE(N)) {
1013     N->dump(this);
1014     dbgs() << "\n";
1015     llvm_unreachable("Node is not in map!");
1016   }
1017 #endif
1018   return Erased;
1019 }
1020 
1021 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
1022 /// maps and modified in place. Add it back to the CSE maps, unless an identical
1023 /// node already exists, in which case transfer all its users to the existing
1024 /// node. This transfer can potentially trigger recursive merging.
1025 void
1026 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
1027   // For node types that aren't CSE'd, just act as if no identical node
1028   // already exists.
1029   if (!doNotCSE(N)) {
1030     SDNode *Existing = CSEMap.GetOrInsertNode(N);
1031     if (Existing != N) {
1032       // If there was already an existing matching node, use ReplaceAllUsesWith
1033       // to replace the dead one with the existing one.  This can cause
1034       // recursive merging of other unrelated nodes down the line.
1035       ReplaceAllUsesWith(N, Existing);
1036 
1037       // N is now dead. Inform the listeners and delete it.
1038       for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1039         DUL->NodeDeleted(N, Existing);
1040       DeleteNodeNotInCSEMaps(N);
1041       return;
1042     }
1043   }
1044 
1045   // If the node doesn't already exist, we updated it.  Inform listeners.
1046   for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1047     DUL->NodeUpdated(N);
1048 }
1049 
1050 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1051 /// were replaced with those specified.  If this node is never memoized,
1052 /// return null, otherwise return a pointer to the slot it would take.  If a
1053 /// node already exists with these operands, the slot will be non-null.
1054 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
1055                                            void *&InsertPos) {
1056   if (doNotCSE(N))
1057     return nullptr;
1058 
1059   SDValue Ops[] = { Op };
1060   FoldingSetNodeID ID;
1061   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1062   AddNodeIDCustom(ID, N);
1063   SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1064   if (Node)
1065     Node->intersectFlagsWith(N->getFlags());
1066   return Node;
1067 }
1068 
1069 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1070 /// were replaced with those specified.  If this node is never memoized,
1071 /// return null, otherwise return a pointer to the slot it would take.  If a
1072 /// node already exists with these operands, the slot will be non-null.
1073 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
1074                                            SDValue Op1, SDValue Op2,
1075                                            void *&InsertPos) {
1076   if (doNotCSE(N))
1077     return nullptr;
1078 
1079   SDValue Ops[] = { Op1, Op2 };
1080   FoldingSetNodeID ID;
1081   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1082   AddNodeIDCustom(ID, N);
1083   SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1084   if (Node)
1085     Node->intersectFlagsWith(N->getFlags());
1086   return Node;
1087 }
1088 
1089 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1090 /// were replaced with those specified.  If this node is never memoized,
1091 /// return null, otherwise return a pointer to the slot it would take.  If a
1092 /// node already exists with these operands, the slot will be non-null.
1093 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
1094                                            void *&InsertPos) {
1095   if (doNotCSE(N))
1096     return nullptr;
1097 
1098   FoldingSetNodeID ID;
1099   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1100   AddNodeIDCustom(ID, N);
1101   SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1102   if (Node)
1103     Node->intersectFlagsWith(N->getFlags());
1104   return Node;
1105 }
1106 
1107 Align SelectionDAG::getEVTAlign(EVT VT) const {
1108   Type *Ty = VT == MVT::iPTR ?
1109                    PointerType::get(Type::getInt8Ty(*getContext()), 0) :
1110                    VT.getTypeForEVT(*getContext());
1111 
1112   return getDataLayout().getABITypeAlign(Ty);
1113 }
1114 
1115 // EntryNode could meaningfully have debug info if we can find it...
1116 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
1117     : TM(tm), OptLevel(OL),
1118       EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
1119       Root(getEntryNode()) {
1120   InsertNode(&EntryNode);
1121   DbgInfo = new SDDbgInfo();
1122 }
1123 
1124 void SelectionDAG::init(MachineFunction &NewMF,
1125                         OptimizationRemarkEmitter &NewORE,
1126                         Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
1127                         LegacyDivergenceAnalysis * Divergence,
1128                         ProfileSummaryInfo *PSIin,
1129                         BlockFrequencyInfo *BFIin) {
1130   MF = &NewMF;
1131   SDAGISelPass = PassPtr;
1132   ORE = &NewORE;
1133   TLI = getSubtarget().getTargetLowering();
1134   TSI = getSubtarget().getSelectionDAGInfo();
1135   LibInfo = LibraryInfo;
1136   Context = &MF->getFunction().getContext();
1137   DA = Divergence;
1138   PSI = PSIin;
1139   BFI = BFIin;
1140 }
1141 
1142 SelectionDAG::~SelectionDAG() {
1143   assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1144   allnodes_clear();
1145   OperandRecycler.clear(OperandAllocator);
1146   delete DbgInfo;
1147 }
1148 
1149 bool SelectionDAG::shouldOptForSize() const {
1150   return MF->getFunction().hasOptSize() ||
1151       llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
1152 }
1153 
1154 void SelectionDAG::allnodes_clear() {
1155   assert(&*AllNodes.begin() == &EntryNode);
1156   AllNodes.remove(AllNodes.begin());
1157   while (!AllNodes.empty())
1158     DeallocateNode(&AllNodes.front());
1159 #ifndef NDEBUG
1160   NextPersistentId = 0;
1161 #endif
1162 }
1163 
1164 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1165                                           void *&InsertPos) {
1166   SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1167   if (N) {
1168     switch (N->getOpcode()) {
1169     default: break;
1170     case ISD::Constant:
1171     case ISD::ConstantFP:
1172       llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1173                        "debug location.  Use another overload.");
1174     }
1175   }
1176   return N;
1177 }
1178 
1179 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1180                                           const SDLoc &DL, void *&InsertPos) {
1181   SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1182   if (N) {
1183     switch (N->getOpcode()) {
1184     case ISD::Constant:
1185     case ISD::ConstantFP:
1186       // Erase debug location from the node if the node is used at several
1187       // different places. Do not propagate one location to all uses as it
1188       // will cause a worse single stepping debugging experience.
1189       if (N->getDebugLoc() != DL.getDebugLoc())
1190         N->setDebugLoc(DebugLoc());
1191       break;
1192     default:
1193       // When the node's point of use is located earlier in the instruction
1194       // sequence than its prior point of use, update its debug info to the
1195       // earlier location.
1196       if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
1197         N->setDebugLoc(DL.getDebugLoc());
1198       break;
1199     }
1200   }
1201   return N;
1202 }
1203 
1204 void SelectionDAG::clear() {
1205   allnodes_clear();
1206   OperandRecycler.clear(OperandAllocator);
1207   OperandAllocator.Reset();
1208   CSEMap.clear();
1209 
1210   ExtendedValueTypeNodes.clear();
1211   ExternalSymbols.clear();
1212   TargetExternalSymbols.clear();
1213   MCSymbols.clear();
1214   SDCallSiteDbgInfo.clear();
1215   std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
1216             static_cast<CondCodeSDNode*>(nullptr));
1217   std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
1218             static_cast<SDNode*>(nullptr));
1219 
1220   EntryNode.UseList = nullptr;
1221   InsertNode(&EntryNode);
1222   Root = getEntryNode();
1223   DbgInfo->clear();
1224 }
1225 
1226 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
1227   return VT.bitsGT(Op.getValueType())
1228              ? getNode(ISD::FP_EXTEND, DL, VT, Op)
1229              : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
1230 }
1231 
1232 std::pair<SDValue, SDValue>
1233 SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
1234                                        const SDLoc &DL, EVT VT) {
1235   assert(!VT.bitsEq(Op.getValueType()) &&
1236          "Strict no-op FP extend/round not allowed.");
1237   SDValue Res =
1238       VT.bitsGT(Op.getValueType())
1239           ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
1240           : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
1241                     {Chain, Op, getIntPtrConstant(0, DL)});
1242 
1243   return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
1244 }
1245 
1246 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1247   return VT.bitsGT(Op.getValueType()) ?
1248     getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1249     getNode(ISD::TRUNCATE, DL, VT, Op);
1250 }
1251 
1252 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1253   return VT.bitsGT(Op.getValueType()) ?
1254     getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1255     getNode(ISD::TRUNCATE, DL, VT, Op);
1256 }
1257 
1258 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1259   return VT.bitsGT(Op.getValueType()) ?
1260     getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1261     getNode(ISD::TRUNCATE, DL, VT, Op);
1262 }
1263 
1264 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
1265                                         EVT OpVT) {
1266   if (VT.bitsLE(Op.getValueType()))
1267     return getNode(ISD::TRUNCATE, SL, VT, Op);
1268 
1269   TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1270   return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1271 }
1272 
1273 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1274   EVT OpVT = Op.getValueType();
1275   assert(VT.isInteger() && OpVT.isInteger() &&
1276          "Cannot getZeroExtendInReg FP types");
1277   assert(VT.isVector() == OpVT.isVector() &&
1278          "getZeroExtendInReg type should be vector iff the operand "
1279          "type is vector!");
1280   assert((!VT.isVector() ||
1281           VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
1282          "Vector element counts must match in getZeroExtendInReg");
1283   assert(VT.bitsLE(OpVT) && "Not extending!");
1284   if (OpVT == VT)
1285     return Op;
1286   APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
1287                                    VT.getScalarSizeInBits());
1288   return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
1289 }
1290 
1291 SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1292   // Only unsigned pointer semantics are supported right now. In the future this
1293   // might delegate to TLI to check pointer signedness.
1294   return getZExtOrTrunc(Op, DL, VT);
1295 }
1296 
1297 SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1298   // Only unsigned pointer semantics are supported right now. In the future this
1299   // might delegate to TLI to check pointer signedness.
1300   return getZeroExtendInReg(Op, DL, VT);
1301 }
1302 
1303 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1304 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1305   EVT EltVT = VT.getScalarType();
1306   SDValue NegOne =
1307     getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
1308   return getNode(ISD::XOR, DL, VT, Val, NegOne);
1309 }
1310 
1311 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1312   SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1313   return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1314 }
1315 
1316 SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
1317                                       EVT OpVT) {
1318   if (!V)
1319     return getConstant(0, DL, VT);
1320 
1321   switch (TLI->getBooleanContents(OpVT)) {
1322   case TargetLowering::ZeroOrOneBooleanContent:
1323   case TargetLowering::UndefinedBooleanContent:
1324     return getConstant(1, DL, VT);
1325   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1326     return getAllOnesConstant(DL, VT);
1327   }
1328   llvm_unreachable("Unexpected boolean content enum!");
1329 }
1330 
1331 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1332                                   bool isT, bool isO) {
1333   EVT EltVT = VT.getScalarType();
1334   assert((EltVT.getSizeInBits() >= 64 ||
1335           (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1336          "getConstant with a uint64_t value that doesn't fit in the type!");
1337   return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1338 }
1339 
1340 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1341                                   bool isT, bool isO) {
1342   return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1343 }
1344 
1345 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1346                                   EVT VT, bool isT, bool isO) {
1347   assert(VT.isInteger() && "Cannot create FP integer constant!");
1348 
1349   EVT EltVT = VT.getScalarType();
1350   const ConstantInt *Elt = &Val;
1351 
1352   // In some cases the vector type is legal but the element type is illegal and
1353   // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
1354   // inserted value (the type does not need to match the vector element type).
1355   // Any extra bits introduced will be truncated away.
1356   if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1357                            TargetLowering::TypePromoteInteger) {
1358     EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1359     APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1360     Elt = ConstantInt::get(*getContext(), NewVal);
1361   }
1362   // In other cases the element type is illegal and needs to be expanded, for
1363   // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1364   // the value into n parts and use a vector type with n-times the elements.
1365   // Then bitcast to the type requested.
1366   // Legalizing constants too early makes the DAGCombiner's job harder so we
1367   // only legalize if the DAG tells us we must produce legal types.
1368   else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1369            TLI->getTypeAction(*getContext(), EltVT) ==
1370                TargetLowering::TypeExpandInteger) {
1371     const APInt &NewVal = Elt->getValue();
1372     EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1373     unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1374     unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1375     EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1376 
1377     // Check the temporary vector is the correct size. If this fails then
1378     // getTypeToTransformTo() probably returned a type whose size (in bits)
1379     // isn't a power-of-2 factor of the requested type size.
1380     assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1381 
1382     SmallVector<SDValue, 2> EltParts;
1383     for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1384       EltParts.push_back(getConstant(
1385           NewVal.lshr(i * ViaEltSizeInBits).zextOrTrunc(ViaEltSizeInBits), DL,
1386           ViaEltVT, isT, isO));
1387     }
1388 
1389     // EltParts is currently in little endian order. If we actually want
1390     // big-endian order then reverse it now.
1391     if (getDataLayout().isBigEndian())
1392       std::reverse(EltParts.begin(), EltParts.end());
1393 
1394     // The elements must be reversed when the element order is different
1395     // to the endianness of the elements (because the BITCAST is itself a
1396     // vector shuffle in this situation). However, we do not need any code to
1397     // perform this reversal because getConstant() is producing a vector
1398     // splat.
1399     // This situation occurs in MIPS MSA.
1400 
1401     SmallVector<SDValue, 8> Ops;
1402     for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1403       llvm::append_range(Ops, EltParts);
1404 
1405     SDValue V =
1406         getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1407     return V;
1408   }
1409 
1410   assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1411          "APInt size does not match type size!");
1412   unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1413   FoldingSetNodeID ID;
1414   AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1415   ID.AddPointer(Elt);
1416   ID.AddBoolean(isO);
1417   void *IP = nullptr;
1418   SDNode *N = nullptr;
1419   if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1420     if (!VT.isVector())
1421       return SDValue(N, 0);
1422 
1423   if (!N) {
1424     N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
1425     CSEMap.InsertNode(N, IP);
1426     InsertNode(N);
1427     NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1428   }
1429 
1430   SDValue Result(N, 0);
1431   if (VT.isScalableVector())
1432     Result = getSplatVector(VT, DL, Result);
1433   else if (VT.isVector())
1434     Result = getSplatBuildVector(VT, DL, Result);
1435 
1436   return Result;
1437 }
1438 
1439 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1440                                         bool isTarget) {
1441   return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1442 }
1443 
1444 SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
1445                                              const SDLoc &DL, bool LegalTypes) {
1446   assert(VT.isInteger() && "Shift amount is not an integer type!");
1447   EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
1448   return getConstant(Val, DL, ShiftVT);
1449 }
1450 
1451 SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
1452                                            bool isTarget) {
1453   return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
1454 }
1455 
1456 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1457                                     bool isTarget) {
1458   return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1459 }
1460 
1461 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1462                                     EVT VT, bool isTarget) {
1463   assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1464 
1465   EVT EltVT = VT.getScalarType();
1466 
1467   // Do the map lookup using the actual bit pattern for the floating point
1468   // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1469   // we don't have issues with SNANs.
1470   unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1471   FoldingSetNodeID ID;
1472   AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1473   ID.AddPointer(&V);
1474   void *IP = nullptr;
1475   SDNode *N = nullptr;
1476   if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1477     if (!VT.isVector())
1478       return SDValue(N, 0);
1479 
1480   if (!N) {
1481     N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1482     CSEMap.InsertNode(N, IP);
1483     InsertNode(N);
1484   }
1485 
1486   SDValue Result(N, 0);
1487   if (VT.isScalableVector())
1488     Result = getSplatVector(VT, DL, Result);
1489   else if (VT.isVector())
1490     Result = getSplatBuildVector(VT, DL, Result);
1491   NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1492   return Result;
1493 }
1494 
1495 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1496                                     bool isTarget) {
1497   EVT EltVT = VT.getScalarType();
1498   if (EltVT == MVT::f32)
1499     return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1500   else if (EltVT == MVT::f64)
1501     return getConstantFP(APFloat(Val), DL, VT, isTarget);
1502   else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1503            EltVT == MVT::f16 || EltVT == MVT::bf16) {
1504     bool Ignored;
1505     APFloat APF = APFloat(Val);
1506     APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1507                 &Ignored);
1508     return getConstantFP(APF, DL, VT, isTarget);
1509   } else
1510     llvm_unreachable("Unsupported type in getConstantFP");
1511 }
1512 
1513 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1514                                        EVT VT, int64_t Offset, bool isTargetGA,
1515                                        unsigned TargetFlags) {
1516   assert((TargetFlags == 0 || isTargetGA) &&
1517          "Cannot set target flags on target-independent globals");
1518 
1519   // Truncate (with sign-extension) the offset value to the pointer size.
1520   unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1521   if (BitWidth < 64)
1522     Offset = SignExtend64(Offset, BitWidth);
1523 
1524   unsigned Opc;
1525   if (GV->isThreadLocal())
1526     Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1527   else
1528     Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1529 
1530   FoldingSetNodeID ID;
1531   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1532   ID.AddPointer(GV);
1533   ID.AddInteger(Offset);
1534   ID.AddInteger(TargetFlags);
1535   void *IP = nullptr;
1536   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1537     return SDValue(E, 0);
1538 
1539   auto *N = newSDNode<GlobalAddressSDNode>(
1540       Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1541   CSEMap.InsertNode(N, IP);
1542     InsertNode(N);
1543   return SDValue(N, 0);
1544 }
1545 
1546 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1547   unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1548   FoldingSetNodeID ID;
1549   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1550   ID.AddInteger(FI);
1551   void *IP = nullptr;
1552   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1553     return SDValue(E, 0);
1554 
1555   auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1556   CSEMap.InsertNode(N, IP);
1557   InsertNode(N);
1558   return SDValue(N, 0);
1559 }
1560 
1561 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1562                                    unsigned TargetFlags) {
1563   assert((TargetFlags == 0 || isTarget) &&
1564          "Cannot set target flags on target-independent jump tables");
1565   unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1566   FoldingSetNodeID ID;
1567   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1568   ID.AddInteger(JTI);
1569   ID.AddInteger(TargetFlags);
1570   void *IP = nullptr;
1571   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1572     return SDValue(E, 0);
1573 
1574   auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1575   CSEMap.InsertNode(N, IP);
1576   InsertNode(N);
1577   return SDValue(N, 0);
1578 }
1579 
1580 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1581                                       MaybeAlign Alignment, int Offset,
1582                                       bool isTarget, unsigned TargetFlags) {
1583   assert((TargetFlags == 0 || isTarget) &&
1584          "Cannot set target flags on target-independent globals");
1585   if (!Alignment)
1586     Alignment = shouldOptForSize()
1587                     ? getDataLayout().getABITypeAlign(C->getType())
1588                     : getDataLayout().getPrefTypeAlign(C->getType());
1589   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1590   FoldingSetNodeID ID;
1591   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1592   ID.AddInteger(Alignment->value());
1593   ID.AddInteger(Offset);
1594   ID.AddPointer(C);
1595   ID.AddInteger(TargetFlags);
1596   void *IP = nullptr;
1597   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1598     return SDValue(E, 0);
1599 
1600   auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1601                                           TargetFlags);
1602   CSEMap.InsertNode(N, IP);
1603   InsertNode(N);
1604   SDValue V = SDValue(N, 0);
1605   NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
1606   return V;
1607 }
1608 
1609 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1610                                       MaybeAlign Alignment, int Offset,
1611                                       bool isTarget, unsigned TargetFlags) {
1612   assert((TargetFlags == 0 || isTarget) &&
1613          "Cannot set target flags on target-independent globals");
1614   if (!Alignment)
1615     Alignment = getDataLayout().getPrefTypeAlign(C->getType());
1616   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1617   FoldingSetNodeID ID;
1618   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1619   ID.AddInteger(Alignment->value());
1620   ID.AddInteger(Offset);
1621   C->addSelectionDAGCSEId(ID);
1622   ID.AddInteger(TargetFlags);
1623   void *IP = nullptr;
1624   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1625     return SDValue(E, 0);
1626 
1627   auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1628                                           TargetFlags);
1629   CSEMap.InsertNode(N, IP);
1630   InsertNode(N);
1631   return SDValue(N, 0);
1632 }
1633 
1634 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1635                                      unsigned TargetFlags) {
1636   FoldingSetNodeID ID;
1637   AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1638   ID.AddInteger(Index);
1639   ID.AddInteger(Offset);
1640   ID.AddInteger(TargetFlags);
1641   void *IP = nullptr;
1642   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1643     return SDValue(E, 0);
1644 
1645   auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1646   CSEMap.InsertNode(N, IP);
1647   InsertNode(N);
1648   return SDValue(N, 0);
1649 }
1650 
1651 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1652   FoldingSetNodeID ID;
1653   AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1654   ID.AddPointer(MBB);
1655   void *IP = nullptr;
1656   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1657     return SDValue(E, 0);
1658 
1659   auto *N = newSDNode<BasicBlockSDNode>(MBB);
1660   CSEMap.InsertNode(N, IP);
1661   InsertNode(N);
1662   return SDValue(N, 0);
1663 }
1664 
1665 SDValue SelectionDAG::getValueType(EVT VT) {
1666   if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1667       ValueTypeNodes.size())
1668     ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1669 
1670   SDNode *&N = VT.isExtended() ?
1671     ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1672 
1673   if (N) return SDValue(N, 0);
1674   N = newSDNode<VTSDNode>(VT);
1675   InsertNode(N);
1676   return SDValue(N, 0);
1677 }
1678 
1679 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1680   SDNode *&N = ExternalSymbols[Sym];
1681   if (N) return SDValue(N, 0);
1682   N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1683   InsertNode(N);
1684   return SDValue(N, 0);
1685 }
1686 
1687 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1688   SDNode *&N = MCSymbols[Sym];
1689   if (N)
1690     return SDValue(N, 0);
1691   N = newSDNode<MCSymbolSDNode>(Sym, VT);
1692   InsertNode(N);
1693   return SDValue(N, 0);
1694 }
1695 
1696 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1697                                               unsigned TargetFlags) {
1698   SDNode *&N =
1699       TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
1700   if (N) return SDValue(N, 0);
1701   N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1702   InsertNode(N);
1703   return SDValue(N, 0);
1704 }
1705 
1706 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1707   if ((unsigned)Cond >= CondCodeNodes.size())
1708     CondCodeNodes.resize(Cond+1);
1709 
1710   if (!CondCodeNodes[Cond]) {
1711     auto *N = newSDNode<CondCodeSDNode>(Cond);
1712     CondCodeNodes[Cond] = N;
1713     InsertNode(N);
1714   }
1715 
1716   return SDValue(CondCodeNodes[Cond], 0);
1717 }
1718 
1719 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1720 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1721 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1722   std::swap(N1, N2);
1723   ShuffleVectorSDNode::commuteMask(M);
1724 }
1725 
1726 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1727                                        SDValue N2, ArrayRef<int> Mask) {
1728   assert(VT.getVectorNumElements() == Mask.size() &&
1729            "Must have the same number of vector elements as mask elements!");
1730   assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1731          "Invalid VECTOR_SHUFFLE");
1732 
1733   // Canonicalize shuffle undef, undef -> undef
1734   if (N1.isUndef() && N2.isUndef())
1735     return getUNDEF(VT);
1736 
1737   // Validate that all indices in Mask are within the range of the elements
1738   // input to the shuffle.
1739   int NElts = Mask.size();
1740   assert(llvm::all_of(Mask,
1741                       [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1742          "Index out of range");
1743 
1744   // Copy the mask so we can do any needed cleanup.
1745   SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1746 
1747   // Canonicalize shuffle v, v -> v, undef
1748   if (N1 == N2) {
1749     N2 = getUNDEF(VT);
1750     for (int i = 0; i != NElts; ++i)
1751       if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1752   }
1753 
1754   // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
1755   if (N1.isUndef())
1756     commuteShuffle(N1, N2, MaskVec);
1757 
1758   if (TLI->hasVectorBlend()) {
1759     // If shuffling a splat, try to blend the splat instead. We do this here so
1760     // that even when this arises during lowering we don't have to re-handle it.
1761     auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1762       BitVector UndefElements;
1763       SDValue Splat = BV->getSplatValue(&UndefElements);
1764       if (!Splat)
1765         return;
1766 
1767       for (int i = 0; i < NElts; ++i) {
1768         if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1769           continue;
1770 
1771         // If this input comes from undef, mark it as such.
1772         if (UndefElements[MaskVec[i] - Offset]) {
1773           MaskVec[i] = -1;
1774           continue;
1775         }
1776 
1777         // If we can blend a non-undef lane, use that instead.
1778         if (!UndefElements[i])
1779           MaskVec[i] = i + Offset;
1780       }
1781     };
1782     if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1783       BlendSplat(N1BV, 0);
1784     if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1785       BlendSplat(N2BV, NElts);
1786   }
1787 
1788   // Canonicalize all index into lhs, -> shuffle lhs, undef
1789   // Canonicalize all index into rhs, -> shuffle rhs, undef
1790   bool AllLHS = true, AllRHS = true;
1791   bool N2Undef = N2.isUndef();
1792   for (int i = 0; i != NElts; ++i) {
1793     if (MaskVec[i] >= NElts) {
1794       if (N2Undef)
1795         MaskVec[i] = -1;
1796       else
1797         AllLHS = false;
1798     } else if (MaskVec[i] >= 0) {
1799       AllRHS = false;
1800     }
1801   }
1802   if (AllLHS && AllRHS)
1803     return getUNDEF(VT);
1804   if (AllLHS && !N2Undef)
1805     N2 = getUNDEF(VT);
1806   if (AllRHS) {
1807     N1 = getUNDEF(VT);
1808     commuteShuffle(N1, N2, MaskVec);
1809   }
1810   // Reset our undef status after accounting for the mask.
1811   N2Undef = N2.isUndef();
1812   // Re-check whether both sides ended up undef.
1813   if (N1.isUndef() && N2Undef)
1814     return getUNDEF(VT);
1815 
1816   // If Identity shuffle return that node.
1817   bool Identity = true, AllSame = true;
1818   for (int i = 0; i != NElts; ++i) {
1819     if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1820     if (MaskVec[i] != MaskVec[0]) AllSame = false;
1821   }
1822   if (Identity && NElts)
1823     return N1;
1824 
1825   // Shuffling a constant splat doesn't change the result.
1826   if (N2Undef) {
1827     SDValue V = N1;
1828 
1829     // Look through any bitcasts. We check that these don't change the number
1830     // (and size) of elements and just changes their types.
1831     while (V.getOpcode() == ISD::BITCAST)
1832       V = V->getOperand(0);
1833 
1834     // A splat should always show up as a build vector node.
1835     if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1836       BitVector UndefElements;
1837       SDValue Splat = BV->getSplatValue(&UndefElements);
1838       // If this is a splat of an undef, shuffling it is also undef.
1839       if (Splat && Splat.isUndef())
1840         return getUNDEF(VT);
1841 
1842       bool SameNumElts =
1843           V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1844 
1845       // We only have a splat which can skip shuffles if there is a splatted
1846       // value and no undef lanes rearranged by the shuffle.
1847       if (Splat && UndefElements.none()) {
1848         // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1849         // number of elements match or the value splatted is a zero constant.
1850         if (SameNumElts)
1851           return N1;
1852         if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1853           if (C->isNullValue())
1854             return N1;
1855       }
1856 
1857       // If the shuffle itself creates a splat, build the vector directly.
1858       if (AllSame && SameNumElts) {
1859         EVT BuildVT = BV->getValueType(0);
1860         const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1861         SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1862 
1863         // We may have jumped through bitcasts, so the type of the
1864         // BUILD_VECTOR may not match the type of the shuffle.
1865         if (BuildVT != VT)
1866           NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1867         return NewBV;
1868       }
1869     }
1870   }
1871 
1872   FoldingSetNodeID ID;
1873   SDValue Ops[2] = { N1, N2 };
1874   AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1875   for (int i = 0; i != NElts; ++i)
1876     ID.AddInteger(MaskVec[i]);
1877 
1878   void* IP = nullptr;
1879   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1880     return SDValue(E, 0);
1881 
1882   // Allocate the mask array for the node out of the BumpPtrAllocator, since
1883   // SDNode doesn't have access to it.  This memory will be "leaked" when
1884   // the node is deallocated, but recovered when the NodeAllocator is released.
1885   int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1886   llvm::copy(MaskVec, MaskAlloc);
1887 
1888   auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1889                                            dl.getDebugLoc(), MaskAlloc);
1890   createOperands(N, Ops);
1891 
1892   CSEMap.InsertNode(N, IP);
1893   InsertNode(N);
1894   SDValue V = SDValue(N, 0);
1895   NewSDValueDbgMsg(V, "Creating new node: ", this);
1896   return V;
1897 }
1898 
1899 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1900   EVT VT = SV.getValueType(0);
1901   SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1902   ShuffleVectorSDNode::commuteMask(MaskVec);
1903 
1904   SDValue Op0 = SV.getOperand(0);
1905   SDValue Op1 = SV.getOperand(1);
1906   return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1907 }
1908 
1909 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1910   FoldingSetNodeID ID;
1911   AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1912   ID.AddInteger(RegNo);
1913   void *IP = nullptr;
1914   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1915     return SDValue(E, 0);
1916 
1917   auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1918   N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
1919   CSEMap.InsertNode(N, IP);
1920   InsertNode(N);
1921   return SDValue(N, 0);
1922 }
1923 
1924 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1925   FoldingSetNodeID ID;
1926   AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1927   ID.AddPointer(RegMask);
1928   void *IP = nullptr;
1929   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1930     return SDValue(E, 0);
1931 
1932   auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1933   CSEMap.InsertNode(N, IP);
1934   InsertNode(N);
1935   return SDValue(N, 0);
1936 }
1937 
1938 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1939                                  MCSymbol *Label) {
1940   return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
1941 }
1942 
1943 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
1944                                    SDValue Root, MCSymbol *Label) {
1945   FoldingSetNodeID ID;
1946   SDValue Ops[] = { Root };
1947   AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
1948   ID.AddPointer(Label);
1949   void *IP = nullptr;
1950   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1951     return SDValue(E, 0);
1952 
1953   auto *N =
1954       newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
1955   createOperands(N, Ops);
1956 
1957   CSEMap.InsertNode(N, IP);
1958   InsertNode(N);
1959   return SDValue(N, 0);
1960 }
1961 
1962 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1963                                       int64_t Offset, bool isTarget,
1964                                       unsigned TargetFlags) {
1965   unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1966 
1967   FoldingSetNodeID ID;
1968   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1969   ID.AddPointer(BA);
1970   ID.AddInteger(Offset);
1971   ID.AddInteger(TargetFlags);
1972   void *IP = nullptr;
1973   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1974     return SDValue(E, 0);
1975 
1976   auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1977   CSEMap.InsertNode(N, IP);
1978   InsertNode(N);
1979   return SDValue(N, 0);
1980 }
1981 
1982 SDValue SelectionDAG::getSrcValue(const Value *V) {
1983   FoldingSetNodeID ID;
1984   AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1985   ID.AddPointer(V);
1986 
1987   void *IP = nullptr;
1988   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1989     return SDValue(E, 0);
1990 
1991   auto *N = newSDNode<SrcValueSDNode>(V);
1992   CSEMap.InsertNode(N, IP);
1993   InsertNode(N);
1994   return SDValue(N, 0);
1995 }
1996 
1997 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1998   FoldingSetNodeID ID;
1999   AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
2000   ID.AddPointer(MD);
2001 
2002   void *IP = nullptr;
2003   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2004     return SDValue(E, 0);
2005 
2006   auto *N = newSDNode<MDNodeSDNode>(MD);
2007   CSEMap.InsertNode(N, IP);
2008   InsertNode(N);
2009   return SDValue(N, 0);
2010 }
2011 
2012 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
2013   if (VT == V.getValueType())
2014     return V;
2015 
2016   return getNode(ISD::BITCAST, SDLoc(V), VT, V);
2017 }
2018 
2019 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
2020                                        unsigned SrcAS, unsigned DestAS) {
2021   SDValue Ops[] = {Ptr};
2022   FoldingSetNodeID ID;
2023   AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
2024   ID.AddInteger(SrcAS);
2025   ID.AddInteger(DestAS);
2026 
2027   void *IP = nullptr;
2028   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2029     return SDValue(E, 0);
2030 
2031   auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
2032                                            VT, SrcAS, DestAS);
2033   createOperands(N, Ops);
2034 
2035   CSEMap.InsertNode(N, IP);
2036   InsertNode(N);
2037   return SDValue(N, 0);
2038 }
2039 
2040 SDValue SelectionDAG::getFreeze(SDValue V) {
2041   return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
2042 }
2043 
2044 /// getShiftAmountOperand - Return the specified value casted to
2045 /// the target's desired shift amount type.
2046 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
2047   EVT OpTy = Op.getValueType();
2048   EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
2049   if (OpTy == ShTy || OpTy.isVector()) return Op;
2050 
2051   return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
2052 }
2053 
2054 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
2055   SDLoc dl(Node);
2056   const TargetLowering &TLI = getTargetLoweringInfo();
2057   const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2058   EVT VT = Node->getValueType(0);
2059   SDValue Tmp1 = Node->getOperand(0);
2060   SDValue Tmp2 = Node->getOperand(1);
2061   const MaybeAlign MA(Node->getConstantOperandVal(3));
2062 
2063   SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
2064                                Tmp2, MachinePointerInfo(V));
2065   SDValue VAList = VAListLoad;
2066 
2067   if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
2068     VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2069                      getConstant(MA->value() - 1, dl, VAList.getValueType()));
2070 
2071     VAList =
2072         getNode(ISD::AND, dl, VAList.getValueType(), VAList,
2073                 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
2074   }
2075 
2076   // Increment the pointer, VAList, to the next vaarg
2077   Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2078                  getConstant(getDataLayout().getTypeAllocSize(
2079                                                VT.getTypeForEVT(*getContext())),
2080                              dl, VAList.getValueType()));
2081   // Store the incremented VAList to the legalized pointer
2082   Tmp1 =
2083       getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
2084   // Load the actual argument out of the pointer VAList
2085   return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
2086 }
2087 
2088 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
2089   SDLoc dl(Node);
2090   const TargetLowering &TLI = getTargetLoweringInfo();
2091   // This defaults to loading a pointer from the input and storing it to the
2092   // output, returning the chain.
2093   const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2094   const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2095   SDValue Tmp1 =
2096       getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
2097               Node->getOperand(2), MachinePointerInfo(VS));
2098   return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2099                   MachinePointerInfo(VD));
2100 }
2101 
2102 Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
2103   const DataLayout &DL = getDataLayout();
2104   Type *Ty = VT.getTypeForEVT(*getContext());
2105   Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2106 
2107   if (TLI->isTypeLegal(VT) || !VT.isVector())
2108     return RedAlign;
2109 
2110   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2111   const Align StackAlign = TFI->getStackAlign();
2112 
2113   // See if we can choose a smaller ABI alignment in cases where it's an
2114   // illegal vector type that will get broken down.
2115   if (RedAlign > StackAlign) {
2116     EVT IntermediateVT;
2117     MVT RegisterVT;
2118     unsigned NumIntermediates;
2119     TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
2120                                 NumIntermediates, RegisterVT);
2121     Ty = IntermediateVT.getTypeForEVT(*getContext());
2122     Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2123     if (RedAlign2 < RedAlign)
2124       RedAlign = RedAlign2;
2125   }
2126 
2127   return RedAlign;
2128 }
2129 
2130 SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
2131   MachineFrameInfo &MFI = MF->getFrameInfo();
2132   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2133   int StackID = 0;
2134   if (Bytes.isScalable())
2135     StackID = TFI->getStackIDForScalableVectors();
2136   // The stack id gives an indication of whether the object is scalable or
2137   // not, so it's safe to pass in the minimum size here.
2138   int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
2139                                        false, nullptr, StackID);
2140   return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2141 }
2142 
2143 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
2144   Type *Ty = VT.getTypeForEVT(*getContext());
2145   Align StackAlign =
2146       std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
2147   return CreateStackTemporary(VT.getStoreSize(), StackAlign);
2148 }
2149 
2150 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
2151   TypeSize VT1Size = VT1.getStoreSize();
2152   TypeSize VT2Size = VT2.getStoreSize();
2153   assert(VT1Size.isScalable() == VT2Size.isScalable() &&
2154          "Don't know how to choose the maximum size when creating a stack "
2155          "temporary");
2156   TypeSize Bytes =
2157       VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
2158 
2159   Type *Ty1 = VT1.getTypeForEVT(*getContext());
2160   Type *Ty2 = VT2.getTypeForEVT(*getContext());
2161   const DataLayout &DL = getDataLayout();
2162   Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
2163   return CreateStackTemporary(Bytes, Align);
2164 }
2165 
2166 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
2167                                 ISD::CondCode Cond, const SDLoc &dl) {
2168   EVT OpVT = N1.getValueType();
2169 
2170   // These setcc operations always fold.
2171   switch (Cond) {
2172   default: break;
2173   case ISD::SETFALSE:
2174   case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2175   case ISD::SETTRUE:
2176   case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2177 
2178   case ISD::SETOEQ:
2179   case ISD::SETOGT:
2180   case ISD::SETOGE:
2181   case ISD::SETOLT:
2182   case ISD::SETOLE:
2183   case ISD::SETONE:
2184   case ISD::SETO:
2185   case ISD::SETUO:
2186   case ISD::SETUEQ:
2187   case ISD::SETUNE:
2188     assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2189     break;
2190   }
2191 
2192   if (OpVT.isInteger()) {
2193     // For EQ and NE, we can always pick a value for the undef to make the
2194     // predicate pass or fail, so we can return undef.
2195     // Matches behavior in llvm::ConstantFoldCompareInstruction.
2196     // icmp eq/ne X, undef -> undef.
2197     if ((N1.isUndef() || N2.isUndef()) &&
2198         (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2199       return getUNDEF(VT);
2200 
2201     // If both operands are undef, we can return undef for int comparison.
2202     // icmp undef, undef -> undef.
2203     if (N1.isUndef() && N2.isUndef())
2204       return getUNDEF(VT);
2205 
2206     // icmp X, X -> true/false
2207     // icmp X, undef -> true/false because undef could be X.
2208     if (N1 == N2)
2209       return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2210   }
2211 
2212   if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
2213     const APInt &C2 = N2C->getAPIntValue();
2214     if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
2215       const APInt &C1 = N1C->getAPIntValue();
2216 
2217       switch (Cond) {
2218       default: llvm_unreachable("Unknown integer setcc!");
2219       case ISD::SETEQ:  return getBoolConstant(C1 == C2, dl, VT, OpVT);
2220       case ISD::SETNE:  return getBoolConstant(C1 != C2, dl, VT, OpVT);
2221       case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
2222       case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
2223       case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
2224       case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
2225       case ISD::SETLT:  return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
2226       case ISD::SETGT:  return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
2227       case ISD::SETLE:  return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
2228       case ISD::SETGE:  return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
2229       }
2230     }
2231   }
2232 
2233   auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2234   auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2235 
2236   if (N1CFP && N2CFP) {
2237     APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2238     switch (Cond) {
2239     default: break;
2240     case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
2241                         return getUNDEF(VT);
2242                       LLVM_FALLTHROUGH;
2243     case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2244                                              OpVT);
2245     case ISD::SETNE:  if (R==APFloat::cmpUnordered)
2246                         return getUNDEF(VT);
2247                       LLVM_FALLTHROUGH;
2248     case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2249                                              R==APFloat::cmpLessThan, dl, VT,
2250                                              OpVT);
2251     case ISD::SETLT:  if (R==APFloat::cmpUnordered)
2252                         return getUNDEF(VT);
2253                       LLVM_FALLTHROUGH;
2254     case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2255                                              OpVT);
2256     case ISD::SETGT:  if (R==APFloat::cmpUnordered)
2257                         return getUNDEF(VT);
2258                       LLVM_FALLTHROUGH;
2259     case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2260                                              VT, OpVT);
2261     case ISD::SETLE:  if (R==APFloat::cmpUnordered)
2262                         return getUNDEF(VT);
2263                       LLVM_FALLTHROUGH;
2264     case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2265                                              R==APFloat::cmpEqual, dl, VT,
2266                                              OpVT);
2267     case ISD::SETGE:  if (R==APFloat::cmpUnordered)
2268                         return getUNDEF(VT);
2269                       LLVM_FALLTHROUGH;
2270     case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2271                                          R==APFloat::cmpEqual, dl, VT, OpVT);
2272     case ISD::SETO:   return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2273                                              OpVT);
2274     case ISD::SETUO:  return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2275                                              OpVT);
2276     case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2277                                              R==APFloat::cmpEqual, dl, VT,
2278                                              OpVT);
2279     case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2280                                              OpVT);
2281     case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2282                                              R==APFloat::cmpLessThan, dl, VT,
2283                                              OpVT);
2284     case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2285                                              R==APFloat::cmpUnordered, dl, VT,
2286                                              OpVT);
2287     case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2288                                              VT, OpVT);
2289     case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2290                                              OpVT);
2291     }
2292   } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2293     // Ensure that the constant occurs on the RHS.
2294     ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2295     if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2296       return SDValue();
2297     return getSetCC(dl, VT, N2, N1, SwappedCond);
2298   } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2299              (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2300     // If an operand is known to be a nan (or undef that could be a nan), we can
2301     // fold it.
2302     // Choosing NaN for the undef will always make unordered comparison succeed
2303     // and ordered comparison fails.
2304     // Matches behavior in llvm::ConstantFoldCompareInstruction.
2305     switch (ISD::getUnorderedFlavor(Cond)) {
2306     default:
2307       llvm_unreachable("Unknown flavor!");
2308     case 0: // Known false.
2309       return getBoolConstant(false, dl, VT, OpVT);
2310     case 1: // Known true.
2311       return getBoolConstant(true, dl, VT, OpVT);
2312     case 2: // Undefined.
2313       return getUNDEF(VT);
2314     }
2315   }
2316 
2317   // Could not fold it.
2318   return SDValue();
2319 }
2320 
2321 /// See if the specified operand can be simplified with the knowledge that only
2322 /// the bits specified by DemandedBits are used.
2323 /// TODO: really we should be making this into the DAG equivalent of
2324 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2325 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2326   EVT VT = V.getValueType();
2327 
2328   if (VT.isScalableVector())
2329     return SDValue();
2330 
2331   APInt DemandedElts = VT.isVector()
2332                            ? APInt::getAllOnesValue(VT.getVectorNumElements())
2333                            : APInt(1, 1);
2334   return GetDemandedBits(V, DemandedBits, DemandedElts);
2335 }
2336 
2337 /// See if the specified operand can be simplified with the knowledge that only
2338 /// the bits specified by DemandedBits are used in the elements specified by
2339 /// DemandedElts.
2340 /// TODO: really we should be making this into the DAG equivalent of
2341 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2342 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2343                                       const APInt &DemandedElts) {
2344   switch (V.getOpcode()) {
2345   default:
2346     return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2347                                                 *this, 0);
2348   case ISD::Constant: {
2349     const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
2350     APInt NewVal = CVal & DemandedBits;
2351     if (NewVal != CVal)
2352       return getConstant(NewVal, SDLoc(V), V.getValueType());
2353     break;
2354   }
2355   case ISD::SRL:
2356     // Only look at single-use SRLs.
2357     if (!V.getNode()->hasOneUse())
2358       break;
2359     if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2360       // See if we can recursively simplify the LHS.
2361       unsigned Amt = RHSC->getZExtValue();
2362 
2363       // Watch out for shift count overflow though.
2364       if (Amt >= DemandedBits.getBitWidth())
2365         break;
2366       APInt SrcDemandedBits = DemandedBits << Amt;
2367       if (SDValue SimplifyLHS =
2368               GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2369         return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2370                        V.getOperand(1));
2371     }
2372     break;
2373   }
2374   return SDValue();
2375 }
2376 
2377 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
2378 /// use this predicate to simplify operations downstream.
2379 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2380   unsigned BitWidth = Op.getScalarValueSizeInBits();
2381   return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2382 }
2383 
2384 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
2385 /// this predicate to simplify operations downstream.  Mask is known to be zero
2386 /// for bits that V cannot have.
2387 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2388                                      unsigned Depth) const {
2389   return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
2390 }
2391 
2392 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2393 /// DemandedElts.  We use this predicate to simplify operations downstream.
2394 /// Mask is known to be zero for bits that V cannot have.
2395 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2396                                      const APInt &DemandedElts,
2397                                      unsigned Depth) const {
2398   return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2399 }
2400 
2401 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
2402 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
2403                                         unsigned Depth) const {
2404   return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2405 }
2406 
2407 /// isSplatValue - Return true if the vector V has the same value
2408 /// across all DemandedElts. For scalable vectors it does not make
2409 /// sense to specify which elements are demanded or undefined, therefore
2410 /// they are simply ignored.
2411 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2412                                 APInt &UndefElts, unsigned Depth) {
2413   EVT VT = V.getValueType();
2414   assert(VT.isVector() && "Vector type expected");
2415 
2416   if (!VT.isScalableVector() && !DemandedElts)
2417     return false; // No demanded elts, better to assume we don't know anything.
2418 
2419   if (Depth >= MaxRecursionDepth)
2420     return false; // Limit search depth.
2421 
2422   // Deal with some common cases here that work for both fixed and scalable
2423   // vector types.
2424   switch (V.getOpcode()) {
2425   case ISD::SPLAT_VECTOR:
2426     UndefElts = V.getOperand(0).isUndef()
2427                     ? APInt::getAllOnesValue(DemandedElts.getBitWidth())
2428                     : APInt(DemandedElts.getBitWidth(), 0);
2429     return true;
2430   case ISD::ADD:
2431   case ISD::SUB:
2432   case ISD::AND: {
2433     APInt UndefLHS, UndefRHS;
2434     SDValue LHS = V.getOperand(0);
2435     SDValue RHS = V.getOperand(1);
2436     if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
2437         isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
2438       UndefElts = UndefLHS | UndefRHS;
2439       return true;
2440     }
2441     break;
2442   }
2443   case ISD::TRUNCATE:
2444   case ISD::SIGN_EXTEND:
2445   case ISD::ZERO_EXTEND:
2446     return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
2447   }
2448 
2449   // We don't support other cases than those above for scalable vectors at
2450   // the moment.
2451   if (VT.isScalableVector())
2452     return false;
2453 
2454   unsigned NumElts = VT.getVectorNumElements();
2455   assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2456   UndefElts = APInt::getNullValue(NumElts);
2457 
2458   switch (V.getOpcode()) {
2459   case ISD::BUILD_VECTOR: {
2460     SDValue Scl;
2461     for (unsigned i = 0; i != NumElts; ++i) {
2462       SDValue Op = V.getOperand(i);
2463       if (Op.isUndef()) {
2464         UndefElts.setBit(i);
2465         continue;
2466       }
2467       if (!DemandedElts[i])
2468         continue;
2469       if (Scl && Scl != Op)
2470         return false;
2471       Scl = Op;
2472     }
2473     return true;
2474   }
2475   case ISD::VECTOR_SHUFFLE: {
2476     // Check if this is a shuffle node doing a splat.
2477     // TODO: Do we need to handle shuffle(splat, undef, mask)?
2478     int SplatIndex = -1;
2479     ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2480     for (int i = 0; i != (int)NumElts; ++i) {
2481       int M = Mask[i];
2482       if (M < 0) {
2483         UndefElts.setBit(i);
2484         continue;
2485       }
2486       if (!DemandedElts[i])
2487         continue;
2488       if (0 <= SplatIndex && SplatIndex != M)
2489         return false;
2490       SplatIndex = M;
2491     }
2492     return true;
2493   }
2494   case ISD::EXTRACT_SUBVECTOR: {
2495     // Offset the demanded elts by the subvector index.
2496     SDValue Src = V.getOperand(0);
2497     uint64_t Idx = V.getConstantOperandVal(1);
2498     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2499     APInt UndefSrcElts;
2500     APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2501     if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
2502       UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2503       return true;
2504     }
2505     break;
2506   }
2507   }
2508 
2509   return false;
2510 }
2511 
2512 /// Helper wrapper to main isSplatValue function.
2513 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
2514   EVT VT = V.getValueType();
2515   assert(VT.isVector() && "Vector type expected");
2516 
2517   APInt UndefElts;
2518   APInt DemandedElts;
2519 
2520   // For now we don't support this with scalable vectors.
2521   if (!VT.isScalableVector())
2522     DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2523   return isSplatValue(V, DemandedElts, UndefElts) &&
2524          (AllowUndefs || !UndefElts);
2525 }
2526 
2527 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2528   V = peekThroughExtractSubvectors(V);
2529 
2530   EVT VT = V.getValueType();
2531   unsigned Opcode = V.getOpcode();
2532   switch (Opcode) {
2533   default: {
2534     APInt UndefElts;
2535     APInt DemandedElts;
2536 
2537     if (!VT.isScalableVector())
2538       DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2539 
2540     if (isSplatValue(V, DemandedElts, UndefElts)) {
2541       if (VT.isScalableVector()) {
2542         // DemandedElts and UndefElts are ignored for scalable vectors, since
2543         // the only supported cases are SPLAT_VECTOR nodes.
2544         SplatIdx = 0;
2545       } else {
2546         // Handle case where all demanded elements are UNDEF.
2547         if (DemandedElts.isSubsetOf(UndefElts)) {
2548           SplatIdx = 0;
2549           return getUNDEF(VT);
2550         }
2551         SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
2552       }
2553       return V;
2554     }
2555     break;
2556   }
2557   case ISD::SPLAT_VECTOR:
2558     SplatIdx = 0;
2559     return V;
2560   case ISD::VECTOR_SHUFFLE: {
2561     if (VT.isScalableVector())
2562       return SDValue();
2563 
2564     // Check if this is a shuffle node doing a splat.
2565     // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
2566     // getTargetVShiftNode currently struggles without the splat source.
2567     auto *SVN = cast<ShuffleVectorSDNode>(V);
2568     if (!SVN->isSplat())
2569       break;
2570     int Idx = SVN->getSplatIndex();
2571     int NumElts = V.getValueType().getVectorNumElements();
2572     SplatIdx = Idx % NumElts;
2573     return V.getOperand(Idx / NumElts);
2574   }
2575   }
2576 
2577   return SDValue();
2578 }
2579 
2580 SDValue SelectionDAG::getSplatValue(SDValue V) {
2581   int SplatIdx;
2582   if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx))
2583     return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V),
2584                    SrcVector.getValueType().getScalarType(), SrcVector,
2585                    getVectorIdxConstant(SplatIdx, SDLoc(V)));
2586   return SDValue();
2587 }
2588 
2589 const APInt *
2590 SelectionDAG::getValidShiftAmountConstant(SDValue V,
2591                                           const APInt &DemandedElts) const {
2592   assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2593           V.getOpcode() == ISD::SRA) &&
2594          "Unknown shift node");
2595   unsigned BitWidth = V.getScalarValueSizeInBits();
2596   if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
2597     // Shifting more than the bitwidth is not valid.
2598     const APInt &ShAmt = SA->getAPIntValue();
2599     if (ShAmt.ult(BitWidth))
2600       return &ShAmt;
2601   }
2602   return nullptr;
2603 }
2604 
2605 const APInt *SelectionDAG::getValidMinimumShiftAmountConstant(
2606     SDValue V, const APInt &DemandedElts) const {
2607   assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2608           V.getOpcode() == ISD::SRA) &&
2609          "Unknown shift node");
2610   if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2611     return ValidAmt;
2612   unsigned BitWidth = V.getScalarValueSizeInBits();
2613   auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2614   if (!BV)
2615     return nullptr;
2616   const APInt *MinShAmt = nullptr;
2617   for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2618     if (!DemandedElts[i])
2619       continue;
2620     auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2621     if (!SA)
2622       return nullptr;
2623     // Shifting more than the bitwidth is not valid.
2624     const APInt &ShAmt = SA->getAPIntValue();
2625     if (ShAmt.uge(BitWidth))
2626       return nullptr;
2627     if (MinShAmt && MinShAmt->ule(ShAmt))
2628       continue;
2629     MinShAmt = &ShAmt;
2630   }
2631   return MinShAmt;
2632 }
2633 
2634 const APInt *SelectionDAG::getValidMaximumShiftAmountConstant(
2635     SDValue V, const APInt &DemandedElts) const {
2636   assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2637           V.getOpcode() == ISD::SRA) &&
2638          "Unknown shift node");
2639   if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2640     return ValidAmt;
2641   unsigned BitWidth = V.getScalarValueSizeInBits();
2642   auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2643   if (!BV)
2644     return nullptr;
2645   const APInt *MaxShAmt = nullptr;
2646   for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2647     if (!DemandedElts[i])
2648       continue;
2649     auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2650     if (!SA)
2651       return nullptr;
2652     // Shifting more than the bitwidth is not valid.
2653     const APInt &ShAmt = SA->getAPIntValue();
2654     if (ShAmt.uge(BitWidth))
2655       return nullptr;
2656     if (MaxShAmt && MaxShAmt->uge(ShAmt))
2657       continue;
2658     MaxShAmt = &ShAmt;
2659   }
2660   return MaxShAmt;
2661 }
2662 
2663 /// Determine which bits of Op are known to be either zero or one and return
2664 /// them in Known. For vectors, the known bits are those that are shared by
2665 /// every vector element.
2666 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2667   EVT VT = Op.getValueType();
2668 
2669   // TOOD: Until we have a plan for how to represent demanded elements for
2670   // scalable vectors, we can just bail out for now.
2671   if (Op.getValueType().isScalableVector()) {
2672     unsigned BitWidth = Op.getScalarValueSizeInBits();
2673     return KnownBits(BitWidth);
2674   }
2675 
2676   APInt DemandedElts = VT.isVector()
2677                            ? APInt::getAllOnesValue(VT.getVectorNumElements())
2678                            : APInt(1, 1);
2679   return computeKnownBits(Op, DemandedElts, Depth);
2680 }
2681 
2682 /// Determine which bits of Op are known to be either zero or one and return
2683 /// them in Known. The DemandedElts argument allows us to only collect the known
2684 /// bits that are shared by the requested vector elements.
2685 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2686                                          unsigned Depth) const {
2687   unsigned BitWidth = Op.getScalarValueSizeInBits();
2688 
2689   KnownBits Known(BitWidth);   // Don't know anything.
2690 
2691   // TOOD: Until we have a plan for how to represent demanded elements for
2692   // scalable vectors, we can just bail out for now.
2693   if (Op.getValueType().isScalableVector())
2694     return Known;
2695 
2696   if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2697     // We know all of the bits for a constant!
2698     return KnownBits::makeConstant(C->getAPIntValue());
2699   }
2700   if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2701     // We know all of the bits for a constant fp!
2702     return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
2703   }
2704 
2705   if (Depth >= MaxRecursionDepth)
2706     return Known;  // Limit search depth.
2707 
2708   KnownBits Known2;
2709   unsigned NumElts = DemandedElts.getBitWidth();
2710   assert((!Op.getValueType().isVector() ||
2711           NumElts == Op.getValueType().getVectorNumElements()) &&
2712          "Unexpected vector size");
2713 
2714   if (!DemandedElts)
2715     return Known;  // No demanded elts, better to assume we don't know anything.
2716 
2717   unsigned Opcode = Op.getOpcode();
2718   switch (Opcode) {
2719   case ISD::BUILD_VECTOR:
2720     // Collect the known bits that are shared by every demanded vector element.
2721     Known.Zero.setAllBits(); Known.One.setAllBits();
2722     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2723       if (!DemandedElts[i])
2724         continue;
2725 
2726       SDValue SrcOp = Op.getOperand(i);
2727       Known2 = computeKnownBits(SrcOp, Depth + 1);
2728 
2729       // BUILD_VECTOR can implicitly truncate sources, we must handle this.
2730       if (SrcOp.getValueSizeInBits() != BitWidth) {
2731         assert(SrcOp.getValueSizeInBits() > BitWidth &&
2732                "Expected BUILD_VECTOR implicit truncation");
2733         Known2 = Known2.trunc(BitWidth);
2734       }
2735 
2736       // Known bits are the values that are shared by every demanded element.
2737       Known = KnownBits::commonBits(Known, Known2);
2738 
2739       // If we don't know any bits, early out.
2740       if (Known.isUnknown())
2741         break;
2742     }
2743     break;
2744   case ISD::VECTOR_SHUFFLE: {
2745     // Collect the known bits that are shared by every vector element referenced
2746     // by the shuffle.
2747     APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2748     Known.Zero.setAllBits(); Known.One.setAllBits();
2749     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2750     assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2751     for (unsigned i = 0; i != NumElts; ++i) {
2752       if (!DemandedElts[i])
2753         continue;
2754 
2755       int M = SVN->getMaskElt(i);
2756       if (M < 0) {
2757         // For UNDEF elements, we don't know anything about the common state of
2758         // the shuffle result.
2759         Known.resetAll();
2760         DemandedLHS.clearAllBits();
2761         DemandedRHS.clearAllBits();
2762         break;
2763       }
2764 
2765       if ((unsigned)M < NumElts)
2766         DemandedLHS.setBit((unsigned)M % NumElts);
2767       else
2768         DemandedRHS.setBit((unsigned)M % NumElts);
2769     }
2770     // Known bits are the values that are shared by every demanded element.
2771     if (!!DemandedLHS) {
2772       SDValue LHS = Op.getOperand(0);
2773       Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2774       Known = KnownBits::commonBits(Known, Known2);
2775     }
2776     // If we don't know any bits, early out.
2777     if (Known.isUnknown())
2778       break;
2779     if (!!DemandedRHS) {
2780       SDValue RHS = Op.getOperand(1);
2781       Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2782       Known = KnownBits::commonBits(Known, Known2);
2783     }
2784     break;
2785   }
2786   case ISD::CONCAT_VECTORS: {
2787     // Split DemandedElts and test each of the demanded subvectors.
2788     Known.Zero.setAllBits(); Known.One.setAllBits();
2789     EVT SubVectorVT = Op.getOperand(0).getValueType();
2790     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2791     unsigned NumSubVectors = Op.getNumOperands();
2792     for (unsigned i = 0; i != NumSubVectors; ++i) {
2793       APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2794       DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2795       if (!!DemandedSub) {
2796         SDValue Sub = Op.getOperand(i);
2797         Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2798         Known = KnownBits::commonBits(Known, Known2);
2799       }
2800       // If we don't know any bits, early out.
2801       if (Known.isUnknown())
2802         break;
2803     }
2804     break;
2805   }
2806   case ISD::INSERT_SUBVECTOR: {
2807     // Demand any elements from the subvector and the remainder from the src its
2808     // inserted into.
2809     SDValue Src = Op.getOperand(0);
2810     SDValue Sub = Op.getOperand(1);
2811     uint64_t Idx = Op.getConstantOperandVal(2);
2812     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2813     APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2814     APInt DemandedSrcElts = DemandedElts;
2815     DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
2816 
2817     Known.One.setAllBits();
2818     Known.Zero.setAllBits();
2819     if (!!DemandedSubElts) {
2820       Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2821       if (Known.isUnknown())
2822         break; // early-out.
2823     }
2824     if (!!DemandedSrcElts) {
2825       Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2826       Known = KnownBits::commonBits(Known, Known2);
2827     }
2828     break;
2829   }
2830   case ISD::EXTRACT_SUBVECTOR: {
2831     // Offset the demanded elts by the subvector index.
2832     SDValue Src = Op.getOperand(0);
2833     // Bail until we can represent demanded elements for scalable vectors.
2834     if (Src.getValueType().isScalableVector())
2835       break;
2836     uint64_t Idx = Op.getConstantOperandVal(1);
2837     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2838     APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2839     Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2840     break;
2841   }
2842   case ISD::SCALAR_TO_VECTOR: {
2843     // We know about scalar_to_vector as much as we know about it source,
2844     // which becomes the first element of otherwise unknown vector.
2845     if (DemandedElts != 1)
2846       break;
2847 
2848     SDValue N0 = Op.getOperand(0);
2849     Known = computeKnownBits(N0, Depth + 1);
2850     if (N0.getValueSizeInBits() != BitWidth)
2851       Known = Known.trunc(BitWidth);
2852 
2853     break;
2854   }
2855   case ISD::BITCAST: {
2856     SDValue N0 = Op.getOperand(0);
2857     EVT SubVT = N0.getValueType();
2858     unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2859 
2860     // Ignore bitcasts from unsupported types.
2861     if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2862       break;
2863 
2864     // Fast handling of 'identity' bitcasts.
2865     if (BitWidth == SubBitWidth) {
2866       Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2867       break;
2868     }
2869 
2870     bool IsLE = getDataLayout().isLittleEndian();
2871 
2872     // Bitcast 'small element' vector to 'large element' scalar/vector.
2873     if ((BitWidth % SubBitWidth) == 0) {
2874       assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2875 
2876       // Collect known bits for the (larger) output by collecting the known
2877       // bits from each set of sub elements and shift these into place.
2878       // We need to separately call computeKnownBits for each set of
2879       // sub elements as the knownbits for each is likely to be different.
2880       unsigned SubScale = BitWidth / SubBitWidth;
2881       APInt SubDemandedElts(NumElts * SubScale, 0);
2882       for (unsigned i = 0; i != NumElts; ++i)
2883         if (DemandedElts[i])
2884           SubDemandedElts.setBit(i * SubScale);
2885 
2886       for (unsigned i = 0; i != SubScale; ++i) {
2887         Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
2888                          Depth + 1);
2889         unsigned Shifts = IsLE ? i : SubScale - 1 - i;
2890         Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
2891         Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
2892       }
2893     }
2894 
2895     // Bitcast 'large element' scalar/vector to 'small element' vector.
2896     if ((SubBitWidth % BitWidth) == 0) {
2897       assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2898 
2899       // Collect known bits for the (smaller) output by collecting the known
2900       // bits from the overlapping larger input elements and extracting the
2901       // sub sections we actually care about.
2902       unsigned SubScale = SubBitWidth / BitWidth;
2903       APInt SubDemandedElts(NumElts / SubScale, 0);
2904       for (unsigned i = 0; i != NumElts; ++i)
2905         if (DemandedElts[i])
2906           SubDemandedElts.setBit(i / SubScale);
2907 
2908       Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
2909 
2910       Known.Zero.setAllBits(); Known.One.setAllBits();
2911       for (unsigned i = 0; i != NumElts; ++i)
2912         if (DemandedElts[i]) {
2913           unsigned Shifts = IsLE ? i : NumElts - 1 - i;
2914           unsigned Offset = (Shifts % SubScale) * BitWidth;
2915           Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2916           Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2917           // If we don't know any bits, early out.
2918           if (Known.isUnknown())
2919             break;
2920         }
2921     }
2922     break;
2923   }
2924   case ISD::AND:
2925     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2926     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2927 
2928     Known &= Known2;
2929     break;
2930   case ISD::OR:
2931     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2932     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2933 
2934     Known |= Known2;
2935     break;
2936   case ISD::XOR:
2937     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2938     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2939 
2940     Known ^= Known2;
2941     break;
2942   case ISD::MUL: {
2943     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2944     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2945     Known = KnownBits::computeForMul(Known, Known2);
2946     break;
2947   }
2948   case ISD::UDIV: {
2949     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2950     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2951     Known = KnownBits::udiv(Known, Known2);
2952     break;
2953   }
2954   case ISD::SELECT:
2955   case ISD::VSELECT:
2956     Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2957     // If we don't know any bits, early out.
2958     if (Known.isUnknown())
2959       break;
2960     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
2961 
2962     // Only known if known in both the LHS and RHS.
2963     Known = KnownBits::commonBits(Known, Known2);
2964     break;
2965   case ISD::SELECT_CC:
2966     Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
2967     // If we don't know any bits, early out.
2968     if (Known.isUnknown())
2969       break;
2970     Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2971 
2972     // Only known if known in both the LHS and RHS.
2973     Known = KnownBits::commonBits(Known, Known2);
2974     break;
2975   case ISD::SMULO:
2976   case ISD::UMULO:
2977   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
2978     if (Op.getResNo() != 1)
2979       break;
2980     // The boolean result conforms to getBooleanContents.
2981     // If we know the result of a setcc has the top bits zero, use this info.
2982     // We know that we have an integer-based boolean since these operations
2983     // are only available for integer.
2984     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2985             TargetLowering::ZeroOrOneBooleanContent &&
2986         BitWidth > 1)
2987       Known.Zero.setBitsFrom(1);
2988     break;
2989   case ISD::SETCC:
2990   case ISD::STRICT_FSETCC:
2991   case ISD::STRICT_FSETCCS: {
2992     unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
2993     // If we know the result of a setcc has the top bits zero, use this info.
2994     if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
2995             TargetLowering::ZeroOrOneBooleanContent &&
2996         BitWidth > 1)
2997       Known.Zero.setBitsFrom(1);
2998     break;
2999   }
3000   case ISD::SHL:
3001     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3002     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3003     Known = KnownBits::shl(Known, Known2);
3004 
3005     // Minimum shift low bits are known zero.
3006     if (const APInt *ShMinAmt =
3007             getValidMinimumShiftAmountConstant(Op, DemandedElts))
3008       Known.Zero.setLowBits(ShMinAmt->getZExtValue());
3009     break;
3010   case ISD::SRL:
3011     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3012     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3013     Known = KnownBits::lshr(Known, Known2);
3014 
3015     // Minimum shift high bits are known zero.
3016     if (const APInt *ShMinAmt =
3017             getValidMinimumShiftAmountConstant(Op, DemandedElts))
3018       Known.Zero.setHighBits(ShMinAmt->getZExtValue());
3019     break;
3020   case ISD::SRA:
3021     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3022     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3023     Known = KnownBits::ashr(Known, Known2);
3024     // TODO: Add minimum shift high known sign bits.
3025     break;
3026   case ISD::FSHL:
3027   case ISD::FSHR:
3028     if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
3029       unsigned Amt = C->getAPIntValue().urem(BitWidth);
3030 
3031       // For fshl, 0-shift returns the 1st arg.
3032       // For fshr, 0-shift returns the 2nd arg.
3033       if (Amt == 0) {
3034         Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
3035                                  DemandedElts, Depth + 1);
3036         break;
3037       }
3038 
3039       // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3040       // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3041       Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3042       Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3043       if (Opcode == ISD::FSHL) {
3044         Known.One <<= Amt;
3045         Known.Zero <<= Amt;
3046         Known2.One.lshrInPlace(BitWidth - Amt);
3047         Known2.Zero.lshrInPlace(BitWidth - Amt);
3048       } else {
3049         Known.One <<= BitWidth - Amt;
3050         Known.Zero <<= BitWidth - Amt;
3051         Known2.One.lshrInPlace(Amt);
3052         Known2.Zero.lshrInPlace(Amt);
3053       }
3054       Known.One |= Known2.One;
3055       Known.Zero |= Known2.Zero;
3056     }
3057     break;
3058   case ISD::SIGN_EXTEND_INREG: {
3059     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3060     EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3061     Known = Known.sextInReg(EVT.getScalarSizeInBits());
3062     break;
3063   }
3064   case ISD::CTTZ:
3065   case ISD::CTTZ_ZERO_UNDEF: {
3066     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3067     // If we have a known 1, its position is our upper bound.
3068     unsigned PossibleTZ = Known2.countMaxTrailingZeros();
3069     unsigned LowBits = Log2_32(PossibleTZ) + 1;
3070     Known.Zero.setBitsFrom(LowBits);
3071     break;
3072   }
3073   case ISD::CTLZ:
3074   case ISD::CTLZ_ZERO_UNDEF: {
3075     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3076     // If we have a known 1, its position is our upper bound.
3077     unsigned PossibleLZ = Known2.countMaxLeadingZeros();
3078     unsigned LowBits = Log2_32(PossibleLZ) + 1;
3079     Known.Zero.setBitsFrom(LowBits);
3080     break;
3081   }
3082   case ISD::CTPOP: {
3083     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3084     // If we know some of the bits are zero, they can't be one.
3085     unsigned PossibleOnes = Known2.countMaxPopulation();
3086     Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
3087     break;
3088   }
3089   case ISD::PARITY: {
3090     // Parity returns 0 everywhere but the LSB.
3091     Known.Zero.setBitsFrom(1);
3092     break;
3093   }
3094   case ISD::LOAD: {
3095     LoadSDNode *LD = cast<LoadSDNode>(Op);
3096     const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3097     if (ISD::isNON_EXTLoad(LD) && Cst) {
3098       // Determine any common known bits from the loaded constant pool value.
3099       Type *CstTy = Cst->getType();
3100       if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
3101         // If its a vector splat, then we can (quickly) reuse the scalar path.
3102         // NOTE: We assume all elements match and none are UNDEF.
3103         if (CstTy->isVectorTy()) {
3104           if (const Constant *Splat = Cst->getSplatValue()) {
3105             Cst = Splat;
3106             CstTy = Cst->getType();
3107           }
3108         }
3109         // TODO - do we need to handle different bitwidths?
3110         if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3111           // Iterate across all vector elements finding common known bits.
3112           Known.One.setAllBits();
3113           Known.Zero.setAllBits();
3114           for (unsigned i = 0; i != NumElts; ++i) {
3115             if (!DemandedElts[i])
3116               continue;
3117             if (Constant *Elt = Cst->getAggregateElement(i)) {
3118               if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3119                 const APInt &Value = CInt->getValue();
3120                 Known.One &= Value;
3121                 Known.Zero &= ~Value;
3122                 continue;
3123               }
3124               if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3125                 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3126                 Known.One &= Value;
3127                 Known.Zero &= ~Value;
3128                 continue;
3129               }
3130             }
3131             Known.One.clearAllBits();
3132             Known.Zero.clearAllBits();
3133             break;
3134           }
3135         } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
3136           if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
3137             Known = KnownBits::makeConstant(CInt->getValue());
3138           } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3139             Known =
3140                 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
3141           }
3142         }
3143       }
3144     } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
3145       // If this is a ZEXTLoad and we are looking at the loaded value.
3146       EVT VT = LD->getMemoryVT();
3147       unsigned MemBits = VT.getScalarSizeInBits();
3148       Known.Zero.setBitsFrom(MemBits);
3149     } else if (const MDNode *Ranges = LD->getRanges()) {
3150       if (LD->getExtensionType() == ISD::NON_EXTLOAD)
3151         computeKnownBitsFromRangeMetadata(*Ranges, Known);
3152     }
3153     break;
3154   }
3155   case ISD::ZERO_EXTEND_VECTOR_INREG: {
3156     EVT InVT = Op.getOperand(0).getValueType();
3157     APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3158     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3159     Known = Known.zext(BitWidth);
3160     break;
3161   }
3162   case ISD::ZERO_EXTEND: {
3163     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3164     Known = Known.zext(BitWidth);
3165     break;
3166   }
3167   case ISD::SIGN_EXTEND_VECTOR_INREG: {
3168     EVT InVT = Op.getOperand(0).getValueType();
3169     APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3170     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3171     // If the sign bit is known to be zero or one, then sext will extend
3172     // it to the top bits, else it will just zext.
3173     Known = Known.sext(BitWidth);
3174     break;
3175   }
3176   case ISD::SIGN_EXTEND: {
3177     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3178     // If the sign bit is known to be zero or one, then sext will extend
3179     // it to the top bits, else it will just zext.
3180     Known = Known.sext(BitWidth);
3181     break;
3182   }
3183   case ISD::ANY_EXTEND_VECTOR_INREG: {
3184     EVT InVT = Op.getOperand(0).getValueType();
3185     APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3186     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3187     Known = Known.anyext(BitWidth);
3188     break;
3189   }
3190   case ISD::ANY_EXTEND: {
3191     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3192     Known = Known.anyext(BitWidth);
3193     break;
3194   }
3195   case ISD::TRUNCATE: {
3196     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3197     Known = Known.trunc(BitWidth);
3198     break;
3199   }
3200   case ISD::AssertZext: {
3201     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3202     APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3203     Known = computeKnownBits(Op.getOperand(0), Depth+1);
3204     Known.Zero |= (~InMask);
3205     Known.One  &= (~Known.Zero);
3206     break;
3207   }
3208   case ISD::AssertAlign: {
3209     unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
3210     assert(LogOfAlign != 0);
3211     // If a node is guaranteed to be aligned, set low zero bits accordingly as
3212     // well as clearing one bits.
3213     Known.Zero.setLowBits(LogOfAlign);
3214     Known.One.clearLowBits(LogOfAlign);
3215     break;
3216   }
3217   case ISD::FGETSIGN:
3218     // All bits are zero except the low bit.
3219     Known.Zero.setBitsFrom(1);
3220     break;
3221   case ISD::USUBO:
3222   case ISD::SSUBO:
3223     if (Op.getResNo() == 1) {
3224       // If we know the result of a setcc has the top bits zero, use this info.
3225       if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3226               TargetLowering::ZeroOrOneBooleanContent &&
3227           BitWidth > 1)
3228         Known.Zero.setBitsFrom(1);
3229       break;
3230     }
3231     LLVM_FALLTHROUGH;
3232   case ISD::SUB:
3233   case ISD::SUBC: {
3234     assert(Op.getResNo() == 0 &&
3235            "We only compute knownbits for the difference here.");
3236 
3237     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3238     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3239     Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
3240                                         Known, Known2);
3241     break;
3242   }
3243   case ISD::UADDO:
3244   case ISD::SADDO:
3245   case ISD::ADDCARRY:
3246     if (Op.getResNo() == 1) {
3247       // If we know the result of a setcc has the top bits zero, use this info.
3248       if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3249               TargetLowering::ZeroOrOneBooleanContent &&
3250           BitWidth > 1)
3251         Known.Zero.setBitsFrom(1);
3252       break;
3253     }
3254     LLVM_FALLTHROUGH;
3255   case ISD::ADD:
3256   case ISD::ADDC:
3257   case ISD::ADDE: {
3258     assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3259 
3260     // With ADDE and ADDCARRY, a carry bit may be added in.
3261     KnownBits Carry(1);
3262     if (Opcode == ISD::ADDE)
3263       // Can't track carry from glue, set carry to unknown.
3264       Carry.resetAll();
3265     else if (Opcode == ISD::ADDCARRY)
3266       // TODO: Compute known bits for the carry operand. Not sure if it is worth
3267       // the trouble (how often will we find a known carry bit). And I haven't
3268       // tested this very much yet, but something like this might work:
3269       //   Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3270       //   Carry = Carry.zextOrTrunc(1, false);
3271       Carry.resetAll();
3272     else
3273       Carry.setAllZero();
3274 
3275     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3276     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3277     Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3278     break;
3279   }
3280   case ISD::SREM: {
3281     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3282     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3283     Known = KnownBits::srem(Known, Known2);
3284     break;
3285   }
3286   case ISD::UREM: {
3287     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3288     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3289     Known = KnownBits::urem(Known, Known2);
3290     break;
3291   }
3292   case ISD::EXTRACT_ELEMENT: {
3293     Known = computeKnownBits(Op.getOperand(0), Depth+1);
3294     const unsigned Index = Op.getConstantOperandVal(1);
3295     const unsigned EltBitWidth = Op.getValueSizeInBits();
3296 
3297     // Remove low part of known bits mask
3298     Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3299     Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3300 
3301     // Remove high part of known bit mask
3302     Known = Known.trunc(EltBitWidth);
3303     break;
3304   }
3305   case ISD::EXTRACT_VECTOR_ELT: {
3306     SDValue InVec = Op.getOperand(0);
3307     SDValue EltNo = Op.getOperand(1);
3308     EVT VecVT = InVec.getValueType();
3309     // computeKnownBits not yet implemented for scalable vectors.
3310     if (VecVT.isScalableVector())
3311       break;
3312     const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3313     const unsigned NumSrcElts = VecVT.getVectorNumElements();
3314 
3315     // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
3316     // anything about the extended bits.
3317     if (BitWidth > EltBitWidth)
3318       Known = Known.trunc(EltBitWidth);
3319 
3320     // If we know the element index, just demand that vector element, else for
3321     // an unknown element index, ignore DemandedElts and demand them all.
3322     APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3323     auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3324     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3325       DemandedSrcElts =
3326           APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3327 
3328     Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
3329     if (BitWidth > EltBitWidth)
3330       Known = Known.anyext(BitWidth);
3331     break;
3332   }
3333   case ISD::INSERT_VECTOR_ELT: {
3334     // If we know the element index, split the demand between the
3335     // source vector and the inserted element, otherwise assume we need
3336     // the original demanded vector elements and the value.
3337     SDValue InVec = Op.getOperand(0);
3338     SDValue InVal = Op.getOperand(1);
3339     SDValue EltNo = Op.getOperand(2);
3340     bool DemandedVal = true;
3341     APInt DemandedVecElts = DemandedElts;
3342     auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3343     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3344       unsigned EltIdx = CEltNo->getZExtValue();
3345       DemandedVal = !!DemandedElts[EltIdx];
3346       DemandedVecElts.clearBit(EltIdx);
3347     }
3348     Known.One.setAllBits();
3349     Known.Zero.setAllBits();
3350     if (DemandedVal) {
3351       Known2 = computeKnownBits(InVal, Depth + 1);
3352       Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth));
3353     }
3354     if (!!DemandedVecElts) {
3355       Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
3356       Known = KnownBits::commonBits(Known, Known2);
3357     }
3358     break;
3359   }
3360   case ISD::BITREVERSE: {
3361     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3362     Known = Known2.reverseBits();
3363     break;
3364   }
3365   case ISD::BSWAP: {
3366     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3367     Known = Known2.byteSwap();
3368     break;
3369   }
3370   case ISD::ABS: {
3371     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3372     Known = Known2.abs();
3373     break;
3374   }
3375   case ISD::UMIN: {
3376     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3377     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3378     Known = KnownBits::umin(Known, Known2);
3379     break;
3380   }
3381   case ISD::UMAX: {
3382     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3383     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3384     Known = KnownBits::umax(Known, Known2);
3385     break;
3386   }
3387   case ISD::SMIN:
3388   case ISD::SMAX: {
3389     // If we have a clamp pattern, we know that the number of sign bits will be
3390     // the minimum of the clamp min/max range.
3391     bool IsMax = (Opcode == ISD::SMAX);
3392     ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3393     if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3394       if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3395         CstHigh =
3396             isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3397     if (CstLow && CstHigh) {
3398       if (!IsMax)
3399         std::swap(CstLow, CstHigh);
3400 
3401       const APInt &ValueLow = CstLow->getAPIntValue();
3402       const APInt &ValueHigh = CstHigh->getAPIntValue();
3403       if (ValueLow.sle(ValueHigh)) {
3404         unsigned LowSignBits = ValueLow.getNumSignBits();
3405         unsigned HighSignBits = ValueHigh.getNumSignBits();
3406         unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3407         if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3408           Known.One.setHighBits(MinSignBits);
3409           break;
3410         }
3411         if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3412           Known.Zero.setHighBits(MinSignBits);
3413           break;
3414         }
3415       }
3416     }
3417 
3418     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3419     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3420     if (IsMax)
3421       Known = KnownBits::smax(Known, Known2);
3422     else
3423       Known = KnownBits::smin(Known, Known2);
3424     break;
3425   }
3426   case ISD::FrameIndex:
3427   case ISD::TargetFrameIndex:
3428     TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
3429                                        Known, getMachineFunction());
3430     break;
3431 
3432   default:
3433     if (Opcode < ISD::BUILTIN_OP_END)
3434       break;
3435     LLVM_FALLTHROUGH;
3436   case ISD::INTRINSIC_WO_CHAIN:
3437   case ISD::INTRINSIC_W_CHAIN:
3438   case ISD::INTRINSIC_VOID:
3439     // Allow the target to implement this method for its nodes.
3440     TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3441     break;
3442   }
3443 
3444   assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3445   return Known;
3446 }
3447 
3448 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3449                                                              SDValue N1) const {
3450   // X + 0 never overflow
3451   if (isNullConstant(N1))
3452     return OFK_Never;
3453 
3454   KnownBits N1Known = computeKnownBits(N1);
3455   if (N1Known.Zero.getBoolValue()) {
3456     KnownBits N0Known = computeKnownBits(N0);
3457 
3458     bool overflow;
3459     (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3460     if (!overflow)
3461       return OFK_Never;
3462   }
3463 
3464   // mulhi + 1 never overflow
3465   if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3466       (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3467     return OFK_Never;
3468 
3469   if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3470     KnownBits N0Known = computeKnownBits(N0);
3471 
3472     if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3473       return OFK_Never;
3474   }
3475 
3476   return OFK_Sometime;
3477 }
3478 
3479 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3480   EVT OpVT = Val.getValueType();
3481   unsigned BitWidth = OpVT.getScalarSizeInBits();
3482 
3483   // Is the constant a known power of 2?
3484   if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3485     return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3486 
3487   // A left-shift of a constant one will have exactly one bit set because
3488   // shifting the bit off the end is undefined.
3489   if (Val.getOpcode() == ISD::SHL) {
3490     auto *C = isConstOrConstSplat(Val.getOperand(0));
3491     if (C && C->getAPIntValue() == 1)
3492       return true;
3493   }
3494 
3495   // Similarly, a logical right-shift of a constant sign-bit will have exactly
3496   // one bit set.
3497   if (Val.getOpcode() == ISD::SRL) {
3498     auto *C = isConstOrConstSplat(Val.getOperand(0));
3499     if (C && C->getAPIntValue().isSignMask())
3500       return true;
3501   }
3502 
3503   // Are all operands of a build vector constant powers of two?
3504   if (Val.getOpcode() == ISD::BUILD_VECTOR)
3505     if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3506           if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3507             return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3508           return false;
3509         }))
3510       return true;
3511 
3512   // More could be done here, though the above checks are enough
3513   // to handle some common cases.
3514 
3515   // Fall back to computeKnownBits to catch other known cases.
3516   KnownBits Known = computeKnownBits(Val);
3517   return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3518 }
3519 
3520 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3521   EVT VT = Op.getValueType();
3522 
3523   // TODO: Assume we don't know anything for now.
3524   if (VT.isScalableVector())
3525     return 1;
3526 
3527   APInt DemandedElts = VT.isVector()
3528                            ? APInt::getAllOnesValue(VT.getVectorNumElements())
3529                            : APInt(1, 1);
3530   return ComputeNumSignBits(Op, DemandedElts, Depth);
3531 }
3532 
3533 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3534                                           unsigned Depth) const {
3535   EVT VT = Op.getValueType();
3536   assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3537   unsigned VTBits = VT.getScalarSizeInBits();
3538   unsigned NumElts = DemandedElts.getBitWidth();
3539   unsigned Tmp, Tmp2;
3540   unsigned FirstAnswer = 1;
3541 
3542   if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3543     const APInt &Val = C->getAPIntValue();
3544     return Val.getNumSignBits();
3545   }
3546 
3547   if (Depth >= MaxRecursionDepth)
3548     return 1;  // Limit search depth.
3549 
3550   if (!DemandedElts || VT.isScalableVector())
3551     return 1;  // No demanded elts, better to assume we don't know anything.
3552 
3553   unsigned Opcode = Op.getOpcode();
3554   switch (Opcode) {
3555   default: break;
3556   case ISD::AssertSext:
3557     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3558     return VTBits-Tmp+1;
3559   case ISD::AssertZext:
3560     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3561     return VTBits-Tmp;
3562 
3563   case ISD::BUILD_VECTOR:
3564     Tmp = VTBits;
3565     for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3566       if (!DemandedElts[i])
3567         continue;
3568 
3569       SDValue SrcOp = Op.getOperand(i);
3570       Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1);
3571 
3572       // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3573       if (SrcOp.getValueSizeInBits() != VTBits) {
3574         assert(SrcOp.getValueSizeInBits() > VTBits &&
3575                "Expected BUILD_VECTOR implicit truncation");
3576         unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3577         Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3578       }
3579       Tmp = std::min(Tmp, Tmp2);
3580     }
3581     return Tmp;
3582 
3583   case ISD::VECTOR_SHUFFLE: {
3584     // Collect the minimum number of sign bits that are shared by every vector
3585     // element referenced by the shuffle.
3586     APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3587     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3588     assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3589     for (unsigned i = 0; i != NumElts; ++i) {
3590       int M = SVN->getMaskElt(i);
3591       if (!DemandedElts[i])
3592         continue;
3593       // For UNDEF elements, we don't know anything about the common state of
3594       // the shuffle result.
3595       if (M < 0)
3596         return 1;
3597       if ((unsigned)M < NumElts)
3598         DemandedLHS.setBit((unsigned)M % NumElts);
3599       else
3600         DemandedRHS.setBit((unsigned)M % NumElts);
3601     }
3602     Tmp = std::numeric_limits<unsigned>::max();
3603     if (!!DemandedLHS)
3604       Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3605     if (!!DemandedRHS) {
3606       Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3607       Tmp = std::min(Tmp, Tmp2);
3608     }
3609     // If we don't know anything, early out and try computeKnownBits fall-back.
3610     if (Tmp == 1)
3611       break;
3612     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3613     return Tmp;
3614   }
3615 
3616   case ISD::BITCAST: {
3617     SDValue N0 = Op.getOperand(0);
3618     EVT SrcVT = N0.getValueType();
3619     unsigned SrcBits = SrcVT.getScalarSizeInBits();
3620 
3621     // Ignore bitcasts from unsupported types..
3622     if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3623       break;
3624 
3625     // Fast handling of 'identity' bitcasts.
3626     if (VTBits == SrcBits)
3627       return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3628 
3629     bool IsLE = getDataLayout().isLittleEndian();
3630 
3631     // Bitcast 'large element' scalar/vector to 'small element' vector.
3632     if ((SrcBits % VTBits) == 0) {
3633       assert(VT.isVector() && "Expected bitcast to vector");
3634 
3635       unsigned Scale = SrcBits / VTBits;
3636       APInt SrcDemandedElts(NumElts / Scale, 0);
3637       for (unsigned i = 0; i != NumElts; ++i)
3638         if (DemandedElts[i])
3639           SrcDemandedElts.setBit(i / Scale);
3640 
3641       // Fast case - sign splat can be simply split across the small elements.
3642       Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3643       if (Tmp == SrcBits)
3644         return VTBits;
3645 
3646       // Slow case - determine how far the sign extends into each sub-element.
3647       Tmp2 = VTBits;
3648       for (unsigned i = 0; i != NumElts; ++i)
3649         if (DemandedElts[i]) {
3650           unsigned SubOffset = i % Scale;
3651           SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3652           SubOffset = SubOffset * VTBits;
3653           if (Tmp <= SubOffset)
3654             return 1;
3655           Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3656         }
3657       return Tmp2;
3658     }
3659     break;
3660   }
3661 
3662   case ISD::SIGN_EXTEND:
3663     Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3664     return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3665   case ISD::SIGN_EXTEND_INREG:
3666     // Max of the input and what this extends.
3667     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3668     Tmp = VTBits-Tmp+1;
3669     Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3670     return std::max(Tmp, Tmp2);
3671   case ISD::SIGN_EXTEND_VECTOR_INREG: {
3672     SDValue Src = Op.getOperand(0);
3673     EVT SrcVT = Src.getValueType();
3674     APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3675     Tmp = VTBits - SrcVT.getScalarSizeInBits();
3676     return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3677   }
3678   case ISD::SRA:
3679     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3680     // SRA X, C -> adds C sign bits.
3681     if (const APInt *ShAmt =
3682             getValidMinimumShiftAmountConstant(Op, DemandedElts))
3683       Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3684     return Tmp;
3685   case ISD::SHL:
3686     if (const APInt *ShAmt =
3687             getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
3688       // shl destroys sign bits, ensure it doesn't shift out all sign bits.
3689       Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3690       if (ShAmt->ult(Tmp))
3691         return Tmp - ShAmt->getZExtValue();
3692     }
3693     break;
3694   case ISD::AND:
3695   case ISD::OR:
3696   case ISD::XOR:    // NOT is handled here.
3697     // Logical binary ops preserve the number of sign bits at the worst.
3698     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3699     if (Tmp != 1) {
3700       Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3701       FirstAnswer = std::min(Tmp, Tmp2);
3702       // We computed what we know about the sign bits as our first
3703       // answer. Now proceed to the generic code that uses
3704       // computeKnownBits, and pick whichever answer is better.
3705     }
3706     break;
3707 
3708   case ISD::SELECT:
3709   case ISD::VSELECT:
3710     Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3711     if (Tmp == 1) return 1;  // Early out.
3712     Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3713     return std::min(Tmp, Tmp2);
3714   case ISD::SELECT_CC:
3715     Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3716     if (Tmp == 1) return 1;  // Early out.
3717     Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3718     return std::min(Tmp, Tmp2);
3719 
3720   case ISD::SMIN:
3721   case ISD::SMAX: {
3722     // If we have a clamp pattern, we know that the number of sign bits will be
3723     // the minimum of the clamp min/max range.
3724     bool IsMax = (Opcode == ISD::SMAX);
3725     ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3726     if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3727       if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3728         CstHigh =
3729             isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3730     if (CstLow && CstHigh) {
3731       if (!IsMax)
3732         std::swap(CstLow, CstHigh);
3733       if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3734         Tmp = CstLow->getAPIntValue().getNumSignBits();
3735         Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3736         return std::min(Tmp, Tmp2);
3737       }
3738     }
3739 
3740     // Fallback - just get the minimum number of sign bits of the operands.
3741     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3742     if (Tmp == 1)
3743       return 1;  // Early out.
3744     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3745     return std::min(Tmp, Tmp2);
3746   }
3747   case ISD::UMIN:
3748   case ISD::UMAX:
3749     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3750     if (Tmp == 1)
3751       return 1;  // Early out.
3752     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3753     return std::min(Tmp, Tmp2);
3754   case ISD::SADDO:
3755   case ISD::UADDO:
3756   case ISD::SSUBO:
3757   case ISD::USUBO:
3758   case ISD::SMULO:
3759   case ISD::UMULO:
3760     if (Op.getResNo() != 1)
3761       break;
3762     // The boolean result conforms to getBooleanContents.  Fall through.
3763     // If setcc returns 0/-1, all bits are sign bits.
3764     // We know that we have an integer-based boolean since these operations
3765     // are only available for integer.
3766     if (TLI->getBooleanContents(VT.isVector(), false) ==
3767         TargetLowering::ZeroOrNegativeOneBooleanContent)
3768       return VTBits;
3769     break;
3770   case ISD::SETCC:
3771   case ISD::STRICT_FSETCC:
3772   case ISD::STRICT_FSETCCS: {
3773     unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3774     // If setcc returns 0/-1, all bits are sign bits.
3775     if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3776         TargetLowering::ZeroOrNegativeOneBooleanContent)
3777       return VTBits;
3778     break;
3779   }
3780   case ISD::ROTL:
3781   case ISD::ROTR:
3782     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3783 
3784     // If we're rotating an 0/-1 value, then it stays an 0/-1 value.
3785     if (Tmp == VTBits)
3786       return VTBits;
3787 
3788     if (ConstantSDNode *C =
3789             isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3790       unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3791 
3792       // Handle rotate right by N like a rotate left by 32-N.
3793       if (Opcode == ISD::ROTR)
3794         RotAmt = (VTBits - RotAmt) % VTBits;
3795 
3796       // If we aren't rotating out all of the known-in sign bits, return the
3797       // number that are left.  This handles rotl(sext(x), 1) for example.
3798       if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3799     }
3800     break;
3801   case ISD::ADD:
3802   case ISD::ADDC:
3803     // Add can have at most one carry bit.  Thus we know that the output
3804     // is, at worst, one more bit than the inputs.
3805     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3806     if (Tmp == 1) return 1; // Early out.
3807 
3808     // Special case decrementing a value (ADD X, -1):
3809     if (ConstantSDNode *CRHS =
3810             isConstOrConstSplat(Op.getOperand(1), DemandedElts))
3811       if (CRHS->isAllOnesValue()) {
3812         KnownBits Known =
3813             computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3814 
3815         // If the input is known to be 0 or 1, the output is 0/-1, which is all
3816         // sign bits set.
3817         if ((Known.Zero | 1).isAllOnesValue())
3818           return VTBits;
3819 
3820         // If we are subtracting one from a positive number, there is no carry
3821         // out of the result.
3822         if (Known.isNonNegative())
3823           return Tmp;
3824       }
3825 
3826     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3827     if (Tmp2 == 1) return 1; // Early out.
3828     return std::min(Tmp, Tmp2) - 1;
3829   case ISD::SUB:
3830     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3831     if (Tmp2 == 1) return 1; // Early out.
3832 
3833     // Handle NEG.
3834     if (ConstantSDNode *CLHS =
3835             isConstOrConstSplat(Op.getOperand(0), DemandedElts))
3836       if (CLHS->isNullValue()) {
3837         KnownBits Known =
3838             computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3839         // If the input is known to be 0 or 1, the output is 0/-1, which is all
3840         // sign bits set.
3841         if ((Known.Zero | 1).isAllOnesValue())
3842           return VTBits;
3843 
3844         // If the input is known to be positive (the sign bit is known clear),
3845         // the output of the NEG has the same number of sign bits as the input.
3846         if (Known.isNonNegative())
3847           return Tmp2;
3848 
3849         // Otherwise, we treat this like a SUB.
3850       }
3851 
3852     // Sub can have at most one carry bit.  Thus we know that the output
3853     // is, at worst, one more bit than the inputs.
3854     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3855     if (Tmp == 1) return 1; // Early out.
3856     return std::min(Tmp, Tmp2) - 1;
3857   case ISD::MUL: {
3858     // The output of the Mul can be at most twice the valid bits in the inputs.
3859     unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3860     if (SignBitsOp0 == 1)
3861       break;
3862     unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3863     if (SignBitsOp1 == 1)
3864       break;
3865     unsigned OutValidBits =
3866         (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
3867     return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
3868   }
3869   case ISD::TRUNCATE: {
3870     // Check if the sign bits of source go down as far as the truncated value.
3871     unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3872     unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3873     if (NumSrcSignBits > (NumSrcBits - VTBits))
3874       return NumSrcSignBits - (NumSrcBits - VTBits);
3875     break;
3876   }
3877   case ISD::EXTRACT_ELEMENT: {
3878     const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3879     const int BitWidth = Op.getValueSizeInBits();
3880     const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3881 
3882     // Get reverse index (starting from 1), Op1 value indexes elements from
3883     // little end. Sign starts at big end.
3884     const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3885 
3886     // If the sign portion ends in our element the subtraction gives correct
3887     // result. Otherwise it gives either negative or > bitwidth result
3888     return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3889   }
3890   case ISD::INSERT_VECTOR_ELT: {
3891     // If we know the element index, split the demand between the
3892     // source vector and the inserted element, otherwise assume we need
3893     // the original demanded vector elements and the value.
3894     SDValue InVec = Op.getOperand(0);
3895     SDValue InVal = Op.getOperand(1);
3896     SDValue EltNo = Op.getOperand(2);
3897     bool DemandedVal = true;
3898     APInt DemandedVecElts = DemandedElts;
3899     auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3900     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3901       unsigned EltIdx = CEltNo->getZExtValue();
3902       DemandedVal = !!DemandedElts[EltIdx];
3903       DemandedVecElts.clearBit(EltIdx);
3904     }
3905     Tmp = std::numeric_limits<unsigned>::max();
3906     if (DemandedVal) {
3907       // TODO - handle implicit truncation of inserted elements.
3908       if (InVal.getScalarValueSizeInBits() != VTBits)
3909         break;
3910       Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3911       Tmp = std::min(Tmp, Tmp2);
3912     }
3913     if (!!DemandedVecElts) {
3914       Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
3915       Tmp = std::min(Tmp, Tmp2);
3916     }
3917     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3918     return Tmp;
3919   }
3920   case ISD::EXTRACT_VECTOR_ELT: {
3921     SDValue InVec = Op.getOperand(0);
3922     SDValue EltNo = Op.getOperand(1);
3923     EVT VecVT = InVec.getValueType();
3924     const unsigned BitWidth = Op.getValueSizeInBits();
3925     const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3926     const unsigned NumSrcElts = VecVT.getVectorNumElements();
3927 
3928     // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
3929     // anything about sign bits. But if the sizes match we can derive knowledge
3930     // about sign bits from the vector operand.
3931     if (BitWidth != EltBitWidth)
3932       break;
3933 
3934     // If we know the element index, just demand that vector element, else for
3935     // an unknown element index, ignore DemandedElts and demand them all.
3936     APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3937     auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3938     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3939       DemandedSrcElts =
3940           APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3941 
3942     return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3943   }
3944   case ISD::EXTRACT_SUBVECTOR: {
3945     // Offset the demanded elts by the subvector index.
3946     SDValue Src = Op.getOperand(0);
3947     // Bail until we can represent demanded elements for scalable vectors.
3948     if (Src.getValueType().isScalableVector())
3949       break;
3950     uint64_t Idx = Op.getConstantOperandVal(1);
3951     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3952     APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3953     return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3954   }
3955   case ISD::CONCAT_VECTORS: {
3956     // Determine the minimum number of sign bits across all demanded
3957     // elts of the input vectors. Early out if the result is already 1.
3958     Tmp = std::numeric_limits<unsigned>::max();
3959     EVT SubVectorVT = Op.getOperand(0).getValueType();
3960     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3961     unsigned NumSubVectors = Op.getNumOperands();
3962     for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3963       APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3964       DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3965       if (!DemandedSub)
3966         continue;
3967       Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3968       Tmp = std::min(Tmp, Tmp2);
3969     }
3970     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3971     return Tmp;
3972   }
3973   case ISD::INSERT_SUBVECTOR: {
3974     // Demand any elements from the subvector and the remainder from the src its
3975     // inserted into.
3976     SDValue Src = Op.getOperand(0);
3977     SDValue Sub = Op.getOperand(1);
3978     uint64_t Idx = Op.getConstantOperandVal(2);
3979     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3980     APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3981     APInt DemandedSrcElts = DemandedElts;
3982     DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
3983 
3984     Tmp = std::numeric_limits<unsigned>::max();
3985     if (!!DemandedSubElts) {
3986       Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
3987       if (Tmp == 1)
3988         return 1; // early-out
3989     }
3990     if (!!DemandedSrcElts) {
3991       Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3992       Tmp = std::min(Tmp, Tmp2);
3993     }
3994     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3995     return Tmp;
3996   }
3997   }
3998 
3999   // If we are looking at the loaded value of the SDNode.
4000   if (Op.getResNo() == 0) {
4001     // Handle LOADX separately here. EXTLOAD case will fallthrough.
4002     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
4003       unsigned ExtType = LD->getExtensionType();
4004       switch (ExtType) {
4005       default: break;
4006       case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
4007         Tmp = LD->getMemoryVT().getScalarSizeInBits();
4008         return VTBits - Tmp + 1;
4009       case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
4010         Tmp = LD->getMemoryVT().getScalarSizeInBits();
4011         return VTBits - Tmp;
4012       case ISD::NON_EXTLOAD:
4013         if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
4014           // We only need to handle vectors - computeKnownBits should handle
4015           // scalar cases.
4016           Type *CstTy = Cst->getType();
4017           if (CstTy->isVectorTy() &&
4018               (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
4019             Tmp = VTBits;
4020             for (unsigned i = 0; i != NumElts; ++i) {
4021               if (!DemandedElts[i])
4022                 continue;
4023               if (Constant *Elt = Cst->getAggregateElement(i)) {
4024                 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4025                   const APInt &Value = CInt->getValue();
4026                   Tmp = std::min(Tmp, Value.getNumSignBits());
4027                   continue;
4028                 }
4029                 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4030                   APInt Value = CFP->getValueAPF().bitcastToAPInt();
4031                   Tmp = std::min(Tmp, Value.getNumSignBits());
4032                   continue;
4033                 }
4034               }
4035               // Unknown type. Conservatively assume no bits match sign bit.
4036               return 1;
4037             }
4038             return Tmp;
4039           }
4040         }
4041         break;
4042       }
4043     }
4044   }
4045 
4046   // Allow the target to implement this method for its nodes.
4047   if (Opcode >= ISD::BUILTIN_OP_END ||
4048       Opcode == ISD::INTRINSIC_WO_CHAIN ||
4049       Opcode == ISD::INTRINSIC_W_CHAIN ||
4050       Opcode == ISD::INTRINSIC_VOID) {
4051     unsigned NumBits =
4052         TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
4053     if (NumBits > 1)
4054       FirstAnswer = std::max(FirstAnswer, NumBits);
4055   }
4056 
4057   // Finally, if we can prove that the top bits of the result are 0's or 1's,
4058   // use this information.
4059   KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
4060 
4061   APInt Mask;
4062   if (Known.isNonNegative()) {        // sign bit is 0
4063     Mask = Known.Zero;
4064   } else if (Known.isNegative()) {  // sign bit is 1;
4065     Mask = Known.One;
4066   } else {
4067     // Nothing known.
4068     return FirstAnswer;
4069   }
4070 
4071   // Okay, we know that the sign bit in Mask is set.  Use CLO to determine
4072   // the number of identical bits in the top of the input value.
4073   Mask <<= Mask.getBitWidth()-VTBits;
4074   return std::max(FirstAnswer, Mask.countLeadingOnes());
4075 }
4076 
4077 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
4078   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
4079       !isa<ConstantSDNode>(Op.getOperand(1)))
4080     return false;
4081 
4082   if (Op.getOpcode() == ISD::OR &&
4083       !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
4084     return false;
4085 
4086   return true;
4087 }
4088 
4089 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
4090   // If we're told that NaNs won't happen, assume they won't.
4091   if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
4092     return true;
4093 
4094   if (Depth >= MaxRecursionDepth)
4095     return false; // Limit search depth.
4096 
4097   // TODO: Handle vectors.
4098   // If the value is a constant, we can obviously see if it is a NaN or not.
4099   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4100     return !C->getValueAPF().isNaN() ||
4101            (SNaN && !C->getValueAPF().isSignaling());
4102   }
4103 
4104   unsigned Opcode = Op.getOpcode();
4105   switch (Opcode) {
4106   case ISD::FADD:
4107   case ISD::FSUB:
4108   case ISD::FMUL:
4109   case ISD::FDIV:
4110   case ISD::FREM:
4111   case ISD::FSIN:
4112   case ISD::FCOS: {
4113     if (SNaN)
4114       return true;
4115     // TODO: Need isKnownNeverInfinity
4116     return false;
4117   }
4118   case ISD::FCANONICALIZE:
4119   case ISD::FEXP:
4120   case ISD::FEXP2:
4121   case ISD::FTRUNC:
4122   case ISD::FFLOOR:
4123   case ISD::FCEIL:
4124   case ISD::FROUND:
4125   case ISD::FROUNDEVEN:
4126   case ISD::FRINT:
4127   case ISD::FNEARBYINT: {
4128     if (SNaN)
4129       return true;
4130     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4131   }
4132   case ISD::FABS:
4133   case ISD::FNEG:
4134   case ISD::FCOPYSIGN: {
4135     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4136   }
4137   case ISD::SELECT:
4138     return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4139            isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4140   case ISD::FP_EXTEND:
4141   case ISD::FP_ROUND: {
4142     if (SNaN)
4143       return true;
4144     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4145   }
4146   case ISD::SINT_TO_FP:
4147   case ISD::UINT_TO_FP:
4148     return true;
4149   case ISD::FMA:
4150   case ISD::FMAD: {
4151     if (SNaN)
4152       return true;
4153     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4154            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4155            isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4156   }
4157   case ISD::FSQRT: // Need is known positive
4158   case ISD::FLOG:
4159   case ISD::FLOG2:
4160   case ISD::FLOG10:
4161   case ISD::FPOWI:
4162   case ISD::FPOW: {
4163     if (SNaN)
4164       return true;
4165     // TODO: Refine on operand
4166     return false;
4167   }
4168   case ISD::FMINNUM:
4169   case ISD::FMAXNUM: {
4170     // Only one needs to be known not-nan, since it will be returned if the
4171     // other ends up being one.
4172     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4173            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4174   }
4175   case ISD::FMINNUM_IEEE:
4176   case ISD::FMAXNUM_IEEE: {
4177     if (SNaN)
4178       return true;
4179     // This can return a NaN if either operand is an sNaN, or if both operands
4180     // are NaN.
4181     return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4182             isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4183            (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4184             isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4185   }
4186   case ISD::FMINIMUM:
4187   case ISD::FMAXIMUM: {
4188     // TODO: Does this quiet or return the origina NaN as-is?
4189     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4190            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4191   }
4192   case ISD::EXTRACT_VECTOR_ELT: {
4193     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4194   }
4195   default:
4196     if (Opcode >= ISD::BUILTIN_OP_END ||
4197         Opcode == ISD::INTRINSIC_WO_CHAIN ||
4198         Opcode == ISD::INTRINSIC_W_CHAIN ||
4199         Opcode == ISD::INTRINSIC_VOID) {
4200       return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4201     }
4202 
4203     return false;
4204   }
4205 }
4206 
4207 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4208   assert(Op.getValueType().isFloatingPoint() &&
4209          "Floating point type expected");
4210 
4211   // If the value is a constant, we can obviously see if it is a zero or not.
4212   // TODO: Add BuildVector support.
4213   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4214     return !C->isZero();
4215   return false;
4216 }
4217 
4218 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4219   assert(!Op.getValueType().isFloatingPoint() &&
4220          "Floating point types unsupported - use isKnownNeverZeroFloat");
4221 
4222   // If the value is a constant, we can obviously see if it is a zero or not.
4223   if (ISD::matchUnaryPredicate(
4224           Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4225     return true;
4226 
4227   // TODO: Recognize more cases here.
4228   switch (Op.getOpcode()) {
4229   default: break;
4230   case ISD::OR:
4231     if (isKnownNeverZero(Op.getOperand(1)) ||
4232         isKnownNeverZero(Op.getOperand(0)))
4233       return true;
4234     break;
4235   }
4236 
4237   return false;
4238 }
4239 
4240 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4241   // Check the obvious case.
4242   if (A == B) return true;
4243 
4244   // For for negative and positive zero.
4245   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4246     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4247       if (CA->isZero() && CB->isZero()) return true;
4248 
4249   // Otherwise they may not be equal.
4250   return false;
4251 }
4252 
4253 // FIXME: unify with llvm::haveNoCommonBitsSet.
4254 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4255 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4256   assert(A.getValueType() == B.getValueType() &&
4257          "Values must have the same type");
4258   return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4259 }
4260 
4261 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4262                                 ArrayRef<SDValue> Ops,
4263                                 SelectionDAG &DAG) {
4264   int NumOps = Ops.size();
4265   assert(NumOps != 0 && "Can't build an empty vector!");
4266   assert(!VT.isScalableVector() &&
4267          "BUILD_VECTOR cannot be used with scalable types");
4268   assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4269          "Incorrect element count in BUILD_VECTOR!");
4270 
4271   // BUILD_VECTOR of UNDEFs is UNDEF.
4272   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4273     return DAG.getUNDEF(VT);
4274 
4275   // BUILD_VECTOR of seq extract/insert from the same vector + type is Identity.
4276   SDValue IdentitySrc;
4277   bool IsIdentity = true;
4278   for (int i = 0; i != NumOps; ++i) {
4279     if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4280         Ops[i].getOperand(0).getValueType() != VT ||
4281         (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4282         !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4283         cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4284       IsIdentity = false;
4285       break;
4286     }
4287     IdentitySrc = Ops[i].getOperand(0);
4288   }
4289   if (IsIdentity)
4290     return IdentitySrc;
4291 
4292   return SDValue();
4293 }
4294 
4295 /// Try to simplify vector concatenation to an input value, undef, or build
4296 /// vector.
4297 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4298                                   ArrayRef<SDValue> Ops,
4299                                   SelectionDAG &DAG) {
4300   assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4301   assert(llvm::all_of(Ops,
4302                       [Ops](SDValue Op) {
4303                         return Ops[0].getValueType() == Op.getValueType();
4304                       }) &&
4305          "Concatenation of vectors with inconsistent value types!");
4306   assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
4307              VT.getVectorElementCount() &&
4308          "Incorrect element count in vector concatenation!");
4309 
4310   if (Ops.size() == 1)
4311     return Ops[0];
4312 
4313   // Concat of UNDEFs is UNDEF.
4314   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4315     return DAG.getUNDEF(VT);
4316 
4317   // Scan the operands and look for extract operations from a single source
4318   // that correspond to insertion at the same location via this concatenation:
4319   // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4320   SDValue IdentitySrc;
4321   bool IsIdentity = true;
4322   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4323     SDValue Op = Ops[i];
4324     unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
4325     if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4326         Op.getOperand(0).getValueType() != VT ||
4327         (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4328         Op.getConstantOperandVal(1) != IdentityIndex) {
4329       IsIdentity = false;
4330       break;
4331     }
4332     assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4333            "Unexpected identity source vector for concat of extracts");
4334     IdentitySrc = Op.getOperand(0);
4335   }
4336   if (IsIdentity) {
4337     assert(IdentitySrc && "Failed to set source vector of extracts");
4338     return IdentitySrc;
4339   }
4340 
4341   // The code below this point is only designed to work for fixed width
4342   // vectors, so we bail out for now.
4343   if (VT.isScalableVector())
4344     return SDValue();
4345 
4346   // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
4347   // simplified to one big BUILD_VECTOR.
4348   // FIXME: Add support for SCALAR_TO_VECTOR as well.
4349   EVT SVT = VT.getScalarType();
4350   SmallVector<SDValue, 16> Elts;
4351   for (SDValue Op : Ops) {
4352     EVT OpVT = Op.getValueType();
4353     if (Op.isUndef())
4354       Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4355     else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4356       Elts.append(Op->op_begin(), Op->op_end());
4357     else
4358       return SDValue();
4359   }
4360 
4361   // BUILD_VECTOR requires all inputs to be of the same type, find the
4362   // maximum type and extend them all.
4363   for (SDValue Op : Elts)
4364     SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4365 
4366   if (SVT.bitsGT(VT.getScalarType())) {
4367     for (SDValue &Op : Elts) {
4368       if (Op.isUndef())
4369         Op = DAG.getUNDEF(SVT);
4370       else
4371         Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4372                  ? DAG.getZExtOrTrunc(Op, DL, SVT)
4373                  : DAG.getSExtOrTrunc(Op, DL, SVT);
4374     }
4375   }
4376 
4377   SDValue V = DAG.getBuildVector(VT, DL, Elts);
4378   NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4379   return V;
4380 }
4381 
4382 /// Gets or creates the specified node.
4383 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4384   FoldingSetNodeID ID;
4385   AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4386   void *IP = nullptr;
4387   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4388     return SDValue(E, 0);
4389 
4390   auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4391                               getVTList(VT));
4392   CSEMap.InsertNode(N, IP);
4393 
4394   InsertNode(N);
4395   SDValue V = SDValue(N, 0);
4396   NewSDValueDbgMsg(V, "Creating new node: ", this);
4397   return V;
4398 }
4399 
4400 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4401                               SDValue Operand) {
4402   SDNodeFlags Flags;
4403   if (Inserter)
4404     Flags = Inserter->getFlags();
4405   return getNode(Opcode, DL, VT, Operand, Flags);
4406 }
4407 
4408 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4409                               SDValue Operand, const SDNodeFlags Flags) {
4410   // Constant fold unary operations with an integer constant operand. Even
4411   // opaque constant will be folded, because the folding of unary operations
4412   // doesn't create new constants with different values. Nevertheless, the
4413   // opaque flag is preserved during folding to prevent future folding with
4414   // other constants.
4415   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4416     const APInt &Val = C->getAPIntValue();
4417     switch (Opcode) {
4418     default: break;
4419     case ISD::SIGN_EXTEND:
4420       return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4421                          C->isTargetOpcode(), C->isOpaque());
4422     case ISD::TRUNCATE:
4423       if (C->isOpaque())
4424         break;
4425       LLVM_FALLTHROUGH;
4426     case ISD::ANY_EXTEND:
4427     case ISD::ZERO_EXTEND:
4428       return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4429                          C->isTargetOpcode(), C->isOpaque());
4430     case ISD::UINT_TO_FP:
4431     case ISD::SINT_TO_FP: {
4432       APFloat apf(EVTToAPFloatSemantics(VT),
4433                   APInt::getNullValue(VT.getSizeInBits()));
4434       (void)apf.convertFromAPInt(Val,
4435                                  Opcode==ISD::SINT_TO_FP,
4436                                  APFloat::rmNearestTiesToEven);
4437       return getConstantFP(apf, DL, VT);
4438     }
4439     case ISD::BITCAST:
4440       if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4441         return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4442       if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4443         return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4444       if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4445         return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4446       if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4447         return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4448       break;
4449     case ISD::ABS:
4450       return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4451                          C->isOpaque());
4452     case ISD::BITREVERSE:
4453       return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4454                          C->isOpaque());
4455     case ISD::BSWAP:
4456       return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4457                          C->isOpaque());
4458     case ISD::CTPOP:
4459       return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4460                          C->isOpaque());
4461     case ISD::CTLZ:
4462     case ISD::CTLZ_ZERO_UNDEF:
4463       return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4464                          C->isOpaque());
4465     case ISD::CTTZ:
4466     case ISD::CTTZ_ZERO_UNDEF:
4467       return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4468                          C->isOpaque());
4469     case ISD::FP16_TO_FP: {
4470       bool Ignored;
4471       APFloat FPV(APFloat::IEEEhalf(),
4472                   (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4473 
4474       // This can return overflow, underflow, or inexact; we don't care.
4475       // FIXME need to be more flexible about rounding mode.
4476       (void)FPV.convert(EVTToAPFloatSemantics(VT),
4477                         APFloat::rmNearestTiesToEven, &Ignored);
4478       return getConstantFP(FPV, DL, VT);
4479     }
4480     }
4481   }
4482 
4483   // Constant fold unary operations with a floating point constant operand.
4484   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4485     APFloat V = C->getValueAPF();    // make copy
4486     switch (Opcode) {
4487     case ISD::FNEG:
4488       V.changeSign();
4489       return getConstantFP(V, DL, VT);
4490     case ISD::FABS:
4491       V.clearSign();
4492       return getConstantFP(V, DL, VT);
4493     case ISD::FCEIL: {
4494       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4495       if (fs == APFloat::opOK || fs == APFloat::opInexact)
4496         return getConstantFP(V, DL, VT);
4497       break;
4498     }
4499     case ISD::FTRUNC: {
4500       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4501       if (fs == APFloat::opOK || fs == APFloat::opInexact)
4502         return getConstantFP(V, DL, VT);
4503       break;
4504     }
4505     case ISD::FFLOOR: {
4506       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4507       if (fs == APFloat::opOK || fs == APFloat::opInexact)
4508         return getConstantFP(V, DL, VT);
4509       break;
4510     }
4511     case ISD::FP_EXTEND: {
4512       bool ignored;
4513       // This can return overflow, underflow, or inexact; we don't care.
4514       // FIXME need to be more flexible about rounding mode.
4515       (void)V.convert(EVTToAPFloatSemantics(VT),
4516                       APFloat::rmNearestTiesToEven, &ignored);
4517       return getConstantFP(V, DL, VT);
4518     }
4519     case ISD::FP_TO_SINT:
4520     case ISD::FP_TO_UINT: {
4521       bool ignored;
4522       APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4523       // FIXME need to be more flexible about rounding mode.
4524       APFloat::opStatus s =
4525           V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4526       if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4527         break;
4528       return getConstant(IntVal, DL, VT);
4529     }
4530     case ISD::BITCAST:
4531       if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4532         return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4533       else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4534         return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4535       else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4536         return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4537       break;
4538     case ISD::FP_TO_FP16: {
4539       bool Ignored;
4540       // This can return overflow, underflow, or inexact; we don't care.
4541       // FIXME need to be more flexible about rounding mode.
4542       (void)V.convert(APFloat::IEEEhalf(),
4543                       APFloat::rmNearestTiesToEven, &Ignored);
4544       return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4545     }
4546     }
4547   }
4548 
4549   // Constant fold unary operations with a vector integer or float operand.
4550   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
4551     if (BV->isConstant()) {
4552       switch (Opcode) {
4553       default:
4554         // FIXME: Entirely reasonable to perform folding of other unary
4555         // operations here as the need arises.
4556         break;
4557       case ISD::FNEG:
4558       case ISD::FABS:
4559       case ISD::FCEIL:
4560       case ISD::FTRUNC:
4561       case ISD::FFLOOR:
4562       case ISD::FP_EXTEND:
4563       case ISD::FP_TO_SINT:
4564       case ISD::FP_TO_UINT:
4565       case ISD::TRUNCATE:
4566       case ISD::ANY_EXTEND:
4567       case ISD::ZERO_EXTEND:
4568       case ISD::SIGN_EXTEND:
4569       case ISD::UINT_TO_FP:
4570       case ISD::SINT_TO_FP:
4571       case ISD::ABS:
4572       case ISD::BITREVERSE:
4573       case ISD::BSWAP:
4574       case ISD::CTLZ:
4575       case ISD::CTLZ_ZERO_UNDEF:
4576       case ISD::CTTZ:
4577       case ISD::CTTZ_ZERO_UNDEF:
4578       case ISD::CTPOP: {
4579         SDValue Ops = { Operand };
4580         if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4581           return Fold;
4582       }
4583       }
4584     }
4585   }
4586 
4587   unsigned OpOpcode = Operand.getNode()->getOpcode();
4588   switch (Opcode) {
4589   case ISD::FREEZE:
4590     assert(VT == Operand.getValueType() && "Unexpected VT!");
4591     break;
4592   case ISD::TokenFactor:
4593   case ISD::MERGE_VALUES:
4594   case ISD::CONCAT_VECTORS:
4595     return Operand;         // Factor, merge or concat of one node?  No need.
4596   case ISD::BUILD_VECTOR: {
4597     // Attempt to simplify BUILD_VECTOR.
4598     SDValue Ops[] = {Operand};
4599     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4600       return V;
4601     break;
4602   }
4603   case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4604   case ISD::FP_EXTEND:
4605     assert(VT.isFloatingPoint() &&
4606            Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4607     if (Operand.getValueType() == VT) return Operand;  // noop conversion.
4608     assert((!VT.isVector() ||
4609             VT.getVectorElementCount() ==
4610             Operand.getValueType().getVectorElementCount()) &&
4611            "Vector element count mismatch!");
4612     assert(Operand.getValueType().bitsLT(VT) &&
4613            "Invalid fpext node, dst < src!");
4614     if (Operand.isUndef())
4615       return getUNDEF(VT);
4616     break;
4617   case ISD::FP_TO_SINT:
4618   case ISD::FP_TO_UINT:
4619     if (Operand.isUndef())
4620       return getUNDEF(VT);
4621     break;
4622   case ISD::SINT_TO_FP:
4623   case ISD::UINT_TO_FP:
4624     // [us]itofp(undef) = 0, because the result value is bounded.
4625     if (Operand.isUndef())
4626       return getConstantFP(0.0, DL, VT);
4627     break;
4628   case ISD::SIGN_EXTEND:
4629     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4630            "Invalid SIGN_EXTEND!");
4631     assert(VT.isVector() == Operand.getValueType().isVector() &&
4632            "SIGN_EXTEND result type type should be vector iff the operand "
4633            "type is vector!");
4634     if (Operand.getValueType() == VT) return Operand;   // noop extension
4635     assert((!VT.isVector() ||
4636             VT.getVectorElementCount() ==
4637                 Operand.getValueType().getVectorElementCount()) &&
4638            "Vector element count mismatch!");
4639     assert(Operand.getValueType().bitsLT(VT) &&
4640            "Invalid sext node, dst < src!");
4641     if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4642       return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4643     else if (OpOpcode == ISD::UNDEF)
4644       // sext(undef) = 0, because the top bits will all be the same.
4645       return getConstant(0, DL, VT);
4646     break;
4647   case ISD::ZERO_EXTEND:
4648     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4649            "Invalid ZERO_EXTEND!");
4650     assert(VT.isVector() == Operand.getValueType().isVector() &&
4651            "ZERO_EXTEND result type type should be vector iff the operand "
4652            "type is vector!");
4653     if (Operand.getValueType() == VT) return Operand;   // noop extension
4654     assert((!VT.isVector() ||
4655             VT.getVectorElementCount() ==
4656                 Operand.getValueType().getVectorElementCount()) &&
4657            "Vector element count mismatch!");
4658     assert(Operand.getValueType().bitsLT(VT) &&
4659            "Invalid zext node, dst < src!");
4660     if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
4661       return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4662     else if (OpOpcode == ISD::UNDEF)
4663       // zext(undef) = 0, because the top bits will be zero.
4664       return getConstant(0, DL, VT);
4665     break;
4666   case ISD::ANY_EXTEND:
4667     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4668            "Invalid ANY_EXTEND!");
4669     assert(VT.isVector() == Operand.getValueType().isVector() &&
4670            "ANY_EXTEND result type type should be vector iff the operand "
4671            "type is vector!");
4672     if (Operand.getValueType() == VT) return Operand;   // noop extension
4673     assert((!VT.isVector() ||
4674             VT.getVectorElementCount() ==
4675                 Operand.getValueType().getVectorElementCount()) &&
4676            "Vector element count mismatch!");
4677     assert(Operand.getValueType().bitsLT(VT) &&
4678            "Invalid anyext node, dst < src!");
4679 
4680     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4681         OpOpcode == ISD::ANY_EXTEND)
4682       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
4683       return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4684     else if (OpOpcode == ISD::UNDEF)
4685       return getUNDEF(VT);
4686 
4687     // (ext (trunc x)) -> x
4688     if (OpOpcode == ISD::TRUNCATE) {
4689       SDValue OpOp = Operand.getOperand(0);
4690       if (OpOp.getValueType() == VT) {
4691         transferDbgValues(Operand, OpOp);
4692         return OpOp;
4693       }
4694     }
4695     break;
4696   case ISD::TRUNCATE:
4697     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4698            "Invalid TRUNCATE!");
4699     assert(VT.isVector() == Operand.getValueType().isVector() &&
4700            "TRUNCATE result type type should be vector iff the operand "
4701            "type is vector!");
4702     if (Operand.getValueType() == VT) return Operand;   // noop truncate
4703     assert((!VT.isVector() ||
4704             VT.getVectorElementCount() ==
4705                 Operand.getValueType().getVectorElementCount()) &&
4706            "Vector element count mismatch!");
4707     assert(Operand.getValueType().bitsGT(VT) &&
4708            "Invalid truncate node, src < dst!");
4709     if (OpOpcode == ISD::TRUNCATE)
4710       return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4711     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4712         OpOpcode == ISD::ANY_EXTEND) {
4713       // If the source is smaller than the dest, we still need an extend.
4714       if (Operand.getOperand(0).getValueType().getScalarType()
4715             .bitsLT(VT.getScalarType()))
4716         return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4717       if (Operand.getOperand(0).getValueType().bitsGT(VT))
4718         return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4719       return Operand.getOperand(0);
4720     }
4721     if (OpOpcode == ISD::UNDEF)
4722       return getUNDEF(VT);
4723     break;
4724   case ISD::ANY_EXTEND_VECTOR_INREG:
4725   case ISD::ZERO_EXTEND_VECTOR_INREG:
4726   case ISD::SIGN_EXTEND_VECTOR_INREG:
4727     assert(VT.isVector() && "This DAG node is restricted to vector types.");
4728     assert(Operand.getValueType().bitsLE(VT) &&
4729            "The input must be the same size or smaller than the result.");
4730     assert(VT.getVectorNumElements() <
4731              Operand.getValueType().getVectorNumElements() &&
4732            "The destination vector type must have fewer lanes than the input.");
4733     break;
4734   case ISD::ABS:
4735     assert(VT.isInteger() && VT == Operand.getValueType() &&
4736            "Invalid ABS!");
4737     if (OpOpcode == ISD::UNDEF)
4738       return getUNDEF(VT);
4739     break;
4740   case ISD::BSWAP:
4741     assert(VT.isInteger() && VT == Operand.getValueType() &&
4742            "Invalid BSWAP!");
4743     assert((VT.getScalarSizeInBits() % 16 == 0) &&
4744            "BSWAP types must be a multiple of 16 bits!");
4745     if (OpOpcode == ISD::UNDEF)
4746       return getUNDEF(VT);
4747     break;
4748   case ISD::BITREVERSE:
4749     assert(VT.isInteger() && VT == Operand.getValueType() &&
4750            "Invalid BITREVERSE!");
4751     if (OpOpcode == ISD::UNDEF)
4752       return getUNDEF(VT);
4753     break;
4754   case ISD::BITCAST:
4755     // Basic sanity checking.
4756     assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
4757            "Cannot BITCAST between types of different sizes!");
4758     if (VT == Operand.getValueType()) return Operand;  // noop conversion.
4759     if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
4760       return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
4761     if (OpOpcode == ISD::UNDEF)
4762       return getUNDEF(VT);
4763     break;
4764   case ISD::SCALAR_TO_VECTOR:
4765     assert(VT.isVector() && !Operand.getValueType().isVector() &&
4766            (VT.getVectorElementType() == Operand.getValueType() ||
4767             (VT.getVectorElementType().isInteger() &&
4768              Operand.getValueType().isInteger() &&
4769              VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
4770            "Illegal SCALAR_TO_VECTOR node!");
4771     if (OpOpcode == ISD::UNDEF)
4772       return getUNDEF(VT);
4773     // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
4774     if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
4775         isa<ConstantSDNode>(Operand.getOperand(1)) &&
4776         Operand.getConstantOperandVal(1) == 0 &&
4777         Operand.getOperand(0).getValueType() == VT)
4778       return Operand.getOperand(0);
4779     break;
4780   case ISD::FNEG:
4781     // Negation of an unknown bag of bits is still completely undefined.
4782     if (OpOpcode == ISD::UNDEF)
4783       return getUNDEF(VT);
4784 
4785     if (OpOpcode == ISD::FNEG)  // --X -> X
4786       return Operand.getOperand(0);
4787     break;
4788   case ISD::FABS:
4789     if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
4790       return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
4791     break;
4792   case ISD::VSCALE:
4793     assert(VT == Operand.getValueType() && "Unexpected VT!");
4794     break;
4795   case ISD::CTPOP:
4796     if (Operand.getValueType().getScalarType() == MVT::i1)
4797       return Operand;
4798     break;
4799   case ISD::CTLZ:
4800   case ISD::CTTZ:
4801     if (Operand.getValueType().getScalarType() == MVT::i1)
4802       return getNOT(DL, Operand, Operand.getValueType());
4803     break;
4804   case ISD::VECREDUCE_SMIN:
4805   case ISD::VECREDUCE_UMAX:
4806     if (Operand.getValueType().getScalarType() == MVT::i1)
4807       return getNode(ISD::VECREDUCE_OR, DL, VT, Operand);
4808     break;
4809   case ISD::VECREDUCE_SMAX:
4810   case ISD::VECREDUCE_UMIN:
4811     if (Operand.getValueType().getScalarType() == MVT::i1)
4812       return getNode(ISD::VECREDUCE_AND, DL, VT, Operand);
4813     break;
4814   }
4815 
4816   SDNode *N;
4817   SDVTList VTs = getVTList(VT);
4818   SDValue Ops[] = {Operand};
4819   if (VT != MVT::Glue) { // Don't CSE flag producing nodes
4820     FoldingSetNodeID ID;
4821     AddNodeIDNode(ID, Opcode, VTs, Ops);
4822     void *IP = nullptr;
4823     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4824       E->intersectFlagsWith(Flags);
4825       return SDValue(E, 0);
4826     }
4827 
4828     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4829     N->setFlags(Flags);
4830     createOperands(N, Ops);
4831     CSEMap.InsertNode(N, IP);
4832   } else {
4833     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4834     createOperands(N, Ops);
4835   }
4836 
4837   InsertNode(N);
4838   SDValue V = SDValue(N, 0);
4839   NewSDValueDbgMsg(V, "Creating new node: ", this);
4840   return V;
4841 }
4842 
4843 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
4844                                        const APInt &C2) {
4845   switch (Opcode) {
4846   case ISD::ADD:  return C1 + C2;
4847   case ISD::SUB:  return C1 - C2;
4848   case ISD::MUL:  return C1 * C2;
4849   case ISD::AND:  return C1 & C2;
4850   case ISD::OR:   return C1 | C2;
4851   case ISD::XOR:  return C1 ^ C2;
4852   case ISD::SHL:  return C1 << C2;
4853   case ISD::SRL:  return C1.lshr(C2);
4854   case ISD::SRA:  return C1.ashr(C2);
4855   case ISD::ROTL: return C1.rotl(C2);
4856   case ISD::ROTR: return C1.rotr(C2);
4857   case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
4858   case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
4859   case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
4860   case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
4861   case ISD::SADDSAT: return C1.sadd_sat(C2);
4862   case ISD::UADDSAT: return C1.uadd_sat(C2);
4863   case ISD::SSUBSAT: return C1.ssub_sat(C2);
4864   case ISD::USUBSAT: return C1.usub_sat(C2);
4865   case ISD::UDIV:
4866     if (!C2.getBoolValue())
4867       break;
4868     return C1.udiv(C2);
4869   case ISD::UREM:
4870     if (!C2.getBoolValue())
4871       break;
4872     return C1.urem(C2);
4873   case ISD::SDIV:
4874     if (!C2.getBoolValue())
4875       break;
4876     return C1.sdiv(C2);
4877   case ISD::SREM:
4878     if (!C2.getBoolValue())
4879       break;
4880     return C1.srem(C2);
4881   }
4882   return llvm::None;
4883 }
4884 
4885 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
4886                                        const GlobalAddressSDNode *GA,
4887                                        const SDNode *N2) {
4888   if (GA->getOpcode() != ISD::GlobalAddress)
4889     return SDValue();
4890   if (!TLI->isOffsetFoldingLegal(GA))
4891     return SDValue();
4892   auto *C2 = dyn_cast<ConstantSDNode>(N2);
4893   if (!C2)
4894     return SDValue();
4895   int64_t Offset = C2->getSExtValue();
4896   switch (Opcode) {
4897   case ISD::ADD: break;
4898   case ISD::SUB: Offset = -uint64_t(Offset); break;
4899   default: return SDValue();
4900   }
4901   return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
4902                           GA->getOffset() + uint64_t(Offset));
4903 }
4904 
4905 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
4906   switch (Opcode) {
4907   case ISD::SDIV:
4908   case ISD::UDIV:
4909   case ISD::SREM:
4910   case ISD::UREM: {
4911     // If a divisor is zero/undef or any element of a divisor vector is
4912     // zero/undef, the whole op is undef.
4913     assert(Ops.size() == 2 && "Div/rem should have 2 operands");
4914     SDValue Divisor = Ops[1];
4915     if (Divisor.isUndef() || isNullConstant(Divisor))
4916       return true;
4917 
4918     return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
4919            llvm::any_of(Divisor->op_values(),
4920                         [](SDValue V) { return V.isUndef() ||
4921                                         isNullConstant(V); });
4922     // TODO: Handle signed overflow.
4923   }
4924   // TODO: Handle oversized shifts.
4925   default:
4926     return false;
4927   }
4928 }
4929 
4930 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4931                                              EVT VT, ArrayRef<SDValue> Ops) {
4932   // If the opcode is a target-specific ISD node, there's nothing we can
4933   // do here and the operand rules may not line up with the below, so
4934   // bail early.
4935   if (Opcode >= ISD::BUILTIN_OP_END)
4936     return SDValue();
4937 
4938   // For now, the array Ops should only contain two values.
4939   // This enforcement will be removed once this function is merged with
4940   // FoldConstantVectorArithmetic
4941   if (Ops.size() != 2)
4942     return SDValue();
4943 
4944   if (isUndef(Opcode, Ops))
4945     return getUNDEF(VT);
4946 
4947   SDNode *N1 = Ops[0].getNode();
4948   SDNode *N2 = Ops[1].getNode();
4949 
4950   // Handle the case of two scalars.
4951   if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
4952     if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
4953       if (C1->isOpaque() || C2->isOpaque())
4954         return SDValue();
4955 
4956       Optional<APInt> FoldAttempt =
4957           FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
4958       if (!FoldAttempt)
4959         return SDValue();
4960 
4961       SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT);
4962       assert((!Folded || !VT.isVector()) &&
4963              "Can't fold vectors ops with scalar operands");
4964       return Folded;
4965     }
4966   }
4967 
4968   // fold (add Sym, c) -> Sym+c
4969   if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
4970     return FoldSymbolOffset(Opcode, VT, GA, N2);
4971   if (TLI->isCommutativeBinOp(Opcode))
4972     if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
4973       return FoldSymbolOffset(Opcode, VT, GA, N1);
4974 
4975   // TODO: All the folds below are performed lane-by-lane and assume a fixed
4976   // vector width, however we should be able to do constant folds involving
4977   // splat vector nodes too.
4978   if (VT.isScalableVector())
4979     return SDValue();
4980 
4981   // For fixed width vectors, extract each constant element and fold them
4982   // individually. Either input may be an undef value.
4983   auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
4984   if (!BV1 && !N1->isUndef())
4985     return SDValue();
4986   auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
4987   if (!BV2 && !N2->isUndef())
4988     return SDValue();
4989   // If both operands are undef, that's handled the same way as scalars.
4990   if (!BV1 && !BV2)
4991     return SDValue();
4992 
4993   assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
4994          "Vector binop with different number of elements in operands?");
4995 
4996   EVT SVT = VT.getScalarType();
4997   EVT LegalSVT = SVT;
4998   if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4999     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5000     if (LegalSVT.bitsLT(SVT))
5001       return SDValue();
5002   }
5003   SmallVector<SDValue, 4> Outputs;
5004   unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
5005   for (unsigned I = 0; I != NumOps; ++I) {
5006     SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
5007     SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
5008     if (SVT.isInteger()) {
5009       if (V1->getValueType(0).bitsGT(SVT))
5010         V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
5011       if (V2->getValueType(0).bitsGT(SVT))
5012         V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
5013     }
5014 
5015     if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
5016       return SDValue();
5017 
5018     // Fold one vector element.
5019     SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
5020     if (LegalSVT != SVT)
5021       ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5022 
5023     // Scalar folding only succeeded if the result is a constant or UNDEF.
5024     if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5025         ScalarResult.getOpcode() != ISD::ConstantFP)
5026       return SDValue();
5027     Outputs.push_back(ScalarResult);
5028   }
5029 
5030   assert(VT.getVectorNumElements() == Outputs.size() &&
5031          "Vector size mismatch!");
5032 
5033   // We may have a vector type but a scalar result. Create a splat.
5034   Outputs.resize(VT.getVectorNumElements(), Outputs.back());
5035 
5036   // Build a big vector out of the scalar elements we generated.
5037   return getBuildVector(VT, SDLoc(), Outputs);
5038 }
5039 
5040 // TODO: Merge with FoldConstantArithmetic
5041 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
5042                                                    const SDLoc &DL, EVT VT,
5043                                                    ArrayRef<SDValue> Ops,
5044                                                    const SDNodeFlags Flags) {
5045   // If the opcode is a target-specific ISD node, there's nothing we can
5046   // do here and the operand rules may not line up with the below, so
5047   // bail early.
5048   if (Opcode >= ISD::BUILTIN_OP_END)
5049     return SDValue();
5050 
5051   if (isUndef(Opcode, Ops))
5052     return getUNDEF(VT);
5053 
5054   // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
5055   if (!VT.isVector())
5056     return SDValue();
5057 
5058   // TODO: All the folds below are performed lane-by-lane and assume a fixed
5059   // vector width, however we should be able to do constant folds involving
5060   // splat vector nodes too.
5061   if (VT.isScalableVector())
5062     return SDValue();
5063 
5064   // From this point onwards all vectors are assumed to be fixed width.
5065   unsigned NumElts = VT.getVectorNumElements();
5066 
5067   auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
5068     return !Op.getValueType().isVector() ||
5069            Op.getValueType().getVectorNumElements() == NumElts;
5070   };
5071 
5072   auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
5073     BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
5074     return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
5075            (BV && BV->isConstant());
5076   };
5077 
5078   // All operands must be vector types with the same number of elements as
5079   // the result type and must be either UNDEF or a build vector of constant
5080   // or UNDEF scalars.
5081   if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
5082       !llvm::all_of(Ops, IsScalarOrSameVectorSize))
5083     return SDValue();
5084 
5085   // If we are comparing vectors, then the result needs to be a i1 boolean
5086   // that is then sign-extended back to the legal result type.
5087   EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
5088 
5089   // Find legal integer scalar type for constant promotion and
5090   // ensure that its scalar size is at least as large as source.
5091   EVT LegalSVT = VT.getScalarType();
5092   if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5093     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5094     if (LegalSVT.bitsLT(VT.getScalarType()))
5095       return SDValue();
5096   }
5097 
5098   // Constant fold each scalar lane separately.
5099   SmallVector<SDValue, 4> ScalarResults;
5100   for (unsigned i = 0; i != NumElts; i++) {
5101     SmallVector<SDValue, 4> ScalarOps;
5102     for (SDValue Op : Ops) {
5103       EVT InSVT = Op.getValueType().getScalarType();
5104       BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
5105       if (!InBV) {
5106         // We've checked that this is UNDEF or a constant of some kind.
5107         if (Op.isUndef())
5108           ScalarOps.push_back(getUNDEF(InSVT));
5109         else
5110           ScalarOps.push_back(Op);
5111         continue;
5112       }
5113 
5114       SDValue ScalarOp = InBV->getOperand(i);
5115       EVT ScalarVT = ScalarOp.getValueType();
5116 
5117       // Build vector (integer) scalar operands may need implicit
5118       // truncation - do this before constant folding.
5119       if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
5120         ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
5121 
5122       ScalarOps.push_back(ScalarOp);
5123     }
5124 
5125     // Constant fold the scalar operands.
5126     SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
5127 
5128     // Legalize the (integer) scalar constant if necessary.
5129     if (LegalSVT != SVT)
5130       ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5131 
5132     // Scalar folding only succeeded if the result is a constant or UNDEF.
5133     if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5134         ScalarResult.getOpcode() != ISD::ConstantFP)
5135       return SDValue();
5136     ScalarResults.push_back(ScalarResult);
5137   }
5138 
5139   SDValue V = getBuildVector(VT, DL, ScalarResults);
5140   NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
5141   return V;
5142 }
5143 
5144 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
5145                                          EVT VT, SDValue N1, SDValue N2) {
5146   // TODO: We don't do any constant folding for strict FP opcodes here, but we
5147   //       should. That will require dealing with a potentially non-default
5148   //       rounding mode, checking the "opStatus" return value from the APFloat
5149   //       math calculations, and possibly other variations.
5150   auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
5151   auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
5152   if (N1CFP && N2CFP) {
5153     APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF();
5154     switch (Opcode) {
5155     case ISD::FADD:
5156       C1.add(C2, APFloat::rmNearestTiesToEven);
5157       return getConstantFP(C1, DL, VT);
5158     case ISD::FSUB:
5159       C1.subtract(C2, APFloat::rmNearestTiesToEven);
5160       return getConstantFP(C1, DL, VT);
5161     case ISD::FMUL:
5162       C1.multiply(C2, APFloat::rmNearestTiesToEven);
5163       return getConstantFP(C1, DL, VT);
5164     case ISD::FDIV:
5165       C1.divide(C2, APFloat::rmNearestTiesToEven);
5166       return getConstantFP(C1, DL, VT);
5167     case ISD::FREM:
5168       C1.mod(C2);
5169       return getConstantFP(C1, DL, VT);
5170     case ISD::FCOPYSIGN:
5171       C1.copySign(C2);
5172       return getConstantFP(C1, DL, VT);
5173     default: break;
5174     }
5175   }
5176   if (N1CFP && Opcode == ISD::FP_ROUND) {
5177     APFloat C1 = N1CFP->getValueAPF();    // make copy
5178     bool Unused;
5179     // This can return overflow, underflow, or inexact; we don't care.
5180     // FIXME need to be more flexible about rounding mode.
5181     (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
5182                       &Unused);
5183     return getConstantFP(C1, DL, VT);
5184   }
5185 
5186   switch (Opcode) {
5187   case ISD::FSUB:
5188     // -0.0 - undef --> undef (consistent with "fneg undef")
5189     if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef())
5190       return getUNDEF(VT);
5191     LLVM_FALLTHROUGH;
5192 
5193   case ISD::FADD:
5194   case ISD::FMUL:
5195   case ISD::FDIV:
5196   case ISD::FREM:
5197     // If both operands are undef, the result is undef. If 1 operand is undef,
5198     // the result is NaN. This should match the behavior of the IR optimizer.
5199     if (N1.isUndef() && N2.isUndef())
5200       return getUNDEF(VT);
5201     if (N1.isUndef() || N2.isUndef())
5202       return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5203   }
5204   return SDValue();
5205 }
5206 
5207 SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) {
5208   assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");
5209 
5210   // There's no need to assert on a byte-aligned pointer. All pointers are at
5211   // least byte aligned.
5212   if (A == Align(1))
5213     return Val;
5214 
5215   FoldingSetNodeID ID;
5216   AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val});
5217   ID.AddInteger(A.value());
5218 
5219   void *IP = nullptr;
5220   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5221     return SDValue(E, 0);
5222 
5223   auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(),
5224                                          Val.getValueType(), A);
5225   createOperands(N, {Val});
5226 
5227   CSEMap.InsertNode(N, IP);
5228   InsertNode(N);
5229 
5230   SDValue V(N, 0);
5231   NewSDValueDbgMsg(V, "Creating new node: ", this);
5232   return V;
5233 }
5234 
5235 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5236                               SDValue N1, SDValue N2) {
5237   SDNodeFlags Flags;
5238   if (Inserter)
5239     Flags = Inserter->getFlags();
5240   return getNode(Opcode, DL, VT, N1, N2, Flags);
5241 }
5242 
5243 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5244                               SDValue N1, SDValue N2, const SDNodeFlags Flags) {
5245   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
5246   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
5247   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5248   ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5249 
5250   // Canonicalize constant to RHS if commutative.
5251   if (TLI->isCommutativeBinOp(Opcode)) {
5252     if (N1C && !N2C) {
5253       std::swap(N1C, N2C);
5254       std::swap(N1, N2);
5255     } else if (N1CFP && !N2CFP) {
5256       std::swap(N1CFP, N2CFP);
5257       std::swap(N1, N2);
5258     }
5259   }
5260 
5261   switch (Opcode) {
5262   default: break;
5263   case ISD::TokenFactor:
5264     assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
5265            N2.getValueType() == MVT::Other && "Invalid token factor!");
5266     // Fold trivial token factors.
5267     if (N1.getOpcode() == ISD::EntryToken) return N2;
5268     if (N2.getOpcode() == ISD::EntryToken) return N1;
5269     if (N1 == N2) return N1;
5270     break;
5271   case ISD::BUILD_VECTOR: {
5272     // Attempt to simplify BUILD_VECTOR.
5273     SDValue Ops[] = {N1, N2};
5274     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5275       return V;
5276     break;
5277   }
5278   case ISD::CONCAT_VECTORS: {
5279     SDValue Ops[] = {N1, N2};
5280     if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5281       return V;
5282     break;
5283   }
5284   case ISD::AND:
5285     assert(VT.isInteger() && "This operator does not apply to FP types!");
5286     assert(N1.getValueType() == N2.getValueType() &&
5287            N1.getValueType() == VT && "Binary operator types must match!");
5288     // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
5289     // worth handling here.
5290     if (N2C && N2C->isNullValue())
5291       return N2;
5292     if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
5293       return N1;
5294     break;
5295   case ISD::OR:
5296   case ISD::XOR:
5297   case ISD::ADD:
5298   case ISD::SUB:
5299     assert(VT.isInteger() && "This operator does not apply to FP types!");
5300     assert(N1.getValueType() == N2.getValueType() &&
5301            N1.getValueType() == VT && "Binary operator types must match!");
5302     // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
5303     // it's worth handling here.
5304     if (N2C && N2C->isNullValue())
5305       return N1;
5306     break;
5307   case ISD::MUL:
5308     assert(VT.isInteger() && "This operator does not apply to FP types!");
5309     assert(N1.getValueType() == N2.getValueType() &&
5310            N1.getValueType() == VT && "Binary operator types must match!");
5311     if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5312       APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue();
5313       APInt N2CImm = N2C->getAPIntValue();
5314       return getVScale(DL, VT, MulImm * N2CImm);
5315     }
5316     break;
5317   case ISD::UDIV:
5318   case ISD::UREM:
5319   case ISD::MULHU:
5320   case ISD::MULHS:
5321   case ISD::SDIV:
5322   case ISD::SREM:
5323   case ISD::SADDSAT:
5324   case ISD::SSUBSAT:
5325   case ISD::UADDSAT:
5326   case ISD::USUBSAT:
5327     assert(VT.isInteger() && "This operator does not apply to FP types!");
5328     assert(N1.getValueType() == N2.getValueType() &&
5329            N1.getValueType() == VT && "Binary operator types must match!");
5330     break;
5331   case ISD::SMIN:
5332   case ISD::UMAX:
5333     assert(VT.isInteger() && "This operator does not apply to FP types!");
5334     assert(N1.getValueType() == N2.getValueType() &&
5335            N1.getValueType() == VT && "Binary operator types must match!");
5336     if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5337       return getNode(ISD::OR, DL, VT, N1, N2);
5338     break;
5339   case ISD::SMAX:
5340   case ISD::UMIN:
5341     assert(VT.isInteger() && "This operator does not apply to FP types!");
5342     assert(N1.getValueType() == N2.getValueType() &&
5343            N1.getValueType() == VT && "Binary operator types must match!");
5344     if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5345       return getNode(ISD::AND, DL, VT, N1, N2);
5346     break;
5347   case ISD::FADD:
5348   case ISD::FSUB:
5349   case ISD::FMUL:
5350   case ISD::FDIV:
5351   case ISD::FREM:
5352     assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5353     assert(N1.getValueType() == N2.getValueType() &&
5354            N1.getValueType() == VT && "Binary operator types must match!");
5355     if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
5356       return V;
5357     break;
5358   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
5359     assert(N1.getValueType() == VT &&
5360            N1.getValueType().isFloatingPoint() &&
5361            N2.getValueType().isFloatingPoint() &&
5362            "Invalid FCOPYSIGN!");
5363     break;
5364   case ISD::SHL:
5365     if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5366       APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue();
5367       APInt ShiftImm = N2C->getAPIntValue();
5368       return getVScale(DL, VT, MulImm << ShiftImm);
5369     }
5370     LLVM_FALLTHROUGH;
5371   case ISD::SRA:
5372   case ISD::SRL:
5373     if (SDValue V = simplifyShift(N1, N2))
5374       return V;
5375     LLVM_FALLTHROUGH;
5376   case ISD::ROTL:
5377   case ISD::ROTR:
5378     assert(VT == N1.getValueType() &&
5379            "Shift operators return type must be the same as their first arg");
5380     assert(VT.isInteger() && N2.getValueType().isInteger() &&
5381            "Shifts only work on integers");
5382     assert((!VT.isVector() || VT == N2.getValueType()) &&
5383            "Vector shift amounts must be in the same as their first arg");
5384     // Verify that the shift amount VT is big enough to hold valid shift
5385     // amounts.  This catches things like trying to shift an i1024 value by an
5386     // i8, which is easy to fall into in generic code that uses
5387     // TLI.getShiftAmount().
5388     assert(N2.getValueType().getScalarSizeInBits() >=
5389                Log2_32_Ceil(VT.getScalarSizeInBits()) &&
5390            "Invalid use of small shift amount with oversized value!");
5391 
5392     // Always fold shifts of i1 values so the code generator doesn't need to
5393     // handle them.  Since we know the size of the shift has to be less than the
5394     // size of the value, the shift/rotate count is guaranteed to be zero.
5395     if (VT == MVT::i1)
5396       return N1;
5397     if (N2C && N2C->isNullValue())
5398       return N1;
5399     break;
5400   case ISD::FP_ROUND:
5401     assert(VT.isFloatingPoint() &&
5402            N1.getValueType().isFloatingPoint() &&
5403            VT.bitsLE(N1.getValueType()) &&
5404            N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5405            "Invalid FP_ROUND!");
5406     if (N1.getValueType() == VT) return N1;  // noop conversion.
5407     break;
5408   case ISD::AssertSext:
5409   case ISD::AssertZext: {
5410     EVT EVT = cast<VTSDNode>(N2)->getVT();
5411     assert(VT == N1.getValueType() && "Not an inreg extend!");
5412     assert(VT.isInteger() && EVT.isInteger() &&
5413            "Cannot *_EXTEND_INREG FP types");
5414     assert(!EVT.isVector() &&
5415            "AssertSExt/AssertZExt type should be the vector element type "
5416            "rather than the vector type!");
5417     assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
5418     if (VT.getScalarType() == EVT) return N1; // noop assertion.
5419     break;
5420   }
5421   case ISD::SIGN_EXTEND_INREG: {
5422     EVT EVT = cast<VTSDNode>(N2)->getVT();
5423     assert(VT == N1.getValueType() && "Not an inreg extend!");
5424     assert(VT.isInteger() && EVT.isInteger() &&
5425            "Cannot *_EXTEND_INREG FP types");
5426     assert(EVT.isVector() == VT.isVector() &&
5427            "SIGN_EXTEND_INREG type should be vector iff the operand "
5428            "type is vector!");
5429     assert((!EVT.isVector() ||
5430             EVT.getVectorElementCount() == VT.getVectorElementCount()) &&
5431            "Vector element counts must match in SIGN_EXTEND_INREG");
5432     assert(EVT.bitsLE(VT) && "Not extending!");
5433     if (EVT == VT) return N1;  // Not actually extending
5434 
5435     auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
5436       unsigned FromBits = EVT.getScalarSizeInBits();
5437       Val <<= Val.getBitWidth() - FromBits;
5438       Val.ashrInPlace(Val.getBitWidth() - FromBits);
5439       return getConstant(Val, DL, ConstantVT);
5440     };
5441 
5442     if (N1C) {
5443       const APInt &Val = N1C->getAPIntValue();
5444       return SignExtendInReg(Val, VT);
5445     }
5446     if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
5447       SmallVector<SDValue, 8> Ops;
5448       llvm::EVT OpVT = N1.getOperand(0).getValueType();
5449       for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
5450         SDValue Op = N1.getOperand(i);
5451         if (Op.isUndef()) {
5452           Ops.push_back(getUNDEF(OpVT));
5453           continue;
5454         }
5455         ConstantSDNode *C = cast<ConstantSDNode>(Op);
5456         APInt Val = C->getAPIntValue();
5457         Ops.push_back(SignExtendInReg(Val, OpVT));
5458       }
5459       return getBuildVector(VT, DL, Ops);
5460     }
5461     break;
5462   }
5463   case ISD::EXTRACT_VECTOR_ELT:
5464     assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
5465            "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
5466              element type of the vector.");
5467 
5468     // Extract from an undefined value or using an undefined index is undefined.
5469     if (N1.isUndef() || N2.isUndef())
5470       return getUNDEF(VT);
5471 
5472     // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
5473     // vectors. For scalable vectors we will provide appropriate support for
5474     // dealing with arbitrary indices.
5475     if (N2C && N1.getValueType().isFixedLengthVector() &&
5476         N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
5477       return getUNDEF(VT);
5478 
5479     // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
5480     // expanding copies of large vectors from registers. This only works for
5481     // fixed length vectors, since we need to know the exact number of
5482     // elements.
5483     if (N2C && N1.getOperand(0).getValueType().isFixedLengthVector() &&
5484         N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0) {
5485       unsigned Factor =
5486         N1.getOperand(0).getValueType().getVectorNumElements();
5487       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
5488                      N1.getOperand(N2C->getZExtValue() / Factor),
5489                      getVectorIdxConstant(N2C->getZExtValue() % Factor, DL));
5490     }
5491 
5492     // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
5493     // lowering is expanding large vector constants.
5494     if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
5495                 N1.getOpcode() == ISD::SPLAT_VECTOR)) {
5496       assert((N1.getOpcode() != ISD::BUILD_VECTOR ||
5497               N1.getValueType().isFixedLengthVector()) &&
5498              "BUILD_VECTOR used for scalable vectors");
5499       unsigned Index =
5500           N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
5501       SDValue Elt = N1.getOperand(Index);
5502 
5503       if (VT != Elt.getValueType())
5504         // If the vector element type is not legal, the BUILD_VECTOR operands
5505         // are promoted and implicitly truncated, and the result implicitly
5506         // extended. Make that explicit here.
5507         Elt = getAnyExtOrTrunc(Elt, DL, VT);
5508 
5509       return Elt;
5510     }
5511 
5512     // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5513     // operations are lowered to scalars.
5514     if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5515       // If the indices are the same, return the inserted element else
5516       // if the indices are known different, extract the element from
5517       // the original vector.
5518       SDValue N1Op2 = N1.getOperand(2);
5519       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5520 
5521       if (N1Op2C && N2C) {
5522         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5523           if (VT == N1.getOperand(1).getValueType())
5524             return N1.getOperand(1);
5525           else
5526             return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5527         }
5528 
5529         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5530       }
5531     }
5532 
5533     // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5534     // when vector types are scalarized and v1iX is legal.
5535     // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx).
5536     // Here we are completely ignoring the extract element index (N2),
5537     // which is fine for fixed width vectors, since any index other than 0
5538     // is undefined anyway. However, this cannot be ignored for scalable
5539     // vectors - in theory we could support this, but we don't want to do this
5540     // without a profitability check.
5541     if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5542         N1.getValueType().isFixedLengthVector() &&
5543         N1.getValueType().getVectorNumElements() == 1) {
5544       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5545                      N1.getOperand(1));
5546     }
5547     break;
5548   case ISD::EXTRACT_ELEMENT:
5549     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5550     assert(!N1.getValueType().isVector() && !VT.isVector() &&
5551            (N1.getValueType().isInteger() == VT.isInteger()) &&
5552            N1.getValueType() != VT &&
5553            "Wrong types for EXTRACT_ELEMENT!");
5554 
5555     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5556     // 64-bit integers into 32-bit parts.  Instead of building the extract of
5557     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5558     if (N1.getOpcode() == ISD::BUILD_PAIR)
5559       return N1.getOperand(N2C->getZExtValue());
5560 
5561     // EXTRACT_ELEMENT of a constant int is also very common.
5562     if (N1C) {
5563       unsigned ElementSize = VT.getSizeInBits();
5564       unsigned Shift = ElementSize * N2C->getZExtValue();
5565       APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
5566       return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
5567     }
5568     break;
5569   case ISD::EXTRACT_SUBVECTOR:
5570     EVT N1VT = N1.getValueType();
5571     assert(VT.isVector() && N1VT.isVector() &&
5572            "Extract subvector VTs must be vectors!");
5573     assert(VT.getVectorElementType() == N1VT.getVectorElementType() &&
5574            "Extract subvector VTs must have the same element type!");
5575     assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
5576            "Cannot extract a scalable vector from a fixed length vector!");
5577     assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5578             VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) &&
5579            "Extract subvector must be from larger vector to smaller vector!");
5580     assert(N2C && "Extract subvector index must be a constant");
5581     assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5582             (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
5583                 N1VT.getVectorMinNumElements()) &&
5584            "Extract subvector overflow!");
5585     assert(N2C->getAPIntValue().getBitWidth() ==
5586                TLI->getVectorIdxTy(getDataLayout())
5587                    .getSizeInBits()
5588                    .getFixedSize() &&
5589            "Constant index for EXTRACT_SUBVECTOR has an invalid size");
5590 
5591     // Trivial extraction.
5592     if (VT == N1VT)
5593       return N1;
5594 
5595     // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5596     if (N1.isUndef())
5597       return getUNDEF(VT);
5598 
5599     // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
5600     // the concat have the same type as the extract.
5601     if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
5602         VT == N1.getOperand(0).getValueType()) {
5603       unsigned Factor = VT.getVectorMinNumElements();
5604       return N1.getOperand(N2C->getZExtValue() / Factor);
5605     }
5606 
5607     // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5608     // during shuffle legalization.
5609     if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5610         VT == N1.getOperand(1).getValueType())
5611       return N1.getOperand(1);
5612     break;
5613   }
5614 
5615   // Perform trivial constant folding.
5616   if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}))
5617     return SV;
5618 
5619   if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
5620     return V;
5621 
5622   // Canonicalize an UNDEF to the RHS, even over a constant.
5623   if (N1.isUndef()) {
5624     if (TLI->isCommutativeBinOp(Opcode)) {
5625       std::swap(N1, N2);
5626     } else {
5627       switch (Opcode) {
5628       case ISD::SIGN_EXTEND_INREG:
5629       case ISD::SUB:
5630         return getUNDEF(VT);     // fold op(undef, arg2) -> undef
5631       case ISD::UDIV:
5632       case ISD::SDIV:
5633       case ISD::UREM:
5634       case ISD::SREM:
5635       case ISD::SSUBSAT:
5636       case ISD::USUBSAT:
5637         return getConstant(0, DL, VT);    // fold op(undef, arg2) -> 0
5638       }
5639     }
5640   }
5641 
5642   // Fold a bunch of operators when the RHS is undef.
5643   if (N2.isUndef()) {
5644     switch (Opcode) {
5645     case ISD::XOR:
5646       if (N1.isUndef())
5647         // Handle undef ^ undef -> 0 special case. This is a common
5648         // idiom (misuse).
5649         return getConstant(0, DL, VT);
5650       LLVM_FALLTHROUGH;
5651     case ISD::ADD:
5652     case ISD::SUB:
5653     case ISD::UDIV:
5654     case ISD::SDIV:
5655     case ISD::UREM:
5656     case ISD::SREM:
5657       return getUNDEF(VT);       // fold op(arg1, undef) -> undef
5658     case ISD::MUL:
5659     case ISD::AND:
5660     case ISD::SSUBSAT:
5661     case ISD::USUBSAT:
5662       return getConstant(0, DL, VT);  // fold op(arg1, undef) -> 0
5663     case ISD::OR:
5664     case ISD::SADDSAT:
5665     case ISD::UADDSAT:
5666       return getAllOnesConstant(DL, VT);
5667     }
5668   }
5669 
5670   // Memoize this node if possible.
5671   SDNode *N;
5672   SDVTList VTs = getVTList(VT);
5673   SDValue Ops[] = {N1, N2};
5674   if (VT != MVT::Glue) {
5675     FoldingSetNodeID ID;
5676     AddNodeIDNode(ID, Opcode, VTs, Ops);
5677     void *IP = nullptr;
5678     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5679       E->intersectFlagsWith(Flags);
5680       return SDValue(E, 0);
5681     }
5682 
5683     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5684     N->setFlags(Flags);
5685     createOperands(N, Ops);
5686     CSEMap.InsertNode(N, IP);
5687   } else {
5688     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5689     createOperands(N, Ops);
5690   }
5691 
5692   InsertNode(N);
5693   SDValue V = SDValue(N, 0);
5694   NewSDValueDbgMsg(V, "Creating new node: ", this);
5695   return V;
5696 }
5697 
5698 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5699                               SDValue N1, SDValue N2, SDValue N3) {
5700   SDNodeFlags Flags;
5701   if (Inserter)
5702     Flags = Inserter->getFlags();
5703   return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
5704 }
5705 
5706 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5707                               SDValue N1, SDValue N2, SDValue N3,
5708                               const SDNodeFlags Flags) {
5709   // Perform various simplifications.
5710   switch (Opcode) {
5711   case ISD::FMA: {
5712     assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5713     assert(N1.getValueType() == VT && N2.getValueType() == VT &&
5714            N3.getValueType() == VT && "FMA types must match!");
5715     ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5716     ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5717     ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
5718     if (N1CFP && N2CFP && N3CFP) {
5719       APFloat  V1 = N1CFP->getValueAPF();
5720       const APFloat &V2 = N2CFP->getValueAPF();
5721       const APFloat &V3 = N3CFP->getValueAPF();
5722       V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
5723       return getConstantFP(V1, DL, VT);
5724     }
5725     break;
5726   }
5727   case ISD::BUILD_VECTOR: {
5728     // Attempt to simplify BUILD_VECTOR.
5729     SDValue Ops[] = {N1, N2, N3};
5730     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5731       return V;
5732     break;
5733   }
5734   case ISD::CONCAT_VECTORS: {
5735     SDValue Ops[] = {N1, N2, N3};
5736     if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5737       return V;
5738     break;
5739   }
5740   case ISD::SETCC: {
5741     assert(VT.isInteger() && "SETCC result type must be an integer!");
5742     assert(N1.getValueType() == N2.getValueType() &&
5743            "SETCC operands must have the same type!");
5744     assert(VT.isVector() == N1.getValueType().isVector() &&
5745            "SETCC type should be vector iff the operand type is vector!");
5746     assert((!VT.isVector() || VT.getVectorElementCount() ==
5747                                   N1.getValueType().getVectorElementCount()) &&
5748            "SETCC vector element counts must match!");
5749     // Use FoldSetCC to simplify SETCC's.
5750     if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
5751       return V;
5752     // Vector constant folding.
5753     SDValue Ops[] = {N1, N2, N3};
5754     if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
5755       NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
5756       return V;
5757     }
5758     break;
5759   }
5760   case ISD::SELECT:
5761   case ISD::VSELECT:
5762     if (SDValue V = simplifySelect(N1, N2, N3))
5763       return V;
5764     break;
5765   case ISD::VECTOR_SHUFFLE:
5766     llvm_unreachable("should use getVectorShuffle constructor!");
5767   case ISD::INSERT_VECTOR_ELT: {
5768     ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
5769     // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
5770     // for scalable vectors where we will generate appropriate code to
5771     // deal with out-of-bounds cases correctly.
5772     if (N3C && N1.getValueType().isFixedLengthVector() &&
5773         N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
5774       return getUNDEF(VT);
5775 
5776     // Undefined index can be assumed out-of-bounds, so that's UNDEF too.
5777     if (N3.isUndef())
5778       return getUNDEF(VT);
5779 
5780     // If the inserted element is an UNDEF, just use the input vector.
5781     if (N2.isUndef())
5782       return N1;
5783 
5784     break;
5785   }
5786   case ISD::INSERT_SUBVECTOR: {
5787     // Inserting undef into undef is still undef.
5788     if (N1.isUndef() && N2.isUndef())
5789       return getUNDEF(VT);
5790 
5791     EVT N2VT = N2.getValueType();
5792     assert(VT == N1.getValueType() &&
5793            "Dest and insert subvector source types must match!");
5794     assert(VT.isVector() && N2VT.isVector() &&
5795            "Insert subvector VTs must be vectors!");
5796     assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) &&
5797            "Cannot insert a scalable vector into a fixed length vector!");
5798     assert((VT.isScalableVector() != N2VT.isScalableVector() ||
5799             VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) &&
5800            "Insert subvector must be from smaller vector to larger vector!");
5801     assert(isa<ConstantSDNode>(N3) &&
5802            "Insert subvector index must be constant");
5803     assert((VT.isScalableVector() != N2VT.isScalableVector() ||
5804             (N2VT.getVectorMinNumElements() +
5805              cast<ConstantSDNode>(N3)->getZExtValue()) <=
5806                 VT.getVectorMinNumElements()) &&
5807            "Insert subvector overflow!");
5808 
5809     // Trivial insertion.
5810     if (VT == N2VT)
5811       return N2;
5812 
5813     // If this is an insert of an extracted vector into an undef vector, we
5814     // can just use the input to the extract.
5815     if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5816         N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
5817       return N2.getOperand(0);
5818     break;
5819   }
5820   case ISD::BITCAST:
5821     // Fold bit_convert nodes from a type to themselves.
5822     if (N1.getValueType() == VT)
5823       return N1;
5824     break;
5825   }
5826 
5827   // Memoize node if it doesn't produce a flag.
5828   SDNode *N;
5829   SDVTList VTs = getVTList(VT);
5830   SDValue Ops[] = {N1, N2, N3};
5831   if (VT != MVT::Glue) {
5832     FoldingSetNodeID ID;
5833     AddNodeIDNode(ID, Opcode, VTs, Ops);
5834     void *IP = nullptr;
5835     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5836       E->intersectFlagsWith(Flags);
5837       return SDValue(E, 0);
5838     }
5839 
5840     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5841     N->setFlags(Flags);
5842     createOperands(N, Ops);
5843     CSEMap.InsertNode(N, IP);
5844   } else {
5845     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5846     createOperands(N, Ops);
5847   }
5848 
5849   InsertNode(N);
5850   SDValue V = SDValue(N, 0);
5851   NewSDValueDbgMsg(V, "Creating new node: ", this);
5852   return V;
5853 }
5854 
5855 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5856                               SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5857   SDValue Ops[] = { N1, N2, N3, N4 };
5858   return getNode(Opcode, DL, VT, Ops);
5859 }
5860 
5861 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5862                               SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5863                               SDValue N5) {
5864   SDValue Ops[] = { N1, N2, N3, N4, N5 };
5865   return getNode(Opcode, DL, VT, Ops);
5866 }
5867 
5868 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
5869 /// the incoming stack arguments to be loaded from the stack.
5870 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
5871   SmallVector<SDValue, 8> ArgChains;
5872 
5873   // Include the original chain at the beginning of the list. When this is
5874   // used by target LowerCall hooks, this helps legalize find the
5875   // CALLSEQ_BEGIN node.
5876   ArgChains.push_back(Chain);
5877 
5878   // Add a chain value for each stack argument.
5879   for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
5880        UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
5881     if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
5882       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
5883         if (FI->getIndex() < 0)
5884           ArgChains.push_back(SDValue(L, 1));
5885 
5886   // Build a tokenfactor for all the chains.
5887   return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
5888 }
5889 
5890 /// getMemsetValue - Vectorized representation of the memset value
5891 /// operand.
5892 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
5893                               const SDLoc &dl) {
5894   assert(!Value.isUndef());
5895 
5896   unsigned NumBits = VT.getScalarSizeInBits();
5897   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
5898     assert(C->getAPIntValue().getBitWidth() == 8);
5899     APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
5900     if (VT.isInteger()) {
5901       bool IsOpaque = VT.getSizeInBits() > 64 ||
5902           !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
5903       return DAG.getConstant(Val, dl, VT, false, IsOpaque);
5904     }
5905     return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
5906                              VT);
5907   }
5908 
5909   assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
5910   EVT IntVT = VT.getScalarType();
5911   if (!IntVT.isInteger())
5912     IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
5913 
5914   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
5915   if (NumBits > 8) {
5916     // Use a multiplication with 0x010101... to extend the input to the
5917     // required length.
5918     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
5919     Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
5920                         DAG.getConstant(Magic, dl, IntVT));
5921   }
5922 
5923   if (VT != Value.getValueType() && !VT.isInteger())
5924     Value = DAG.getBitcast(VT.getScalarType(), Value);
5925   if (VT != Value.getValueType())
5926     Value = DAG.getSplatBuildVector(VT, dl, Value);
5927 
5928   return Value;
5929 }
5930 
5931 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
5932 /// used when a memcpy is turned into a memset when the source is a constant
5933 /// string ptr.
5934 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
5935                                   const TargetLowering &TLI,
5936                                   const ConstantDataArraySlice &Slice) {
5937   // Handle vector with all elements zero.
5938   if (Slice.Array == nullptr) {
5939     if (VT.isInteger())
5940       return DAG.getConstant(0, dl, VT);
5941     else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
5942       return DAG.getConstantFP(0.0, dl, VT);
5943     else if (VT.isVector()) {
5944       unsigned NumElts = VT.getVectorNumElements();
5945       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
5946       return DAG.getNode(ISD::BITCAST, dl, VT,
5947                          DAG.getConstant(0, dl,
5948                                          EVT::getVectorVT(*DAG.getContext(),
5949                                                           EltVT, NumElts)));
5950     } else
5951       llvm_unreachable("Expected type!");
5952   }
5953 
5954   assert(!VT.isVector() && "Can't handle vector type here!");
5955   unsigned NumVTBits = VT.getSizeInBits();
5956   unsigned NumVTBytes = NumVTBits / 8;
5957   unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
5958 
5959   APInt Val(NumVTBits, 0);
5960   if (DAG.getDataLayout().isLittleEndian()) {
5961     for (unsigned i = 0; i != NumBytes; ++i)
5962       Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
5963   } else {
5964     for (unsigned i = 0; i != NumBytes; ++i)
5965       Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
5966   }
5967 
5968   // If the "cost" of materializing the integer immediate is less than the cost
5969   // of a load, then it is cost effective to turn the load into the immediate.
5970   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
5971   if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
5972     return DAG.getConstant(Val, dl, VT);
5973   return SDValue(nullptr, 0);
5974 }
5975 
5976 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
5977                                            const SDLoc &DL,
5978                                            const SDNodeFlags Flags) {
5979   EVT VT = Base.getValueType();
5980   SDValue Index;
5981 
5982   if (Offset.isScalable())
5983     Index = getVScale(DL, Base.getValueType(),
5984                       APInt(Base.getValueSizeInBits().getFixedSize(),
5985                             Offset.getKnownMinSize()));
5986   else
5987     Index = getConstant(Offset.getFixedSize(), DL, VT);
5988 
5989   return getMemBasePlusOffset(Base, Index, DL, Flags);
5990 }
5991 
5992 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
5993                                            const SDLoc &DL,
5994                                            const SDNodeFlags Flags) {
5995   assert(Offset.getValueType().isInteger());
5996   EVT BasePtrVT = Ptr.getValueType();
5997   return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags);
5998 }
5999 
6000 /// Returns true if memcpy source is constant data.
6001 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
6002   uint64_t SrcDelta = 0;
6003   GlobalAddressSDNode *G = nullptr;
6004   if (Src.getOpcode() == ISD::GlobalAddress)
6005     G = cast<GlobalAddressSDNode>(Src);
6006   else if (Src.getOpcode() == ISD::ADD &&
6007            Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
6008            Src.getOperand(1).getOpcode() == ISD::Constant) {
6009     G = cast<GlobalAddressSDNode>(Src.getOperand(0));
6010     SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
6011   }
6012   if (!G)
6013     return false;
6014 
6015   return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
6016                                   SrcDelta + G->getOffset());
6017 }
6018 
6019 static bool shouldLowerMemFuncForSize(const MachineFunction &MF,
6020                                       SelectionDAG &DAG) {
6021   // On Darwin, -Os means optimize for size without hurting performance, so
6022   // only really optimize for size when -Oz (MinSize) is used.
6023   if (MF.getTarget().getTargetTriple().isOSDarwin())
6024     return MF.getFunction().hasMinSize();
6025   return DAG.shouldOptForSize();
6026 }
6027 
6028 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
6029                           SmallVector<SDValue, 32> &OutChains, unsigned From,
6030                           unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
6031                           SmallVector<SDValue, 16> &OutStoreChains) {
6032   assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
6033   assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
6034   SmallVector<SDValue, 16> GluedLoadChains;
6035   for (unsigned i = From; i < To; ++i) {
6036     OutChains.push_back(OutLoadChains[i]);
6037     GluedLoadChains.push_back(OutLoadChains[i]);
6038   }
6039 
6040   // Chain for all loads.
6041   SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
6042                                   GluedLoadChains);
6043 
6044   for (unsigned i = From; i < To; ++i) {
6045     StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
6046     SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
6047                                   ST->getBasePtr(), ST->getMemoryVT(),
6048                                   ST->getMemOperand());
6049     OutChains.push_back(NewStore);
6050   }
6051 }
6052 
6053 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6054                                        SDValue Chain, SDValue Dst, SDValue Src,
6055                                        uint64_t Size, Align Alignment,
6056                                        bool isVol, bool AlwaysInline,
6057                                        MachinePointerInfo DstPtrInfo,
6058                                        MachinePointerInfo SrcPtrInfo) {
6059   // Turn a memcpy of undef to nop.
6060   // FIXME: We need to honor volatile even is Src is undef.
6061   if (Src.isUndef())
6062     return Chain;
6063 
6064   // Expand memcpy to a series of load and store ops if the size operand falls
6065   // below a certain threshold.
6066   // TODO: In the AlwaysInline case, if the size is big then generate a loop
6067   // rather than maybe a humongous number of loads and stores.
6068   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6069   const DataLayout &DL = DAG.getDataLayout();
6070   LLVMContext &C = *DAG.getContext();
6071   std::vector<EVT> MemOps;
6072   bool DstAlignCanChange = false;
6073   MachineFunction &MF = DAG.getMachineFunction();
6074   MachineFrameInfo &MFI = MF.getFrameInfo();
6075   bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6076   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6077   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6078     DstAlignCanChange = true;
6079   MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6080   if (!SrcAlign || Alignment > *SrcAlign)
6081     SrcAlign = Alignment;
6082   assert(SrcAlign && "SrcAlign must be set");
6083   ConstantDataArraySlice Slice;
6084   // If marked as volatile, perform a copy even when marked as constant.
6085   bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
6086   bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
6087   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
6088   const MemOp Op = isZeroConstant
6089                        ? MemOp::Set(Size, DstAlignCanChange, Alignment,
6090                                     /*IsZeroMemset*/ true, isVol)
6091                        : MemOp::Copy(Size, DstAlignCanChange, Alignment,
6092                                      *SrcAlign, isVol, CopyFromConstant);
6093   if (!TLI.findOptimalMemOpLowering(
6094           MemOps, Limit, Op, DstPtrInfo.getAddrSpace(),
6095           SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
6096     return SDValue();
6097 
6098   if (DstAlignCanChange) {
6099     Type *Ty = MemOps[0].getTypeForEVT(C);
6100     Align NewAlign = DL.getABITypeAlign(Ty);
6101 
6102     // Don't promote to an alignment that would require dynamic stack
6103     // realignment.
6104     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
6105     if (!TRI->needsStackRealignment(MF))
6106       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
6107         NewAlign = NewAlign / 2;
6108 
6109     if (NewAlign > Alignment) {
6110       // Give the stack frame object a larger alignment if needed.
6111       if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6112         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6113       Alignment = NewAlign;
6114     }
6115   }
6116 
6117   MachineMemOperand::Flags MMOFlags =
6118       isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6119   SmallVector<SDValue, 16> OutLoadChains;
6120   SmallVector<SDValue, 16> OutStoreChains;
6121   SmallVector<SDValue, 32> OutChains;
6122   unsigned NumMemOps = MemOps.size();
6123   uint64_t SrcOff = 0, DstOff = 0;
6124   for (unsigned i = 0; i != NumMemOps; ++i) {
6125     EVT VT = MemOps[i];
6126     unsigned VTSize = VT.getSizeInBits() / 8;
6127     SDValue Value, Store;
6128 
6129     if (VTSize > Size) {
6130       // Issuing an unaligned load / store pair  that overlaps with the previous
6131       // pair. Adjust the offset accordingly.
6132       assert(i == NumMemOps-1 && i != 0);
6133       SrcOff -= VTSize - Size;
6134       DstOff -= VTSize - Size;
6135     }
6136 
6137     if (CopyFromConstant &&
6138         (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
6139       // It's unlikely a store of a vector immediate can be done in a single
6140       // instruction. It would require a load from a constantpool first.
6141       // We only handle zero vectors here.
6142       // FIXME: Handle other cases where store of vector immediate is done in
6143       // a single instruction.
6144       ConstantDataArraySlice SubSlice;
6145       if (SrcOff < Slice.Length) {
6146         SubSlice = Slice;
6147         SubSlice.move(SrcOff);
6148       } else {
6149         // This is an out-of-bounds access and hence UB. Pretend we read zero.
6150         SubSlice.Array = nullptr;
6151         SubSlice.Offset = 0;
6152         SubSlice.Length = VTSize;
6153       }
6154       Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
6155       if (Value.getNode()) {
6156         Store = DAG.getStore(
6157             Chain, dl, Value,
6158             DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6159             DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
6160         OutChains.push_back(Store);
6161       }
6162     }
6163 
6164     if (!Store.getNode()) {
6165       // The type might not be legal for the target.  This should only happen
6166       // if the type is smaller than a legal type, as on PPC, so the right
6167       // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify
6168       // to Load/Store if NVT==VT.
6169       // FIXME does the case above also need this?
6170       EVT NVT = TLI.getTypeToTransformTo(C, VT);
6171       assert(NVT.bitsGE(VT));
6172 
6173       bool isDereferenceable =
6174         SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6175       MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6176       if (isDereferenceable)
6177         SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6178 
6179       Value = DAG.getExtLoad(
6180           ISD::EXTLOAD, dl, NVT, Chain,
6181           DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6182           SrcPtrInfo.getWithOffset(SrcOff), VT,
6183           commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags);
6184       OutLoadChains.push_back(Value.getValue(1));
6185 
6186       Store = DAG.getTruncStore(
6187           Chain, dl, Value,
6188           DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6189           DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
6190       OutStoreChains.push_back(Store);
6191     }
6192     SrcOff += VTSize;
6193     DstOff += VTSize;
6194     Size -= VTSize;
6195   }
6196 
6197   unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
6198                                 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
6199   unsigned NumLdStInMemcpy = OutStoreChains.size();
6200 
6201   if (NumLdStInMemcpy) {
6202     // It may be that memcpy might be converted to memset if it's memcpy
6203     // of constants. In such a case, we won't have loads and stores, but
6204     // just stores. In the absence of loads, there is nothing to gang up.
6205     if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
6206       // If target does not care, just leave as it.
6207       for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
6208         OutChains.push_back(OutLoadChains[i]);
6209         OutChains.push_back(OutStoreChains[i]);
6210       }
6211     } else {
6212       // Ld/St less than/equal limit set by target.
6213       if (NumLdStInMemcpy <= GluedLdStLimit) {
6214           chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6215                                         NumLdStInMemcpy, OutLoadChains,
6216                                         OutStoreChains);
6217       } else {
6218         unsigned NumberLdChain =  NumLdStInMemcpy / GluedLdStLimit;
6219         unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
6220         unsigned GlueIter = 0;
6221 
6222         for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
6223           unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
6224           unsigned IndexTo   = NumLdStInMemcpy - GlueIter;
6225 
6226           chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
6227                                        OutLoadChains, OutStoreChains);
6228           GlueIter += GluedLdStLimit;
6229         }
6230 
6231         // Residual ld/st.
6232         if (RemainingLdStInMemcpy) {
6233           chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6234                                         RemainingLdStInMemcpy, OutLoadChains,
6235                                         OutStoreChains);
6236         }
6237       }
6238     }
6239   }
6240   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6241 }
6242 
6243 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6244                                         SDValue Chain, SDValue Dst, SDValue Src,
6245                                         uint64_t Size, Align Alignment,
6246                                         bool isVol, bool AlwaysInline,
6247                                         MachinePointerInfo DstPtrInfo,
6248                                         MachinePointerInfo SrcPtrInfo) {
6249   // Turn a memmove of undef to nop.
6250   // FIXME: We need to honor volatile even is Src is undef.
6251   if (Src.isUndef())
6252     return Chain;
6253 
6254   // Expand memmove to a series of load and store ops if the size operand falls
6255   // below a certain threshold.
6256   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6257   const DataLayout &DL = DAG.getDataLayout();
6258   LLVMContext &C = *DAG.getContext();
6259   std::vector<EVT> MemOps;
6260   bool DstAlignCanChange = false;
6261   MachineFunction &MF = DAG.getMachineFunction();
6262   MachineFrameInfo &MFI = MF.getFrameInfo();
6263   bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6264   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6265   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6266     DstAlignCanChange = true;
6267   MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6268   if (!SrcAlign || Alignment > *SrcAlign)
6269     SrcAlign = Alignment;
6270   assert(SrcAlign && "SrcAlign must be set");
6271   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
6272   if (!TLI.findOptimalMemOpLowering(
6273           MemOps, Limit,
6274           MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign,
6275                       /*IsVolatile*/ true),
6276           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
6277           MF.getFunction().getAttributes()))
6278     return SDValue();
6279 
6280   if (DstAlignCanChange) {
6281     Type *Ty = MemOps[0].getTypeForEVT(C);
6282     Align NewAlign = DL.getABITypeAlign(Ty);
6283     if (NewAlign > Alignment) {
6284       // Give the stack frame object a larger alignment if needed.
6285       if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6286         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6287       Alignment = NewAlign;
6288     }
6289   }
6290 
6291   MachineMemOperand::Flags MMOFlags =
6292       isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6293   uint64_t SrcOff = 0, DstOff = 0;
6294   SmallVector<SDValue, 8> LoadValues;
6295   SmallVector<SDValue, 8> LoadChains;
6296   SmallVector<SDValue, 8> OutChains;
6297   unsigned NumMemOps = MemOps.size();
6298   for (unsigned i = 0; i < NumMemOps; i++) {
6299     EVT VT = MemOps[i];
6300     unsigned VTSize = VT.getSizeInBits() / 8;
6301     SDValue Value;
6302 
6303     bool isDereferenceable =
6304       SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6305     MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6306     if (isDereferenceable)
6307       SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6308 
6309     Value =
6310         DAG.getLoad(VT, dl, Chain,
6311                     DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6312                     SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags);
6313     LoadValues.push_back(Value);
6314     LoadChains.push_back(Value.getValue(1));
6315     SrcOff += VTSize;
6316   }
6317   Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
6318   OutChains.clear();
6319   for (unsigned i = 0; i < NumMemOps; i++) {
6320     EVT VT = MemOps[i];
6321     unsigned VTSize = VT.getSizeInBits() / 8;
6322     SDValue Store;
6323 
6324     Store =
6325         DAG.getStore(Chain, dl, LoadValues[i],
6326                      DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6327                      DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
6328     OutChains.push_back(Store);
6329     DstOff += VTSize;
6330   }
6331 
6332   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6333 }
6334 
6335 /// Lower the call to 'memset' intrinsic function into a series of store
6336 /// operations.
6337 ///
6338 /// \param DAG Selection DAG where lowered code is placed.
6339 /// \param dl Link to corresponding IR location.
6340 /// \param Chain Control flow dependency.
6341 /// \param Dst Pointer to destination memory location.
6342 /// \param Src Value of byte to write into the memory.
6343 /// \param Size Number of bytes to write.
6344 /// \param Alignment Alignment of the destination in bytes.
6345 /// \param isVol True if destination is volatile.
6346 /// \param DstPtrInfo IR information on the memory pointer.
6347 /// \returns New head in the control flow, if lowering was successful, empty
6348 /// SDValue otherwise.
6349 ///
6350 /// The function tries to replace 'llvm.memset' intrinsic with several store
6351 /// operations and value calculation code. This is usually profitable for small
6352 /// memory size.
6353 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
6354                                SDValue Chain, SDValue Dst, SDValue Src,
6355                                uint64_t Size, Align Alignment, bool isVol,
6356                                MachinePointerInfo DstPtrInfo) {
6357   // Turn a memset of undef to nop.
6358   // FIXME: We need to honor volatile even is Src is undef.
6359   if (Src.isUndef())
6360     return Chain;
6361 
6362   // Expand memset to a series of load/store ops if the size operand
6363   // falls below a certain threshold.
6364   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6365   std::vector<EVT> MemOps;
6366   bool DstAlignCanChange = false;
6367   MachineFunction &MF = DAG.getMachineFunction();
6368   MachineFrameInfo &MFI = MF.getFrameInfo();
6369   bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6370   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6371   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6372     DstAlignCanChange = true;
6373   bool IsZeroVal =
6374     isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
6375   if (!TLI.findOptimalMemOpLowering(
6376           MemOps, TLI.getMaxStoresPerMemset(OptSize),
6377           MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
6378           DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes()))
6379     return SDValue();
6380 
6381   if (DstAlignCanChange) {
6382     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
6383     Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty);
6384     if (NewAlign > Alignment) {
6385       // Give the stack frame object a larger alignment if needed.
6386       if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6387         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6388       Alignment = NewAlign;
6389     }
6390   }
6391 
6392   SmallVector<SDValue, 8> OutChains;
6393   uint64_t DstOff = 0;
6394   unsigned NumMemOps = MemOps.size();
6395 
6396   // Find the largest store and generate the bit pattern for it.
6397   EVT LargestVT = MemOps[0];
6398   for (unsigned i = 1; i < NumMemOps; i++)
6399     if (MemOps[i].bitsGT(LargestVT))
6400       LargestVT = MemOps[i];
6401   SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
6402 
6403   for (unsigned i = 0; i < NumMemOps; i++) {
6404     EVT VT = MemOps[i];
6405     unsigned VTSize = VT.getSizeInBits() / 8;
6406     if (VTSize > Size) {
6407       // Issuing an unaligned load / store pair  that overlaps with the previous
6408       // pair. Adjust the offset accordingly.
6409       assert(i == NumMemOps-1 && i != 0);
6410       DstOff -= VTSize - Size;
6411     }
6412 
6413     // If this store is smaller than the largest store see whether we can get
6414     // the smaller value for free with a truncate.
6415     SDValue Value = MemSetValue;
6416     if (VT.bitsLT(LargestVT)) {
6417       if (!LargestVT.isVector() && !VT.isVector() &&
6418           TLI.isTruncateFree(LargestVT, VT))
6419         Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
6420       else
6421         Value = getMemsetValue(Src, VT, DAG, dl);
6422     }
6423     assert(Value.getValueType() == VT && "Value with wrong type.");
6424     SDValue Store = DAG.getStore(
6425         Chain, dl, Value,
6426         DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6427         DstPtrInfo.getWithOffset(DstOff), Alignment,
6428         isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
6429     OutChains.push_back(Store);
6430     DstOff += VT.getSizeInBits() / 8;
6431     Size -= VTSize;
6432   }
6433 
6434   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6435 }
6436 
6437 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
6438                                             unsigned AS) {
6439   // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
6440   // pointer operands can be losslessly bitcasted to pointers of address space 0
6441   if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
6442     report_fatal_error("cannot lower memory intrinsic in address space " +
6443                        Twine(AS));
6444   }
6445 }
6446 
6447 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
6448                                 SDValue Src, SDValue Size, Align Alignment,
6449                                 bool isVol, bool AlwaysInline, bool isTailCall,
6450                                 MachinePointerInfo DstPtrInfo,
6451                                 MachinePointerInfo SrcPtrInfo) {
6452   // Check to see if we should lower the memcpy to loads and stores first.
6453   // For cases within the target-specified limits, this is the best choice.
6454   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6455   if (ConstantSize) {
6456     // Memcpy with size zero? Just return the original chain.
6457     if (ConstantSize->isNullValue())
6458       return Chain;
6459 
6460     SDValue Result = getMemcpyLoadsAndStores(
6461         *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
6462         isVol, false, DstPtrInfo, SrcPtrInfo);
6463     if (Result.getNode())
6464       return Result;
6465   }
6466 
6467   // Then check to see if we should lower the memcpy with target-specific
6468   // code. If the target chooses to do this, this is the next best.
6469   if (TSI) {
6470     SDValue Result = TSI->EmitTargetCodeForMemcpy(
6471         *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
6472         DstPtrInfo, SrcPtrInfo);
6473     if (Result.getNode())
6474       return Result;
6475   }
6476 
6477   // If we really need inline code and the target declined to provide it,
6478   // use a (potentially long) sequence of loads and stores.
6479   if (AlwaysInline) {
6480     assert(ConstantSize && "AlwaysInline requires a constant size!");
6481     return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
6482                                    ConstantSize->getZExtValue(), Alignment,
6483                                    isVol, true, DstPtrInfo, SrcPtrInfo);
6484   }
6485 
6486   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6487   checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6488 
6489   // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
6490   // memcpy is not guaranteed to be safe. libc memcpys aren't required to
6491   // respect volatile, so they may do things like read or write memory
6492   // beyond the given memory regions. But fixing this isn't easy, and most
6493   // people don't care.
6494 
6495   // Emit a library call.
6496   TargetLowering::ArgListTy Args;
6497   TargetLowering::ArgListEntry Entry;
6498   Entry.Ty = Type::getInt8PtrTy(*getContext());
6499   Entry.Node = Dst; Args.push_back(Entry);
6500   Entry.Node = Src; Args.push_back(Entry);
6501 
6502   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6503   Entry.Node = Size; Args.push_back(Entry);
6504   // FIXME: pass in SDLoc
6505   TargetLowering::CallLoweringInfo CLI(*this);
6506   CLI.setDebugLoc(dl)
6507       .setChain(Chain)
6508       .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
6509                     Dst.getValueType().getTypeForEVT(*getContext()),
6510                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
6511                                       TLI->getPointerTy(getDataLayout())),
6512                     std::move(Args))
6513       .setDiscardResult()
6514       .setTailCall(isTailCall);
6515 
6516   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6517   return CallResult.second;
6518 }
6519 
6520 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
6521                                       SDValue Dst, unsigned DstAlign,
6522                                       SDValue Src, unsigned SrcAlign,
6523                                       SDValue Size, Type *SizeTy,
6524                                       unsigned ElemSz, bool isTailCall,
6525                                       MachinePointerInfo DstPtrInfo,
6526                                       MachinePointerInfo SrcPtrInfo) {
6527   // Emit a library call.
6528   TargetLowering::ArgListTy Args;
6529   TargetLowering::ArgListEntry Entry;
6530   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6531   Entry.Node = Dst;
6532   Args.push_back(Entry);
6533 
6534   Entry.Node = Src;
6535   Args.push_back(Entry);
6536 
6537   Entry.Ty = SizeTy;
6538   Entry.Node = Size;
6539   Args.push_back(Entry);
6540 
6541   RTLIB::Libcall LibraryCall =
6542       RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6543   if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6544     report_fatal_error("Unsupported element size");
6545 
6546   TargetLowering::CallLoweringInfo CLI(*this);
6547   CLI.setDebugLoc(dl)
6548       .setChain(Chain)
6549       .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6550                     Type::getVoidTy(*getContext()),
6551                     getExternalSymbol(TLI->getLibcallName(LibraryCall),
6552                                       TLI->getPointerTy(getDataLayout())),
6553                     std::move(Args))
6554       .setDiscardResult()
6555       .setTailCall(isTailCall);
6556 
6557   std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6558   return CallResult.second;
6559 }
6560 
6561 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
6562                                  SDValue Src, SDValue Size, Align Alignment,
6563                                  bool isVol, bool isTailCall,
6564                                  MachinePointerInfo DstPtrInfo,
6565                                  MachinePointerInfo SrcPtrInfo) {
6566   // Check to see if we should lower the memmove to loads and stores first.
6567   // For cases within the target-specified limits, this is the best choice.
6568   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6569   if (ConstantSize) {
6570     // Memmove with size zero? Just return the original chain.
6571     if (ConstantSize->isNullValue())
6572       return Chain;
6573 
6574     SDValue Result = getMemmoveLoadsAndStores(
6575         *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
6576         isVol, false, DstPtrInfo, SrcPtrInfo);
6577     if (Result.getNode())
6578       return Result;
6579   }
6580 
6581   // Then check to see if we should lower the memmove with target-specific
6582   // code. If the target chooses to do this, this is the next best.
6583   if (TSI) {
6584     SDValue Result =
6585         TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size,
6586                                       Alignment, isVol, DstPtrInfo, SrcPtrInfo);
6587     if (Result.getNode())
6588       return Result;
6589   }
6590 
6591   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6592   checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6593 
6594   // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
6595   // not be safe.  See memcpy above for more details.
6596 
6597   // Emit a library call.
6598   TargetLowering::ArgListTy Args;
6599   TargetLowering::ArgListEntry Entry;
6600   Entry.Ty = Type::getInt8PtrTy(*getContext());
6601   Entry.Node = Dst; Args.push_back(Entry);
6602   Entry.Node = Src; Args.push_back(Entry);
6603 
6604   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6605   Entry.Node = Size; Args.push_back(Entry);
6606   // FIXME:  pass in SDLoc
6607   TargetLowering::CallLoweringInfo CLI(*this);
6608   CLI.setDebugLoc(dl)
6609       .setChain(Chain)
6610       .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
6611                     Dst.getValueType().getTypeForEVT(*getContext()),
6612                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
6613                                       TLI->getPointerTy(getDataLayout())),
6614                     std::move(Args))
6615       .setDiscardResult()
6616       .setTailCall(isTailCall);
6617 
6618   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6619   return CallResult.second;
6620 }
6621 
6622 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
6623                                        SDValue Dst, unsigned DstAlign,
6624                                        SDValue Src, unsigned SrcAlign,
6625                                        SDValue Size, Type *SizeTy,
6626                                        unsigned ElemSz, bool isTailCall,
6627                                        MachinePointerInfo DstPtrInfo,
6628                                        MachinePointerInfo SrcPtrInfo) {
6629   // Emit a library call.
6630   TargetLowering::ArgListTy Args;
6631   TargetLowering::ArgListEntry Entry;
6632   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6633   Entry.Node = Dst;
6634   Args.push_back(Entry);
6635 
6636   Entry.Node = Src;
6637   Args.push_back(Entry);
6638 
6639   Entry.Ty = SizeTy;
6640   Entry.Node = Size;
6641   Args.push_back(Entry);
6642 
6643   RTLIB::Libcall LibraryCall =
6644       RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6645   if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6646     report_fatal_error("Unsupported element size");
6647 
6648   TargetLowering::CallLoweringInfo CLI(*this);
6649   CLI.setDebugLoc(dl)
6650       .setChain(Chain)
6651       .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6652                     Type::getVoidTy(*getContext()),
6653                     getExternalSymbol(TLI->getLibcallName(LibraryCall),
6654                                       TLI->getPointerTy(getDataLayout())),
6655                     std::move(Args))
6656       .setDiscardResult()
6657       .setTailCall(isTailCall);
6658 
6659   std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6660   return CallResult.second;
6661 }
6662 
6663 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
6664                                 SDValue Src, SDValue Size, Align Alignment,
6665                                 bool isVol, bool isTailCall,
6666                                 MachinePointerInfo DstPtrInfo) {
6667   // Check to see if we should lower the memset to stores first.
6668   // For cases within the target-specified limits, this is the best choice.
6669   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6670   if (ConstantSize) {
6671     // Memset with size zero? Just return the original chain.
6672     if (ConstantSize->isNullValue())
6673       return Chain;
6674 
6675     SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
6676                                      ConstantSize->getZExtValue(), Alignment,
6677                                      isVol, DstPtrInfo);
6678 
6679     if (Result.getNode())
6680       return Result;
6681   }
6682 
6683   // Then check to see if we should lower the memset with target-specific
6684   // code. If the target chooses to do this, this is the next best.
6685   if (TSI) {
6686     SDValue Result = TSI->EmitTargetCodeForMemset(
6687         *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo);
6688     if (Result.getNode())
6689       return Result;
6690   }
6691 
6692   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6693 
6694   // Emit a library call.
6695   TargetLowering::ArgListTy Args;
6696   TargetLowering::ArgListEntry Entry;
6697   Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
6698   Args.push_back(Entry);
6699   Entry.Node = Src;
6700   Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
6701   Args.push_back(Entry);
6702   Entry.Node = Size;
6703   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6704   Args.push_back(Entry);
6705 
6706   // FIXME: pass in SDLoc
6707   TargetLowering::CallLoweringInfo CLI(*this);
6708   CLI.setDebugLoc(dl)
6709       .setChain(Chain)
6710       .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
6711                     Dst.getValueType().getTypeForEVT(*getContext()),
6712                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
6713                                       TLI->getPointerTy(getDataLayout())),
6714                     std::move(Args))
6715       .setDiscardResult()
6716       .setTailCall(isTailCall);
6717 
6718   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6719   return CallResult.second;
6720 }
6721 
6722 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
6723                                       SDValue Dst, unsigned DstAlign,
6724                                       SDValue Value, SDValue Size, Type *SizeTy,
6725                                       unsigned ElemSz, bool isTailCall,
6726                                       MachinePointerInfo DstPtrInfo) {
6727   // Emit a library call.
6728   TargetLowering::ArgListTy Args;
6729   TargetLowering::ArgListEntry Entry;
6730   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6731   Entry.Node = Dst;
6732   Args.push_back(Entry);
6733 
6734   Entry.Ty = Type::getInt8Ty(*getContext());
6735   Entry.Node = Value;
6736   Args.push_back(Entry);
6737 
6738   Entry.Ty = SizeTy;
6739   Entry.Node = Size;
6740   Args.push_back(Entry);
6741 
6742   RTLIB::Libcall LibraryCall =
6743       RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6744   if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6745     report_fatal_error("Unsupported element size");
6746 
6747   TargetLowering::CallLoweringInfo CLI(*this);
6748   CLI.setDebugLoc(dl)
6749       .setChain(Chain)
6750       .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6751                     Type::getVoidTy(*getContext()),
6752                     getExternalSymbol(TLI->getLibcallName(LibraryCall),
6753                                       TLI->getPointerTy(getDataLayout())),
6754                     std::move(Args))
6755       .setDiscardResult()
6756       .setTailCall(isTailCall);
6757 
6758   std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6759   return CallResult.second;
6760 }
6761 
6762 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6763                                 SDVTList VTList, ArrayRef<SDValue> Ops,
6764                                 MachineMemOperand *MMO) {
6765   FoldingSetNodeID ID;
6766   ID.AddInteger(MemVT.getRawBits());
6767   AddNodeIDNode(ID, Opcode, VTList, Ops);
6768   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6769   void* IP = nullptr;
6770   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6771     cast<AtomicSDNode>(E)->refineAlignment(MMO);
6772     return SDValue(E, 0);
6773   }
6774 
6775   auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6776                                     VTList, MemVT, MMO);
6777   createOperands(N, Ops);
6778 
6779   CSEMap.InsertNode(N, IP);
6780   InsertNode(N);
6781   return SDValue(N, 0);
6782 }
6783 
6784 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
6785                                        EVT MemVT, SDVTList VTs, SDValue Chain,
6786                                        SDValue Ptr, SDValue Cmp, SDValue Swp,
6787                                        MachineMemOperand *MMO) {
6788   assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
6789          Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
6790   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
6791 
6792   SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
6793   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6794 }
6795 
6796 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6797                                 SDValue Chain, SDValue Ptr, SDValue Val,
6798                                 MachineMemOperand *MMO) {
6799   assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
6800           Opcode == ISD::ATOMIC_LOAD_SUB ||
6801           Opcode == ISD::ATOMIC_LOAD_AND ||
6802           Opcode == ISD::ATOMIC_LOAD_CLR ||
6803           Opcode == ISD::ATOMIC_LOAD_OR ||
6804           Opcode == ISD::ATOMIC_LOAD_XOR ||
6805           Opcode == ISD::ATOMIC_LOAD_NAND ||
6806           Opcode == ISD::ATOMIC_LOAD_MIN ||
6807           Opcode == ISD::ATOMIC_LOAD_MAX ||
6808           Opcode == ISD::ATOMIC_LOAD_UMIN ||
6809           Opcode == ISD::ATOMIC_LOAD_UMAX ||
6810           Opcode == ISD::ATOMIC_LOAD_FADD ||
6811           Opcode == ISD::ATOMIC_LOAD_FSUB ||
6812           Opcode == ISD::ATOMIC_SWAP ||
6813           Opcode == ISD::ATOMIC_STORE) &&
6814          "Invalid Atomic Op");
6815 
6816   EVT VT = Val.getValueType();
6817 
6818   SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
6819                                                getVTList(VT, MVT::Other);
6820   SDValue Ops[] = {Chain, Ptr, Val};
6821   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6822 }
6823 
6824 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6825                                 EVT VT, SDValue Chain, SDValue Ptr,
6826                                 MachineMemOperand *MMO) {
6827   assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
6828 
6829   SDVTList VTs = getVTList(VT, MVT::Other);
6830   SDValue Ops[] = {Chain, Ptr};
6831   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6832 }
6833 
6834 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
6835 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
6836   if (Ops.size() == 1)
6837     return Ops[0];
6838 
6839   SmallVector<EVT, 4> VTs;
6840   VTs.reserve(Ops.size());
6841   for (unsigned i = 0; i < Ops.size(); ++i)
6842     VTs.push_back(Ops[i].getValueType());
6843   return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
6844 }
6845 
6846 SDValue SelectionDAG::getMemIntrinsicNode(
6847     unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
6848     EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
6849     MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
6850   if (!Size && MemVT.isScalableVector())
6851     Size = MemoryLocation::UnknownSize;
6852   else if (!Size)
6853     Size = MemVT.getStoreSize();
6854 
6855   MachineFunction &MF = getMachineFunction();
6856   MachineMemOperand *MMO =
6857       MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);
6858 
6859   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
6860 }
6861 
6862 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
6863                                           SDVTList VTList,
6864                                           ArrayRef<SDValue> Ops, EVT MemVT,
6865                                           MachineMemOperand *MMO) {
6866   assert((Opcode == ISD::INTRINSIC_VOID ||
6867           Opcode == ISD::INTRINSIC_W_CHAIN ||
6868           Opcode == ISD::PREFETCH ||
6869           ((int)Opcode <= std::numeric_limits<int>::max() &&
6870            (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
6871          "Opcode is not a memory-accessing opcode!");
6872 
6873   // Memoize the node unless it returns a flag.
6874   MemIntrinsicSDNode *N;
6875   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
6876     FoldingSetNodeID ID;
6877     AddNodeIDNode(ID, Opcode, VTList, Ops);
6878     ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
6879         Opcode, dl.getIROrder(), VTList, MemVT, MMO));
6880     ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6881     void *IP = nullptr;
6882     if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6883       cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
6884       return SDValue(E, 0);
6885     }
6886 
6887     N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6888                                       VTList, MemVT, MMO);
6889     createOperands(N, Ops);
6890 
6891   CSEMap.InsertNode(N, IP);
6892   } else {
6893     N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6894                                       VTList, MemVT, MMO);
6895     createOperands(N, Ops);
6896   }
6897   InsertNode(N);
6898   SDValue V(N, 0);
6899   NewSDValueDbgMsg(V, "Creating new node: ", this);
6900   return V;
6901 }
6902 
6903 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
6904                                       SDValue Chain, int FrameIndex,
6905                                       int64_t Size, int64_t Offset) {
6906   const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
6907   const auto VTs = getVTList(MVT::Other);
6908   SDValue Ops[2] = {
6909       Chain,
6910       getFrameIndex(FrameIndex,
6911                     getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
6912                     true)};
6913 
6914   FoldingSetNodeID ID;
6915   AddNodeIDNode(ID, Opcode, VTs, Ops);
6916   ID.AddInteger(FrameIndex);
6917   ID.AddInteger(Size);
6918   ID.AddInteger(Offset);
6919   void *IP = nullptr;
6920   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
6921     return SDValue(E, 0);
6922 
6923   LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
6924       Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
6925   createOperands(N, Ops);
6926   CSEMap.InsertNode(N, IP);
6927   InsertNode(N);
6928   SDValue V(N, 0);
6929   NewSDValueDbgMsg(V, "Creating new node: ", this);
6930   return V;
6931 }
6932 
6933 SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain,
6934                                          uint64_t Guid, uint64_t Index,
6935                                          uint32_t Attr) {
6936   const unsigned Opcode = ISD::PSEUDO_PROBE;
6937   const auto VTs = getVTList(MVT::Other);
6938   SDValue Ops[] = {Chain};
6939   FoldingSetNodeID ID;
6940   AddNodeIDNode(ID, Opcode, VTs, Ops);
6941   ID.AddInteger(Guid);
6942   ID.AddInteger(Index);
6943   void *IP = nullptr;
6944   if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
6945     return SDValue(E, 0);
6946 
6947   auto *N = newSDNode<PseudoProbeSDNode>(
6948       Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
6949   createOperands(N, Ops);
6950   CSEMap.InsertNode(N, IP);
6951   InsertNode(N);
6952   SDValue V(N, 0);
6953   NewSDValueDbgMsg(V, "Creating new node: ", this);
6954   return V;
6955 }
6956 
6957 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6958 /// MachinePointerInfo record from it.  This is particularly useful because the
6959 /// code generator has many cases where it doesn't bother passing in a
6960 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
6961 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6962                                            SelectionDAG &DAG, SDValue Ptr,
6963                                            int64_t Offset = 0) {
6964   // If this is FI+Offset, we can model it.
6965   if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
6966     return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
6967                                              FI->getIndex(), Offset);
6968 
6969   // If this is (FI+Offset1)+Offset2, we can model it.
6970   if (Ptr.getOpcode() != ISD::ADD ||
6971       !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
6972       !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
6973     return Info;
6974 
6975   int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6976   return MachinePointerInfo::getFixedStack(
6977       DAG.getMachineFunction(), FI,
6978       Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
6979 }
6980 
6981 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6982 /// MachinePointerInfo record from it.  This is particularly useful because the
6983 /// code generator has many cases where it doesn't bother passing in a
6984 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
6985 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6986                                            SelectionDAG &DAG, SDValue Ptr,
6987                                            SDValue OffsetOp) {
6988   // If the 'Offset' value isn't a constant, we can't handle this.
6989   if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
6990     return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
6991   if (OffsetOp.isUndef())
6992     return InferPointerInfo(Info, DAG, Ptr);
6993   return Info;
6994 }
6995 
6996 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
6997                               EVT VT, const SDLoc &dl, SDValue Chain,
6998                               SDValue Ptr, SDValue Offset,
6999                               MachinePointerInfo PtrInfo, EVT MemVT,
7000                               Align Alignment,
7001                               MachineMemOperand::Flags MMOFlags,
7002                               const AAMDNodes &AAInfo, const MDNode *Ranges) {
7003   assert(Chain.getValueType() == MVT::Other &&
7004         "Invalid chain type");
7005 
7006   MMOFlags |= MachineMemOperand::MOLoad;
7007   assert((MMOFlags & MachineMemOperand::MOStore) == 0);
7008   // If we don't have a PtrInfo, infer the trivial frame index case to simplify
7009   // clients.
7010   if (PtrInfo.V.isNull())
7011     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
7012 
7013   uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
7014   MachineFunction &MF = getMachineFunction();
7015   MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
7016                                                    Alignment, AAInfo, Ranges);
7017   return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
7018 }
7019 
7020 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
7021                               EVT VT, const SDLoc &dl, SDValue Chain,
7022                               SDValue Ptr, SDValue Offset, EVT MemVT,
7023                               MachineMemOperand *MMO) {
7024   if (VT == MemVT) {
7025     ExtType = ISD::NON_EXTLOAD;
7026   } else if (ExtType == ISD::NON_EXTLOAD) {
7027     assert(VT == MemVT && "Non-extending load from different memory type!");
7028   } else {
7029     // Extending load.
7030     assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
7031            "Should only be an extending load, not truncating!");
7032     assert(VT.isInteger() == MemVT.isInteger() &&
7033            "Cannot convert from FP to Int or Int -> FP!");
7034     assert(VT.isVector() == MemVT.isVector() &&
7035            "Cannot use an ext load to convert to or from a vector!");
7036     assert((!VT.isVector() ||
7037             VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
7038            "Cannot use an ext load to change the number of vector elements!");
7039   }
7040 
7041   bool Indexed = AM != ISD::UNINDEXED;
7042   assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
7043 
7044   SDVTList VTs = Indexed ?
7045     getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
7046   SDValue Ops[] = { Chain, Ptr, Offset };
7047   FoldingSetNodeID ID;
7048   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
7049   ID.AddInteger(MemVT.getRawBits());
7050   ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
7051       dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
7052   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7053   void *IP = nullptr;
7054   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7055     cast<LoadSDNode>(E)->refineAlignment(MMO);
7056     return SDValue(E, 0);
7057   }
7058   auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7059                                   ExtType, MemVT, MMO);
7060   createOperands(N, Ops);
7061 
7062   CSEMap.InsertNode(N, IP);
7063   InsertNode(N);
7064   SDValue V(N, 0);
7065   NewSDValueDbgMsg(V, "Creating new node: ", this);
7066   return V;
7067 }
7068 
7069 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7070                               SDValue Ptr, MachinePointerInfo PtrInfo,
7071                               MaybeAlign Alignment,
7072                               MachineMemOperand::Flags MMOFlags,
7073                               const AAMDNodes &AAInfo, const MDNode *Ranges) {
7074   SDValue Undef = getUNDEF(Ptr.getValueType());
7075   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7076                  PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
7077 }
7078 
7079 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7080                               SDValue Ptr, MachineMemOperand *MMO) {
7081   SDValue Undef = getUNDEF(Ptr.getValueType());
7082   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7083                  VT, MMO);
7084 }
7085 
7086 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
7087                                  EVT VT, SDValue Chain, SDValue Ptr,
7088                                  MachinePointerInfo PtrInfo, EVT MemVT,
7089                                  MaybeAlign Alignment,
7090                                  MachineMemOperand::Flags MMOFlags,
7091                                  const AAMDNodes &AAInfo) {
7092   SDValue Undef = getUNDEF(Ptr.getValueType());
7093   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
7094                  MemVT, Alignment, MMOFlags, AAInfo);
7095 }
7096 
7097 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
7098                                  EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
7099                                  MachineMemOperand *MMO) {
7100   SDValue Undef = getUNDEF(Ptr.getValueType());
7101   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
7102                  MemVT, MMO);
7103 }
7104 
7105 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
7106                                      SDValue Base, SDValue Offset,
7107                                      ISD::MemIndexedMode AM) {
7108   LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
7109   assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
7110   // Don't propagate the invariant or dereferenceable flags.
7111   auto MMOFlags =
7112       LD->getMemOperand()->getFlags() &
7113       ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
7114   return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
7115                  LD->getChain(), Base, Offset, LD->getPointerInfo(),
7116                  LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
7117 }
7118 
7119 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7120                                SDValue Ptr, MachinePointerInfo PtrInfo,
7121                                Align Alignment,
7122                                MachineMemOperand::Flags MMOFlags,
7123                                const AAMDNodes &AAInfo) {
7124   assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7125 
7126   MMOFlags |= MachineMemOperand::MOStore;
7127   assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7128 
7129   if (PtrInfo.V.isNull())
7130     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7131 
7132   MachineFunction &MF = getMachineFunction();
7133   uint64_t Size =
7134       MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
7135   MachineMemOperand *MMO =
7136       MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
7137   return getStore(Chain, dl, Val, Ptr, MMO);
7138 }
7139 
7140 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7141                                SDValue Ptr, MachineMemOperand *MMO) {
7142   assert(Chain.getValueType() == MVT::Other &&
7143         "Invalid chain type");
7144   EVT VT = Val.getValueType();
7145   SDVTList VTs = getVTList(MVT::Other);
7146   SDValue Undef = getUNDEF(Ptr.getValueType());
7147   SDValue Ops[] = { Chain, Val, Ptr, Undef };
7148   FoldingSetNodeID ID;
7149   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7150   ID.AddInteger(VT.getRawBits());
7151   ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
7152       dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
7153   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7154   void *IP = nullptr;
7155   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7156     cast<StoreSDNode>(E)->refineAlignment(MMO);
7157     return SDValue(E, 0);
7158   }
7159   auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7160                                    ISD::UNINDEXED, false, VT, MMO);
7161   createOperands(N, Ops);
7162 
7163   CSEMap.InsertNode(N, IP);
7164   InsertNode(N);
7165   SDValue V(N, 0);
7166   NewSDValueDbgMsg(V, "Creating new node: ", this);
7167   return V;
7168 }
7169 
7170 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7171                                     SDValue Ptr, MachinePointerInfo PtrInfo,
7172                                     EVT SVT, Align Alignment,
7173                                     MachineMemOperand::Flags MMOFlags,
7174                                     const AAMDNodes &AAInfo) {
7175   assert(Chain.getValueType() == MVT::Other &&
7176         "Invalid chain type");
7177 
7178   MMOFlags |= MachineMemOperand::MOStore;
7179   assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7180 
7181   if (PtrInfo.V.isNull())
7182     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7183 
7184   MachineFunction &MF = getMachineFunction();
7185   MachineMemOperand *MMO = MF.getMachineMemOperand(
7186       PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
7187       Alignment, AAInfo);
7188   return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
7189 }
7190 
7191 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7192                                     SDValue Ptr, EVT SVT,
7193                                     MachineMemOperand *MMO) {
7194   EVT VT = Val.getValueType();
7195 
7196   assert(Chain.getValueType() == MVT::Other &&
7197         "Invalid chain type");
7198   if (VT == SVT)
7199     return getStore(Chain, dl, Val, Ptr, MMO);
7200 
7201   assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
7202          "Should only be a truncating store, not extending!");
7203   assert(VT.isInteger() == SVT.isInteger() &&
7204          "Can't do FP-INT conversion!");
7205   assert(VT.isVector() == SVT.isVector() &&
7206          "Cannot use trunc store to convert to or from a vector!");
7207   assert((!VT.isVector() ||
7208           VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
7209          "Cannot use trunc store to change the number of vector elements!");
7210 
7211   SDVTList VTs = getVTList(MVT::Other);
7212   SDValue Undef = getUNDEF(Ptr.getValueType());
7213   SDValue Ops[] = { Chain, Val, Ptr, Undef };
7214   FoldingSetNodeID ID;
7215   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7216   ID.AddInteger(SVT.getRawBits());
7217   ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
7218       dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
7219   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7220   void *IP = nullptr;
7221   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7222     cast<StoreSDNode>(E)->refineAlignment(MMO);
7223     return SDValue(E, 0);
7224   }
7225   auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7226                                    ISD::UNINDEXED, true, SVT, MMO);
7227   createOperands(N, Ops);
7228 
7229   CSEMap.InsertNode(N, IP);
7230   InsertNode(N);
7231   SDValue V(N, 0);
7232   NewSDValueDbgMsg(V, "Creating new node: ", this);
7233   return V;
7234 }
7235 
7236 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
7237                                       SDValue Base, SDValue Offset,
7238                                       ISD::MemIndexedMode AM) {
7239   StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
7240   assert(ST->getOffset().isUndef() && "Store is already a indexed store!");
7241   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
7242   SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
7243   FoldingSetNodeID ID;
7244   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7245   ID.AddInteger(ST->getMemoryVT().getRawBits());
7246   ID.AddInteger(ST->getRawSubclassData());
7247   ID.AddInteger(ST->getPointerInfo().getAddrSpace());
7248   void *IP = nullptr;
7249   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
7250     return SDValue(E, 0);
7251 
7252   auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7253                                    ST->isTruncatingStore(), ST->getMemoryVT(),
7254                                    ST->getMemOperand());
7255   createOperands(N, Ops);
7256 
7257   CSEMap.InsertNode(N, IP);
7258   InsertNode(N);
7259   SDValue V(N, 0);
7260   NewSDValueDbgMsg(V, "Creating new node: ", this);
7261   return V;
7262 }
7263 
7264 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7265                                     SDValue Base, SDValue Offset, SDValue Mask,
7266                                     SDValue PassThru, EVT MemVT,
7267                                     MachineMemOperand *MMO,
7268                                     ISD::MemIndexedMode AM,
7269                                     ISD::LoadExtType ExtTy, bool isExpanding) {
7270   bool Indexed = AM != ISD::UNINDEXED;
7271   assert((Indexed || Offset.isUndef()) &&
7272          "Unindexed masked load with an offset!");
7273   SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
7274                          : getVTList(VT, MVT::Other);
7275   SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
7276   FoldingSetNodeID ID;
7277   AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
7278   ID.AddInteger(MemVT.getRawBits());
7279   ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
7280       dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
7281   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7282   void *IP = nullptr;
7283   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7284     cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
7285     return SDValue(E, 0);
7286   }
7287   auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7288                                         AM, ExtTy, isExpanding, MemVT, MMO);
7289   createOperands(N, Ops);
7290 
7291   CSEMap.InsertNode(N, IP);
7292   InsertNode(N);
7293   SDValue V(N, 0);
7294   NewSDValueDbgMsg(V, "Creating new node: ", this);
7295   return V;
7296 }
7297 
7298 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
7299                                            SDValue Base, SDValue Offset,
7300                                            ISD::MemIndexedMode AM) {
7301   MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
7302   assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!");
7303   return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
7304                        Offset, LD->getMask(), LD->getPassThru(),
7305                        LD->getMemoryVT(), LD->getMemOperand(), AM,
7306                        LD->getExtensionType(), LD->isExpandingLoad());
7307 }
7308 
7309 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
7310                                      SDValue Val, SDValue Base, SDValue Offset,
7311                                      SDValue Mask, EVT MemVT,
7312                                      MachineMemOperand *MMO,
7313                                      ISD::MemIndexedMode AM, bool IsTruncating,
7314                                      bool IsCompressing) {
7315   assert(Chain.getValueType() == MVT::Other &&
7316         "Invalid chain type");
7317   bool Indexed = AM != ISD::UNINDEXED;
7318   assert((Indexed || Offset.isUndef()) &&
7319          "Unindexed masked store with an offset!");
7320   SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
7321                          : getVTList(MVT::Other);
7322   SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
7323   FoldingSetNodeID ID;
7324   AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
7325   ID.AddInteger(MemVT.getRawBits());
7326   ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
7327       dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
7328   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7329   void *IP = nullptr;
7330   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7331     cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
7332     return SDValue(E, 0);
7333   }
7334   auto *N =
7335       newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7336                                    IsTruncating, IsCompressing, MemVT, MMO);
7337   createOperands(N, Ops);
7338 
7339   CSEMap.InsertNode(N, IP);
7340   InsertNode(N);
7341   SDValue V(N, 0);
7342   NewSDValueDbgMsg(V, "Creating new node: ", this);
7343   return V;
7344 }
7345 
7346 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
7347                                             SDValue Base, SDValue Offset,
7348                                             ISD::MemIndexedMode AM) {
7349   MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
7350   assert(ST->getOffset().isUndef() &&
7351          "Masked store is already a indexed store!");
7352   return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
7353                         ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
7354                         AM, ST->isTruncatingStore(), ST->isCompressingStore());
7355 }
7356 
7357 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
7358                                       ArrayRef<SDValue> Ops,
7359                                       MachineMemOperand *MMO,
7360                                       ISD::MemIndexType IndexType,
7361                                       ISD::LoadExtType ExtTy) {
7362   assert(Ops.size() == 6 && "Incompatible number of operands");
7363 
7364   FoldingSetNodeID ID;
7365   AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
7366   ID.AddInteger(VT.getRawBits());
7367   ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
7368       dl.getIROrder(), VTs, VT, MMO, IndexType, ExtTy));
7369   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7370   void *IP = nullptr;
7371   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7372     cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
7373     return SDValue(E, 0);
7374   }
7375 
7376   IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
7377   auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
7378                                           VTs, VT, MMO, IndexType, ExtTy);
7379   createOperands(N, Ops);
7380 
7381   assert(N->getPassThru().getValueType() == N->getValueType(0) &&
7382          "Incompatible type of the PassThru value in MaskedGatherSDNode");
7383   assert(N->getMask().getValueType().getVectorElementCount() ==
7384              N->getValueType(0).getVectorElementCount() &&
7385          "Vector width mismatch between mask and data");
7386   assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
7387              N->getValueType(0).getVectorElementCount().isScalable() &&
7388          "Scalable flags of index and data do not match");
7389   assert(ElementCount::isKnownGE(
7390              N->getIndex().getValueType().getVectorElementCount(),
7391              N->getValueType(0).getVectorElementCount()) &&
7392          "Vector width mismatch between index and data");
7393   assert(isa<ConstantSDNode>(N->getScale()) &&
7394          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
7395          "Scale should be a constant power of 2");
7396 
7397   CSEMap.InsertNode(N, IP);
7398   InsertNode(N);
7399   SDValue V(N, 0);
7400   NewSDValueDbgMsg(V, "Creating new node: ", this);
7401   return V;
7402 }
7403 
7404 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
7405                                        ArrayRef<SDValue> Ops,
7406                                        MachineMemOperand *MMO,
7407                                        ISD::MemIndexType IndexType,
7408                                        bool IsTrunc) {
7409   assert(Ops.size() == 6 && "Incompatible number of operands");
7410 
7411   FoldingSetNodeID ID;
7412   AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
7413   ID.AddInteger(VT.getRawBits());
7414   ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
7415       dl.getIROrder(), VTs, VT, MMO, IndexType, IsTrunc));
7416   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7417   void *IP = nullptr;
7418   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7419     cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
7420     return SDValue(E, 0);
7421   }
7422 
7423   IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
7424   auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
7425                                            VTs, VT, MMO, IndexType, IsTrunc);
7426   createOperands(N, Ops);
7427 
7428   assert(N->getMask().getValueType().getVectorElementCount() ==
7429              N->getValue().getValueType().getVectorElementCount() &&
7430          "Vector width mismatch between mask and data");
7431   assert(
7432       N->getIndex().getValueType().getVectorElementCount().isScalable() ==
7433           N->getValue().getValueType().getVectorElementCount().isScalable() &&
7434       "Scalable flags of index and data do not match");
7435   assert(ElementCount::isKnownGE(
7436              N->getIndex().getValueType().getVectorElementCount(),
7437              N->getValue().getValueType().getVectorElementCount()) &&
7438          "Vector width mismatch between index and data");
7439   assert(isa<ConstantSDNode>(N->getScale()) &&
7440          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
7441          "Scale should be a constant power of 2");
7442 
7443   CSEMap.InsertNode(N, IP);
7444   InsertNode(N);
7445   SDValue V(N, 0);
7446   NewSDValueDbgMsg(V, "Creating new node: ", this);
7447   return V;
7448 }
7449 
7450 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
7451   // select undef, T, F --> T (if T is a constant), otherwise F
7452   // select, ?, undef, F --> F
7453   // select, ?, T, undef --> T
7454   if (Cond.isUndef())
7455     return isConstantValueOfAnyType(T) ? T : F;
7456   if (T.isUndef())
7457     return F;
7458   if (F.isUndef())
7459     return T;
7460 
7461   // select true, T, F --> T
7462   // select false, T, F --> F
7463   if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
7464     return CondC->isNullValue() ? F : T;
7465 
7466   // TODO: This should simplify VSELECT with constant condition using something
7467   // like this (but check boolean contents to be complete?):
7468   //  if (ISD::isBuildVectorAllOnes(Cond.getNode()))
7469   //    return T;
7470   //  if (ISD::isBuildVectorAllZeros(Cond.getNode()))
7471   //    return F;
7472 
7473   // select ?, T, T --> T
7474   if (T == F)
7475     return T;
7476 
7477   return SDValue();
7478 }
7479 
7480 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
7481   // shift undef, Y --> 0 (can always assume that the undef value is 0)
7482   if (X.isUndef())
7483     return getConstant(0, SDLoc(X.getNode()), X.getValueType());
7484   // shift X, undef --> undef (because it may shift by the bitwidth)
7485   if (Y.isUndef())
7486     return getUNDEF(X.getValueType());
7487 
7488   // shift 0, Y --> 0
7489   // shift X, 0 --> X
7490   if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
7491     return X;
7492 
7493   // shift X, C >= bitwidth(X) --> undef
7494   // All vector elements must be too big (or undef) to avoid partial undefs.
7495   auto isShiftTooBig = [X](ConstantSDNode *Val) {
7496     return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
7497   };
7498   if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
7499     return getUNDEF(X.getValueType());
7500 
7501   return SDValue();
7502 }
7503 
7504 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
7505                                       SDNodeFlags Flags) {
7506   // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
7507   // (an undef operand can be chosen to be Nan/Inf), then the result of this
7508   // operation is poison. That result can be relaxed to undef.
7509   ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true);
7510   ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
7511   bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
7512                 (YC && YC->getValueAPF().isNaN());
7513   bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
7514                 (YC && YC->getValueAPF().isInfinity());
7515 
7516   if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
7517     return getUNDEF(X.getValueType());
7518 
7519   if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
7520     return getUNDEF(X.getValueType());
7521 
7522   if (!YC)
7523     return SDValue();
7524 
7525   // X + -0.0 --> X
7526   if (Opcode == ISD::FADD)
7527     if (YC->getValueAPF().isNegZero())
7528       return X;
7529 
7530   // X - +0.0 --> X
7531   if (Opcode == ISD::FSUB)
7532     if (YC->getValueAPF().isPosZero())
7533       return X;
7534 
7535   // X * 1.0 --> X
7536   // X / 1.0 --> X
7537   if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
7538     if (YC->getValueAPF().isExactlyValue(1.0))
7539       return X;
7540 
7541   // X * 0.0 --> 0.0
7542   if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
7543     if (YC->getValueAPF().isZero())
7544       return getConstantFP(0.0, SDLoc(Y), Y.getValueType());
7545 
7546   return SDValue();
7547 }
7548 
7549 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
7550                                SDValue Ptr, SDValue SV, unsigned Align) {
7551   SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
7552   return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
7553 }
7554 
7555 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7556                               ArrayRef<SDUse> Ops) {
7557   switch (Ops.size()) {
7558   case 0: return getNode(Opcode, DL, VT);
7559   case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
7560   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
7561   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
7562   default: break;
7563   }
7564 
7565   // Copy from an SDUse array into an SDValue array for use with
7566   // the regular getNode logic.
7567   SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
7568   return getNode(Opcode, DL, VT, NewOps);
7569 }
7570 
7571 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7572                               ArrayRef<SDValue> Ops) {
7573   SDNodeFlags Flags;
7574   if (Inserter)
7575     Flags = Inserter->getFlags();
7576   return getNode(Opcode, DL, VT, Ops, Flags);
7577 }
7578 
7579 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7580                               ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
7581   unsigned NumOps = Ops.size();
7582   switch (NumOps) {
7583   case 0: return getNode(Opcode, DL, VT);
7584   case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
7585   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
7586   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
7587   default: break;
7588   }
7589 
7590   switch (Opcode) {
7591   default: break;
7592   case ISD::BUILD_VECTOR:
7593     // Attempt to simplify BUILD_VECTOR.
7594     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
7595       return V;
7596     break;
7597   case ISD::CONCAT_VECTORS:
7598     if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
7599       return V;
7600     break;
7601   case ISD::SELECT_CC:
7602     assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
7603     assert(Ops[0].getValueType() == Ops[1].getValueType() &&
7604            "LHS and RHS of condition must have same type!");
7605     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
7606            "True and False arms of SelectCC must have same type!");
7607     assert(Ops[2].getValueType() == VT &&
7608            "select_cc node must be of same type as true and false value!");
7609     break;
7610   case ISD::BR_CC:
7611     assert(NumOps == 5 && "BR_CC takes 5 operands!");
7612     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
7613            "LHS/RHS of comparison should match types!");
7614     break;
7615   }
7616 
7617   // Memoize nodes.
7618   SDNode *N;
7619   SDVTList VTs = getVTList(VT);
7620 
7621   if (VT != MVT::Glue) {
7622     FoldingSetNodeID ID;
7623     AddNodeIDNode(ID, Opcode, VTs, Ops);
7624     void *IP = nullptr;
7625 
7626     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
7627       return SDValue(E, 0);
7628 
7629     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7630     createOperands(N, Ops);
7631 
7632     CSEMap.InsertNode(N, IP);
7633   } else {
7634     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7635     createOperands(N, Ops);
7636   }
7637 
7638   N->setFlags(Flags);
7639   InsertNode(N);
7640   SDValue V(N, 0);
7641   NewSDValueDbgMsg(V, "Creating new node: ", this);
7642   return V;
7643 }
7644 
7645 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
7646                               ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
7647   return getNode(Opcode, DL, getVTList(ResultTys), Ops);
7648 }
7649 
7650 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7651                               ArrayRef<SDValue> Ops) {
7652   SDNodeFlags Flags;
7653   if (Inserter)
7654     Flags = Inserter->getFlags();
7655   return getNode(Opcode, DL, VTList, Ops, Flags);
7656 }
7657 
7658 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7659                               ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
7660   if (VTList.NumVTs == 1)
7661     return getNode(Opcode, DL, VTList.VTs[0], Ops);
7662 
7663   switch (Opcode) {
7664   case ISD::STRICT_FP_EXTEND:
7665     assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
7666            "Invalid STRICT_FP_EXTEND!");
7667     assert(VTList.VTs[0].isFloatingPoint() &&
7668            Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
7669     assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
7670            "STRICT_FP_EXTEND result type should be vector iff the operand "
7671            "type is vector!");
7672     assert((!VTList.VTs[0].isVector() ||
7673             VTList.VTs[0].getVectorNumElements() ==
7674             Ops[1].getValueType().getVectorNumElements()) &&
7675            "Vector element count mismatch!");
7676     assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
7677            "Invalid fpext node, dst <= src!");
7678     break;
7679   case ISD::STRICT_FP_ROUND:
7680     assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
7681     assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
7682            "STRICT_FP_ROUND result type should be vector iff the operand "
7683            "type is vector!");
7684     assert((!VTList.VTs[0].isVector() ||
7685             VTList.VTs[0].getVectorNumElements() ==
7686             Ops[1].getValueType().getVectorNumElements()) &&
7687            "Vector element count mismatch!");
7688     assert(VTList.VTs[0].isFloatingPoint() &&
7689            Ops[1].getValueType().isFloatingPoint() &&
7690            VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
7691            isa<ConstantSDNode>(Ops[2]) &&
7692            (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
7693             cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
7694            "Invalid STRICT_FP_ROUND!");
7695     break;
7696 #if 0
7697   // FIXME: figure out how to safely handle things like
7698   // int foo(int x) { return 1 << (x & 255); }
7699   // int bar() { return foo(256); }
7700   case ISD::SRA_PARTS:
7701   case ISD::SRL_PARTS:
7702   case ISD::SHL_PARTS:
7703     if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
7704         cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
7705       return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
7706     else if (N3.getOpcode() == ISD::AND)
7707       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
7708         // If the and is only masking out bits that cannot effect the shift,
7709         // eliminate the and.
7710         unsigned NumBits = VT.getScalarSizeInBits()*2;
7711         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
7712           return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
7713       }
7714     break;
7715 #endif
7716   }
7717 
7718   // Memoize the node unless it returns a flag.
7719   SDNode *N;
7720   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
7721     FoldingSetNodeID ID;
7722     AddNodeIDNode(ID, Opcode, VTList, Ops);
7723     void *IP = nullptr;
7724     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
7725       return SDValue(E, 0);
7726 
7727     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
7728     createOperands(N, Ops);
7729     CSEMap.InsertNode(N, IP);
7730   } else {
7731     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
7732     createOperands(N, Ops);
7733   }
7734 
7735   N->setFlags(Flags);
7736   InsertNode(N);
7737   SDValue V(N, 0);
7738   NewSDValueDbgMsg(V, "Creating new node: ", this);
7739   return V;
7740 }
7741 
7742 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
7743                               SDVTList VTList) {
7744   return getNode(Opcode, DL, VTList, None);
7745 }
7746 
7747 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7748                               SDValue N1) {
7749   SDValue Ops[] = { N1 };
7750   return getNode(Opcode, DL, VTList, Ops);
7751 }
7752 
7753 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7754                               SDValue N1, SDValue N2) {
7755   SDValue Ops[] = { N1, N2 };
7756   return getNode(Opcode, DL, VTList, Ops);
7757 }
7758 
7759 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7760                               SDValue N1, SDValue N2, SDValue N3) {
7761   SDValue Ops[] = { N1, N2, N3 };
7762   return getNode(Opcode, DL, VTList, Ops);
7763 }
7764 
7765 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7766                               SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
7767   SDValue Ops[] = { N1, N2, N3, N4 };
7768   return getNode(Opcode, DL, VTList, Ops);
7769 }
7770 
7771 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7772                               SDValue N1, SDValue N2, SDValue N3, SDValue N4,
7773                               SDValue N5) {
7774   SDValue Ops[] = { N1, N2, N3, N4, N5 };
7775   return getNode(Opcode, DL, VTList, Ops);
7776 }
7777 
7778 SDVTList SelectionDAG::getVTList(EVT VT) {
7779   return makeVTList(SDNode::getValueTypeList(VT), 1);
7780 }
7781 
7782 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
7783   FoldingSetNodeID ID;
7784   ID.AddInteger(2U);
7785   ID.AddInteger(VT1.getRawBits());
7786   ID.AddInteger(VT2.getRawBits());
7787 
7788   void *IP = nullptr;
7789   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7790   if (!Result) {
7791     EVT *Array = Allocator.Allocate<EVT>(2);
7792     Array[0] = VT1;
7793     Array[1] = VT2;
7794     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
7795     VTListMap.InsertNode(Result, IP);
7796   }
7797   return Result->getSDVTList();
7798 }
7799 
7800 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
7801   FoldingSetNodeID ID;
7802   ID.AddInteger(3U);
7803   ID.AddInteger(VT1.getRawBits());
7804   ID.AddInteger(VT2.getRawBits());
7805   ID.AddInteger(VT3.getRawBits());
7806 
7807   void *IP = nullptr;
7808   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7809   if (!Result) {
7810     EVT *Array = Allocator.Allocate<EVT>(3);
7811     Array[0] = VT1;
7812     Array[1] = VT2;
7813     Array[2] = VT3;
7814     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
7815     VTListMap.InsertNode(Result, IP);
7816   }
7817   return Result->getSDVTList();
7818 }
7819 
7820 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
7821   FoldingSetNodeID ID;
7822   ID.AddInteger(4U);
7823   ID.AddInteger(VT1.getRawBits());
7824   ID.AddInteger(VT2.getRawBits());
7825   ID.AddInteger(VT3.getRawBits());
7826   ID.AddInteger(VT4.getRawBits());
7827 
7828   void *IP = nullptr;
7829   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7830   if (!Result) {
7831     EVT *Array = Allocator.Allocate<EVT>(4);
7832     Array[0] = VT1;
7833     Array[1] = VT2;
7834     Array[2] = VT3;
7835     Array[3] = VT4;
7836     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
7837     VTListMap.InsertNode(Result, IP);
7838   }
7839   return Result->getSDVTList();
7840 }
7841 
7842 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
7843   unsigned NumVTs = VTs.size();
7844   FoldingSetNodeID ID;
7845   ID.AddInteger(NumVTs);
7846   for (unsigned index = 0; index < NumVTs; index++) {
7847     ID.AddInteger(VTs[index].getRawBits());
7848   }
7849 
7850   void *IP = nullptr;
7851   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7852   if (!Result) {
7853     EVT *Array = Allocator.Allocate<EVT>(NumVTs);
7854     llvm::copy(VTs, Array);
7855     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
7856     VTListMap.InsertNode(Result, IP);
7857   }
7858   return Result->getSDVTList();
7859 }
7860 
7861 
7862 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
7863 /// specified operands.  If the resultant node already exists in the DAG,
7864 /// this does not modify the specified node, instead it returns the node that
7865 /// already exists.  If the resultant node does not exist in the DAG, the
7866 /// input node is returned.  As a degenerate case, if you specify the same
7867 /// input operands as the node already has, the input node is returned.
7868 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
7869   assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
7870 
7871   // Check to see if there is no change.
7872   if (Op == N->getOperand(0)) return N;
7873 
7874   // See if the modified node already exists.
7875   void *InsertPos = nullptr;
7876   if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
7877     return Existing;
7878 
7879   // Nope it doesn't.  Remove the node from its current place in the maps.
7880   if (InsertPos)
7881     if (!RemoveNodeFromCSEMaps(N))
7882       InsertPos = nullptr;
7883 
7884   // Now we update the operands.
7885   N->OperandList[0].set(Op);
7886 
7887   updateDivergence(N);
7888   // If this gets put into a CSE map, add it.
7889   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7890   return N;
7891 }
7892 
7893 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
7894   assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
7895 
7896   // Check to see if there is no change.
7897   if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
7898     return N;   // No operands changed, just return the input node.
7899 
7900   // See if the modified node already exists.
7901   void *InsertPos = nullptr;
7902   if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
7903     return Existing;
7904 
7905   // Nope it doesn't.  Remove the node from its current place in the maps.
7906   if (InsertPos)
7907     if (!RemoveNodeFromCSEMaps(N))
7908       InsertPos = nullptr;
7909 
7910   // Now we update the operands.
7911   if (N->OperandList[0] != Op1)
7912     N->OperandList[0].set(Op1);
7913   if (N->OperandList[1] != Op2)
7914     N->OperandList[1].set(Op2);
7915 
7916   updateDivergence(N);
7917   // If this gets put into a CSE map, add it.
7918   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7919   return N;
7920 }
7921 
7922 SDNode *SelectionDAG::
7923 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
7924   SDValue Ops[] = { Op1, Op2, Op3 };
7925   return UpdateNodeOperands(N, Ops);
7926 }
7927 
7928 SDNode *SelectionDAG::
7929 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7930                    SDValue Op3, SDValue Op4) {
7931   SDValue Ops[] = { Op1, Op2, Op3, Op4 };
7932   return UpdateNodeOperands(N, Ops);
7933 }
7934 
7935 SDNode *SelectionDAG::
7936 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7937                    SDValue Op3, SDValue Op4, SDValue Op5) {
7938   SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
7939   return UpdateNodeOperands(N, Ops);
7940 }
7941 
7942 SDNode *SelectionDAG::
7943 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
7944   unsigned NumOps = Ops.size();
7945   assert(N->getNumOperands() == NumOps &&
7946          "Update with wrong number of operands");
7947 
7948   // If no operands changed just return the input node.
7949   if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
7950     return N;
7951 
7952   // See if the modified node already exists.
7953   void *InsertPos = nullptr;
7954   if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
7955     return Existing;
7956 
7957   // Nope it doesn't.  Remove the node from its current place in the maps.
7958   if (InsertPos)
7959     if (!RemoveNodeFromCSEMaps(N))
7960       InsertPos = nullptr;
7961 
7962   // Now we update the operands.
7963   for (unsigned i = 0; i != NumOps; ++i)
7964     if (N->OperandList[i] != Ops[i])
7965       N->OperandList[i].set(Ops[i]);
7966 
7967   updateDivergence(N);
7968   // If this gets put into a CSE map, add it.
7969   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7970   return N;
7971 }
7972 
7973 /// DropOperands - Release the operands and set this node to have
7974 /// zero operands.
7975 void SDNode::DropOperands() {
7976   // Unlike the code in MorphNodeTo that does this, we don't need to
7977   // watch for dead nodes here.
7978   for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
7979     SDUse &Use = *I++;
7980     Use.set(SDValue());
7981   }
7982 }
7983 
7984 void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
7985                                   ArrayRef<MachineMemOperand *> NewMemRefs) {
7986   if (NewMemRefs.empty()) {
7987     N->clearMemRefs();
7988     return;
7989   }
7990 
7991   // Check if we can avoid allocating by storing a single reference directly.
7992   if (NewMemRefs.size() == 1) {
7993     N->MemRefs = NewMemRefs[0];
7994     N->NumMemRefs = 1;
7995     return;
7996   }
7997 
7998   MachineMemOperand **MemRefsBuffer =
7999       Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
8000   llvm::copy(NewMemRefs, MemRefsBuffer);
8001   N->MemRefs = MemRefsBuffer;
8002   N->NumMemRefs = static_cast<int>(NewMemRefs.size());
8003 }
8004 
8005 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
8006 /// machine opcode.
8007 ///
8008 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8009                                    EVT VT) {
8010   SDVTList VTs = getVTList(VT);
8011   return SelectNodeTo(N, MachineOpc, VTs, None);
8012 }
8013 
8014 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8015                                    EVT VT, SDValue Op1) {
8016   SDVTList VTs = getVTList(VT);
8017   SDValue Ops[] = { Op1 };
8018   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8019 }
8020 
8021 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8022                                    EVT VT, SDValue Op1,
8023                                    SDValue Op2) {
8024   SDVTList VTs = getVTList(VT);
8025   SDValue Ops[] = { Op1, Op2 };
8026   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8027 }
8028 
8029 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8030                                    EVT VT, SDValue Op1,
8031                                    SDValue Op2, SDValue Op3) {
8032   SDVTList VTs = getVTList(VT);
8033   SDValue Ops[] = { Op1, Op2, Op3 };
8034   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8035 }
8036 
8037 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8038                                    EVT VT, ArrayRef<SDValue> Ops) {
8039   SDVTList VTs = getVTList(VT);
8040   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8041 }
8042 
8043 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8044                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
8045   SDVTList VTs = getVTList(VT1, VT2);
8046   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8047 }
8048 
8049 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8050                                    EVT VT1, EVT VT2) {
8051   SDVTList VTs = getVTList(VT1, VT2);
8052   return SelectNodeTo(N, MachineOpc, VTs, None);
8053 }
8054 
8055 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8056                                    EVT VT1, EVT VT2, EVT VT3,
8057                                    ArrayRef<SDValue> Ops) {
8058   SDVTList VTs = getVTList(VT1, VT2, VT3);
8059   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8060 }
8061 
8062 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8063                                    EVT VT1, EVT VT2,
8064                                    SDValue Op1, SDValue Op2) {
8065   SDVTList VTs = getVTList(VT1, VT2);
8066   SDValue Ops[] = { Op1, Op2 };
8067   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8068 }
8069 
8070 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8071                                    SDVTList VTs,ArrayRef<SDValue> Ops) {
8072   SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
8073   // Reset the NodeID to -1.
8074   New->setNodeId(-1);
8075   if (New != N) {
8076     ReplaceAllUsesWith(N, New);
8077     RemoveDeadNode(N);
8078   }
8079   return New;
8080 }
8081 
8082 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
8083 /// the line number information on the merged node since it is not possible to
8084 /// preserve the information that operation is associated with multiple lines.
8085 /// This will make the debugger working better at -O0, were there is a higher
8086 /// probability having other instructions associated with that line.
8087 ///
8088 /// For IROrder, we keep the smaller of the two
8089 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
8090   DebugLoc NLoc = N->getDebugLoc();
8091   if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
8092     N->setDebugLoc(DebugLoc());
8093   }
8094   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
8095   N->setIROrder(Order);
8096   return N;
8097 }
8098 
8099 /// MorphNodeTo - This *mutates* the specified node to have the specified
8100 /// return type, opcode, and operands.
8101 ///
8102 /// Note that MorphNodeTo returns the resultant node.  If there is already a
8103 /// node of the specified opcode and operands, it returns that node instead of
8104 /// the current one.  Note that the SDLoc need not be the same.
8105 ///
8106 /// Using MorphNodeTo is faster than creating a new node and swapping it in
8107 /// with ReplaceAllUsesWith both because it often avoids allocating a new
8108 /// node, and because it doesn't require CSE recalculation for any of
8109 /// the node's users.
8110 ///
8111 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
8112 /// As a consequence it isn't appropriate to use from within the DAG combiner or
8113 /// the legalizer which maintain worklists that would need to be updated when
8114 /// deleting things.
8115 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
8116                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
8117   // If an identical node already exists, use it.
8118   void *IP = nullptr;
8119   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
8120     FoldingSetNodeID ID;
8121     AddNodeIDNode(ID, Opc, VTs, Ops);
8122     if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
8123       return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
8124   }
8125 
8126   if (!RemoveNodeFromCSEMaps(N))
8127     IP = nullptr;
8128 
8129   // Start the morphing.
8130   N->NodeType = Opc;
8131   N->ValueList = VTs.VTs;
8132   N->NumValues = VTs.NumVTs;
8133 
8134   // Clear the operands list, updating used nodes to remove this from their
8135   // use list.  Keep track of any operands that become dead as a result.
8136   SmallPtrSet<SDNode*, 16> DeadNodeSet;
8137   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
8138     SDUse &Use = *I++;
8139     SDNode *Used = Use.getNode();
8140     Use.set(SDValue());
8141     if (Used->use_empty())
8142       DeadNodeSet.insert(Used);
8143   }
8144 
8145   // For MachineNode, initialize the memory references information.
8146   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
8147     MN->clearMemRefs();
8148 
8149   // Swap for an appropriately sized array from the recycler.
8150   removeOperands(N);
8151   createOperands(N, Ops);
8152 
8153   // Delete any nodes that are still dead after adding the uses for the
8154   // new operands.
8155   if (!DeadNodeSet.empty()) {
8156     SmallVector<SDNode *, 16> DeadNodes;
8157     for (SDNode *N : DeadNodeSet)
8158       if (N->use_empty())
8159         DeadNodes.push_back(N);
8160     RemoveDeadNodes(DeadNodes);
8161   }
8162 
8163   if (IP)
8164     CSEMap.InsertNode(N, IP);   // Memoize the new node.
8165   return N;
8166 }
8167 
8168 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
8169   unsigned OrigOpc = Node->getOpcode();
8170   unsigned NewOpc;
8171   switch (OrigOpc) {
8172   default:
8173     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
8174 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
8175   case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
8176 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
8177   case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
8178 #include "llvm/IR/ConstrainedOps.def"
8179   }
8180 
8181   assert(Node->getNumValues() == 2 && "Unexpected number of results!");
8182 
8183   // We're taking this node out of the chain, so we need to re-link things.
8184   SDValue InputChain = Node->getOperand(0);
8185   SDValue OutputChain = SDValue(Node, 1);
8186   ReplaceAllUsesOfValueWith(OutputChain, InputChain);
8187 
8188   SmallVector<SDValue, 3> Ops;
8189   for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
8190     Ops.push_back(Node->getOperand(i));
8191 
8192   SDVTList VTs = getVTList(Node->getValueType(0));
8193   SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);
8194 
8195   // MorphNodeTo can operate in two ways: if an existing node with the
8196   // specified operands exists, it can just return it.  Otherwise, it
8197   // updates the node in place to have the requested operands.
8198   if (Res == Node) {
8199     // If we updated the node in place, reset the node ID.  To the isel,
8200     // this should be just like a newly allocated machine node.
8201     Res->setNodeId(-1);
8202   } else {
8203     ReplaceAllUsesWith(Node, Res);
8204     RemoveDeadNode(Node);
8205   }
8206 
8207   return Res;
8208 }
8209 
8210 /// getMachineNode - These are used for target selectors to create a new node
8211 /// with specified return type(s), MachineInstr opcode, and operands.
8212 ///
8213 /// Note that getMachineNode returns the resultant node.  If there is already a
8214 /// node of the specified opcode and operands, it returns that node instead of
8215 /// the current one.
8216 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8217                                             EVT VT) {
8218   SDVTList VTs = getVTList(VT);
8219   return getMachineNode(Opcode, dl, VTs, None);
8220 }
8221 
8222 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8223                                             EVT VT, SDValue Op1) {
8224   SDVTList VTs = getVTList(VT);
8225   SDValue Ops[] = { Op1 };
8226   return getMachineNode(Opcode, dl, VTs, Ops);
8227 }
8228 
8229 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8230                                             EVT VT, SDValue Op1, SDValue Op2) {
8231   SDVTList VTs = getVTList(VT);
8232   SDValue Ops[] = { Op1, Op2 };
8233   return getMachineNode(Opcode, dl, VTs, Ops);
8234 }
8235 
8236 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8237                                             EVT VT, SDValue Op1, SDValue Op2,
8238                                             SDValue Op3) {
8239   SDVTList VTs = getVTList(VT);
8240   SDValue Ops[] = { Op1, Op2, Op3 };
8241   return getMachineNode(Opcode, dl, VTs, Ops);
8242 }
8243 
8244 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8245                                             EVT VT, ArrayRef<SDValue> Ops) {
8246   SDVTList VTs = getVTList(VT);
8247   return getMachineNode(Opcode, dl, VTs, Ops);
8248 }
8249 
8250 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8251                                             EVT VT1, EVT VT2, SDValue Op1,
8252                                             SDValue Op2) {
8253   SDVTList VTs = getVTList(VT1, VT2);
8254   SDValue Ops[] = { Op1, Op2 };
8255   return getMachineNode(Opcode, dl, VTs, Ops);
8256 }
8257 
8258 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8259                                             EVT VT1, EVT VT2, SDValue Op1,
8260                                             SDValue Op2, SDValue Op3) {
8261   SDVTList VTs = getVTList(VT1, VT2);
8262   SDValue Ops[] = { Op1, Op2, Op3 };
8263   return getMachineNode(Opcode, dl, VTs, Ops);
8264 }
8265 
8266 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8267                                             EVT VT1, EVT VT2,
8268                                             ArrayRef<SDValue> Ops) {
8269   SDVTList VTs = getVTList(VT1, VT2);
8270   return getMachineNode(Opcode, dl, VTs, Ops);
8271 }
8272 
8273 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8274                                             EVT VT1, EVT VT2, EVT VT3,
8275                                             SDValue Op1, SDValue Op2) {
8276   SDVTList VTs = getVTList(VT1, VT2, VT3);
8277   SDValue Ops[] = { Op1, Op2 };
8278   return getMachineNode(Opcode, dl, VTs, Ops);
8279 }
8280 
8281 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8282                                             EVT VT1, EVT VT2, EVT VT3,
8283                                             SDValue Op1, SDValue Op2,
8284                                             SDValue Op3) {
8285   SDVTList VTs = getVTList(VT1, VT2, VT3);
8286   SDValue Ops[] = { Op1, Op2, Op3 };
8287   return getMachineNode(Opcode, dl, VTs, Ops);
8288 }
8289 
8290 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8291                                             EVT VT1, EVT VT2, EVT VT3,
8292                                             ArrayRef<SDValue> Ops) {
8293   SDVTList VTs = getVTList(VT1, VT2, VT3);
8294   return getMachineNode(Opcode, dl, VTs, Ops);
8295 }
8296 
8297 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8298                                             ArrayRef<EVT> ResultTys,
8299                                             ArrayRef<SDValue> Ops) {
8300   SDVTList VTs = getVTList(ResultTys);
8301   return getMachineNode(Opcode, dl, VTs, Ops);
8302 }
8303 
8304 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
8305                                             SDVTList VTs,
8306                                             ArrayRef<SDValue> Ops) {
8307   bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
8308   MachineSDNode *N;
8309   void *IP = nullptr;
8310 
8311   if (DoCSE) {
8312     FoldingSetNodeID ID;
8313     AddNodeIDNode(ID, ~Opcode, VTs, Ops);
8314     IP = nullptr;
8315     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
8316       return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
8317     }
8318   }
8319 
8320   // Allocate a new MachineSDNode.
8321   N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8322   createOperands(N, Ops);
8323 
8324   if (DoCSE)
8325     CSEMap.InsertNode(N, IP);
8326 
8327   InsertNode(N);
8328   NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
8329   return N;
8330 }
8331 
8332 /// getTargetExtractSubreg - A convenience function for creating
8333 /// TargetOpcode::EXTRACT_SUBREG nodes.
8334 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
8335                                              SDValue Operand) {
8336   SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
8337   SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
8338                                   VT, Operand, SRIdxVal);
8339   return SDValue(Subreg, 0);
8340 }
8341 
8342 /// getTargetInsertSubreg - A convenience function for creating
8343 /// TargetOpcode::INSERT_SUBREG nodes.
8344 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
8345                                             SDValue Operand, SDValue Subreg) {
8346   SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
8347   SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
8348                                   VT, Operand, Subreg, SRIdxVal);
8349   return SDValue(Result, 0);
8350 }
8351 
8352 /// getNodeIfExists - Get the specified node if it's already available, or
8353 /// else return NULL.
8354 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
8355                                       ArrayRef<SDValue> Ops) {
8356   SDNodeFlags Flags;
8357   if (Inserter)
8358     Flags = Inserter->getFlags();
8359   return getNodeIfExists(Opcode, VTList, Ops, Flags);
8360 }
8361 
8362 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
8363                                       ArrayRef<SDValue> Ops,
8364                                       const SDNodeFlags Flags) {
8365   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
8366     FoldingSetNodeID ID;
8367     AddNodeIDNode(ID, Opcode, VTList, Ops);
8368     void *IP = nullptr;
8369     if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
8370       E->intersectFlagsWith(Flags);
8371       return E;
8372     }
8373   }
8374   return nullptr;
8375 }
8376 
8377 /// doesNodeExist - Check if a node exists without modifying its flags.
8378 bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
8379                                  ArrayRef<SDValue> Ops) {
8380   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
8381     FoldingSetNodeID ID;
8382     AddNodeIDNode(ID, Opcode, VTList, Ops);
8383     void *IP = nullptr;
8384     if (FindNodeOrInsertPos(ID, SDLoc(), IP))
8385       return true;
8386   }
8387   return false;
8388 }
8389 
8390 /// getDbgValue - Creates a SDDbgValue node.
8391 ///
8392 /// SDNode
8393 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
8394                                       SDNode *N, unsigned R, bool IsIndirect,
8395                                       const DebugLoc &DL, unsigned O) {
8396   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8397          "Expected inlined-at fields to agree");
8398   return new (DbgInfo->getAlloc())
8399       SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
8400 }
8401 
8402 /// Constant
8403 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
8404                                               DIExpression *Expr,
8405                                               const Value *C,
8406                                               const DebugLoc &DL, unsigned O) {
8407   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8408          "Expected inlined-at fields to agree");
8409   return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
8410 }
8411 
8412 /// FrameIndex
8413 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
8414                                                 DIExpression *Expr, unsigned FI,
8415                                                 bool IsIndirect,
8416                                                 const DebugLoc &DL,
8417                                                 unsigned O) {
8418   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8419          "Expected inlined-at fields to agree");
8420   return new (DbgInfo->getAlloc())
8421       SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX);
8422 }
8423 
8424 /// VReg
8425 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var,
8426                                           DIExpression *Expr,
8427                                           unsigned VReg, bool IsIndirect,
8428                                           const DebugLoc &DL, unsigned O) {
8429   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8430          "Expected inlined-at fields to agree");
8431   return new (DbgInfo->getAlloc())
8432       SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG);
8433 }
8434 
8435 void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
8436                                      unsigned OffsetInBits, unsigned SizeInBits,
8437                                      bool InvalidateDbg) {
8438   SDNode *FromNode = From.getNode();
8439   SDNode *ToNode = To.getNode();
8440   assert(FromNode && ToNode && "Can't modify dbg values");
8441 
8442   // PR35338
8443   // TODO: assert(From != To && "Redundant dbg value transfer");
8444   // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
8445   if (From == To || FromNode == ToNode)
8446     return;
8447 
8448   if (!FromNode->getHasDebugValue())
8449     return;
8450 
8451   SmallVector<SDDbgValue *, 2> ClonedDVs;
8452   for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
8453     if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
8454       continue;
8455 
8456     // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
8457 
8458     // Just transfer the dbg value attached to From.
8459     if (Dbg->getResNo() != From.getResNo())
8460       continue;
8461 
8462     DIVariable *Var = Dbg->getVariable();
8463     auto *Expr = Dbg->getExpression();
8464     // If a fragment is requested, update the expression.
8465     if (SizeInBits) {
8466       // When splitting a larger (e.g., sign-extended) value whose
8467       // lower bits are described with an SDDbgValue, do not attempt
8468       // to transfer the SDDbgValue to the upper bits.
8469       if (auto FI = Expr->getFragmentInfo())
8470         if (OffsetInBits + SizeInBits > FI->SizeInBits)
8471           continue;
8472       auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
8473                                                              SizeInBits);
8474       if (!Fragment)
8475         continue;
8476       Expr = *Fragment;
8477     }
8478     // Clone the SDDbgValue and move it to To.
8479     SDDbgValue *Clone = getDbgValue(
8480         Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(),
8481         std::max(ToNode->getIROrder(), Dbg->getOrder()));
8482     ClonedDVs.push_back(Clone);
8483 
8484     if (InvalidateDbg) {
8485       // Invalidate value and indicate the SDDbgValue should not be emitted.
8486       Dbg->setIsInvalidated();
8487       Dbg->setIsEmitted();
8488     }
8489   }
8490 
8491   for (SDDbgValue *Dbg : ClonedDVs)
8492     AddDbgValue(Dbg, ToNode, false);
8493 }
8494 
8495 void SelectionDAG::salvageDebugInfo(SDNode &N) {
8496   if (!N.getHasDebugValue())
8497     return;
8498 
8499   SmallVector<SDDbgValue *, 2> ClonedDVs;
8500   for (auto DV : GetDbgValues(&N)) {
8501     if (DV->isInvalidated())
8502       continue;
8503     switch (N.getOpcode()) {
8504     default:
8505       break;
8506     case ISD::ADD:
8507       SDValue N0 = N.getOperand(0);
8508       SDValue N1 = N.getOperand(1);
8509       if (!isConstantIntBuildVectorOrConstantInt(N0) &&
8510           isConstantIntBuildVectorOrConstantInt(N1)) {
8511         uint64_t Offset = N.getConstantOperandVal(1);
8512         // Rewrite an ADD constant node into a DIExpression. Since we are
8513         // performing arithmetic to compute the variable's *value* in the
8514         // DIExpression, we need to mark the expression with a
8515         // DW_OP_stack_value.
8516         auto *DIExpr = DV->getExpression();
8517         DIExpr =
8518             DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset);
8519         SDDbgValue *Clone =
8520             getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
8521                         DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
8522         ClonedDVs.push_back(Clone);
8523         DV->setIsInvalidated();
8524         DV->setIsEmitted();
8525         LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
8526                    N0.getNode()->dumprFull(this);
8527                    dbgs() << " into " << *DIExpr << '\n');
8528       }
8529     }
8530   }
8531 
8532   for (SDDbgValue *Dbg : ClonedDVs)
8533     AddDbgValue(Dbg, Dbg->getSDNode(), false);
8534 }
8535 
8536 /// Creates a SDDbgLabel node.
8537 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
8538                                       const DebugLoc &DL, unsigned O) {
8539   assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
8540          "Expected inlined-at fields to agree");
8541   return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
8542 }
8543 
8544 namespace {
8545 
8546 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
8547 /// pointed to by a use iterator is deleted, increment the use iterator
8548 /// so that it doesn't dangle.
8549 ///
8550 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
8551   SDNode::use_iterator &UI;
8552   SDNode::use_iterator &UE;
8553 
8554   void NodeDeleted(SDNode *N, SDNode *E) override {
8555     // Increment the iterator as needed.
8556     while (UI != UE && N == *UI)
8557       ++UI;
8558   }
8559 
8560 public:
8561   RAUWUpdateListener(SelectionDAG &d,
8562                      SDNode::use_iterator &ui,
8563                      SDNode::use_iterator &ue)
8564     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
8565 };
8566 
8567 } // end anonymous namespace
8568 
8569 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8570 /// This can cause recursive merging of nodes in the DAG.
8571 ///
8572 /// This version assumes From has a single result value.
8573 ///
8574 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
8575   SDNode *From = FromN.getNode();
8576   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
8577          "Cannot replace with this method!");
8578   assert(From != To.getNode() && "Cannot replace uses of with self");
8579 
8580   // Preserve Debug Values
8581   transferDbgValues(FromN, To);
8582 
8583   // Iterate over all the existing uses of From. New uses will be added
8584   // to the beginning of the use list, which we avoid visiting.
8585   // This specifically avoids visiting uses of From that arise while the
8586   // replacement is happening, because any such uses would be the result
8587   // of CSE: If an existing node looks like From after one of its operands
8588   // is replaced by To, we don't want to replace of all its users with To
8589   // too. See PR3018 for more info.
8590   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8591   RAUWUpdateListener Listener(*this, UI, UE);
8592   while (UI != UE) {
8593     SDNode *User = *UI;
8594 
8595     // This node is about to morph, remove its old self from the CSE maps.
8596     RemoveNodeFromCSEMaps(User);
8597 
8598     // A user can appear in a use list multiple times, and when this
8599     // happens the uses are usually next to each other in the list.
8600     // To help reduce the number of CSE recomputations, process all
8601     // the uses of this user that we can find this way.
8602     do {
8603       SDUse &Use = UI.getUse();
8604       ++UI;
8605       Use.set(To);
8606       if (To->isDivergent() != From->isDivergent())
8607         updateDivergence(User);
8608     } while (UI != UE && *UI == User);
8609     // Now that we have modified User, add it back to the CSE maps.  If it
8610     // already exists there, recursively merge the results together.
8611     AddModifiedNodeToCSEMaps(User);
8612   }
8613 
8614   // If we just RAUW'd the root, take note.
8615   if (FromN == getRoot())
8616     setRoot(To);
8617 }
8618 
8619 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8620 /// This can cause recursive merging of nodes in the DAG.
8621 ///
8622 /// This version assumes that for each value of From, there is a
8623 /// corresponding value in To in the same position with the same type.
8624 ///
8625 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
8626 #ifndef NDEBUG
8627   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8628     assert((!From->hasAnyUseOfValue(i) ||
8629             From->getValueType(i) == To->getValueType(i)) &&
8630            "Cannot use this version of ReplaceAllUsesWith!");
8631 #endif
8632 
8633   // Handle the trivial case.
8634   if (From == To)
8635     return;
8636 
8637   // Preserve Debug Info. Only do this if there's a use.
8638   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8639     if (From->hasAnyUseOfValue(i)) {
8640       assert((i < To->getNumValues()) && "Invalid To location");
8641       transferDbgValues(SDValue(From, i), SDValue(To, i));
8642     }
8643 
8644   // Iterate over just the existing users of From. See the comments in
8645   // the ReplaceAllUsesWith above.
8646   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8647   RAUWUpdateListener Listener(*this, UI, UE);
8648   while (UI != UE) {
8649     SDNode *User = *UI;
8650 
8651     // This node is about to morph, remove its old self from the CSE maps.
8652     RemoveNodeFromCSEMaps(User);
8653 
8654     // A user can appear in a use list multiple times, and when this
8655     // happens the uses are usually next to each other in the list.
8656     // To help reduce the number of CSE recomputations, process all
8657     // the uses of this user that we can find this way.
8658     do {
8659       SDUse &Use = UI.getUse();
8660       ++UI;
8661       Use.setNode(To);
8662       if (To->isDivergent() != From->isDivergent())
8663         updateDivergence(User);
8664     } while (UI != UE && *UI == User);
8665 
8666     // Now that we have modified User, add it back to the CSE maps.  If it
8667     // already exists there, recursively merge the results together.
8668     AddModifiedNodeToCSEMaps(User);
8669   }
8670 
8671   // If we just RAUW'd the root, take note.
8672   if (From == getRoot().getNode())
8673     setRoot(SDValue(To, getRoot().getResNo()));
8674 }
8675 
8676 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8677 /// This can cause recursive merging of nodes in the DAG.
8678 ///
8679 /// This version can replace From with any result values.  To must match the
8680 /// number and types of values returned by From.
8681 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
8682   if (From->getNumValues() == 1)  // Handle the simple case efficiently.
8683     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
8684 
8685   // Preserve Debug Info.
8686   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8687     transferDbgValues(SDValue(From, i), To[i]);
8688 
8689   // Iterate over just the existing users of From. See the comments in
8690   // the ReplaceAllUsesWith above.
8691   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8692   RAUWUpdateListener Listener(*this, UI, UE);
8693   while (UI != UE) {
8694     SDNode *User = *UI;
8695 
8696     // This node is about to morph, remove its old self from the CSE maps.
8697     RemoveNodeFromCSEMaps(User);
8698 
8699     // A user can appear in a use list multiple times, and when this happens the
8700     // uses are usually next to each other in the list.  To help reduce the
8701     // number of CSE and divergence recomputations, process all the uses of this
8702     // user that we can find this way.
8703     bool To_IsDivergent = false;
8704     do {
8705       SDUse &Use = UI.getUse();
8706       const SDValue &ToOp = To[Use.getResNo()];
8707       ++UI;
8708       Use.set(ToOp);
8709       To_IsDivergent |= ToOp->isDivergent();
8710     } while (UI != UE && *UI == User);
8711 
8712     if (To_IsDivergent != From->isDivergent())
8713       updateDivergence(User);
8714 
8715     // Now that we have modified User, add it back to the CSE maps.  If it
8716     // already exists there, recursively merge the results together.
8717     AddModifiedNodeToCSEMaps(User);
8718   }
8719 
8720   // If we just RAUW'd the root, take note.
8721   if (From == getRoot().getNode())
8722     setRoot(SDValue(To[getRoot().getResNo()]));
8723 }
8724 
8725 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
8726 /// uses of other values produced by From.getNode() alone.  The Deleted
8727 /// vector is handled the same way as for ReplaceAllUsesWith.
8728 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
8729   // Handle the really simple, really trivial case efficiently.
8730   if (From == To) return;
8731 
8732   // Handle the simple, trivial, case efficiently.
8733   if (From.getNode()->getNumValues() == 1) {
8734     ReplaceAllUsesWith(From, To);
8735     return;
8736   }
8737 
8738   // Preserve Debug Info.
8739   transferDbgValues(From, To);
8740 
8741   // Iterate over just the existing users of From. See the comments in
8742   // the ReplaceAllUsesWith above.
8743   SDNode::use_iterator UI = From.getNode()->use_begin(),
8744                        UE = From.getNode()->use_end();
8745   RAUWUpdateListener Listener(*this, UI, UE);
8746   while (UI != UE) {
8747     SDNode *User = *UI;
8748     bool UserRemovedFromCSEMaps = false;
8749 
8750     // A user can appear in a use list multiple times, and when this
8751     // happens the uses are usually next to each other in the list.
8752     // To help reduce the number of CSE recomputations, process all
8753     // the uses of this user that we can find this way.
8754     do {
8755       SDUse &Use = UI.getUse();
8756 
8757       // Skip uses of different values from the same node.
8758       if (Use.getResNo() != From.getResNo()) {
8759         ++UI;
8760         continue;
8761       }
8762 
8763       // If this node hasn't been modified yet, it's still in the CSE maps,
8764       // so remove its old self from the CSE maps.
8765       if (!UserRemovedFromCSEMaps) {
8766         RemoveNodeFromCSEMaps(User);
8767         UserRemovedFromCSEMaps = true;
8768       }
8769 
8770       ++UI;
8771       Use.set(To);
8772       if (To->isDivergent() != From->isDivergent())
8773         updateDivergence(User);
8774     } while (UI != UE && *UI == User);
8775     // We are iterating over all uses of the From node, so if a use
8776     // doesn't use the specific value, no changes are made.
8777     if (!UserRemovedFromCSEMaps)
8778       continue;
8779 
8780     // Now that we have modified User, add it back to the CSE maps.  If it
8781     // already exists there, recursively merge the results together.
8782     AddModifiedNodeToCSEMaps(User);
8783   }
8784 
8785   // If we just RAUW'd the root, take note.
8786   if (From == getRoot())
8787     setRoot(To);
8788 }
8789 
8790 namespace {
8791 
8792   /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
8793   /// to record information about a use.
8794   struct UseMemo {
8795     SDNode *User;
8796     unsigned Index;
8797     SDUse *Use;
8798   };
8799 
8800   /// operator< - Sort Memos by User.
8801   bool operator<(const UseMemo &L, const UseMemo &R) {
8802     return (intptr_t)L.User < (intptr_t)R.User;
8803   }
8804 
8805 } // end anonymous namespace
8806 
8807 bool SelectionDAG::calculateDivergence(SDNode *N) {
8808   if (TLI->isSDNodeAlwaysUniform(N)) {
8809     assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, DA) &&
8810            "Conflicting divergence information!");
8811     return false;
8812   }
8813   if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA))
8814     return true;
8815   for (auto &Op : N->ops()) {
8816     if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
8817       return true;
8818   }
8819   return false;
8820 }
8821 
8822 void SelectionDAG::updateDivergence(SDNode *N) {
8823   SmallVector<SDNode *, 16> Worklist(1, N);
8824   do {
8825     N = Worklist.pop_back_val();
8826     bool IsDivergent = calculateDivergence(N);
8827     if (N->SDNodeBits.IsDivergent != IsDivergent) {
8828       N->SDNodeBits.IsDivergent = IsDivergent;
8829       llvm::append_range(Worklist, N->uses());
8830     }
8831   } while (!Worklist.empty());
8832 }
8833 
8834 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
8835   DenseMap<SDNode *, unsigned> Degree;
8836   Order.reserve(AllNodes.size());
8837   for (auto &N : allnodes()) {
8838     unsigned NOps = N.getNumOperands();
8839     Degree[&N] = NOps;
8840     if (0 == NOps)
8841       Order.push_back(&N);
8842   }
8843   for (size_t I = 0; I != Order.size(); ++I) {
8844     SDNode *N = Order[I];
8845     for (auto U : N->uses()) {
8846       unsigned &UnsortedOps = Degree[U];
8847       if (0 == --UnsortedOps)
8848         Order.push_back(U);
8849     }
8850   }
8851 }
8852 
8853 #ifndef NDEBUG
8854 void SelectionDAG::VerifyDAGDiverence() {
8855   std::vector<SDNode *> TopoOrder;
8856   CreateTopologicalOrder(TopoOrder);
8857   for (auto *N : TopoOrder) {
8858     assert(calculateDivergence(N) == N->isDivergent() &&
8859            "Divergence bit inconsistency detected");
8860   }
8861 }
8862 #endif
8863 
8864 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
8865 /// uses of other values produced by From.getNode() alone.  The same value
8866 /// may appear in both the From and To list.  The Deleted vector is
8867 /// handled the same way as for ReplaceAllUsesWith.
8868 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
8869                                               const SDValue *To,
8870                                               unsigned Num){
8871   // Handle the simple, trivial case efficiently.
8872   if (Num == 1)
8873     return ReplaceAllUsesOfValueWith(*From, *To);
8874 
8875   transferDbgValues(*From, *To);
8876 
8877   // Read up all the uses and make records of them. This helps
8878   // processing new uses that are introduced during the
8879   // replacement process.
8880   SmallVector<UseMemo, 4> Uses;
8881   for (unsigned i = 0; i != Num; ++i) {
8882     unsigned FromResNo = From[i].getResNo();
8883     SDNode *FromNode = From[i].getNode();
8884     for (SDNode::use_iterator UI = FromNode->use_begin(),
8885          E = FromNode->use_end(); UI != E; ++UI) {
8886       SDUse &Use = UI.getUse();
8887       if (Use.getResNo() == FromResNo) {
8888         UseMemo Memo = { *UI, i, &Use };
8889         Uses.push_back(Memo);
8890       }
8891     }
8892   }
8893 
8894   // Sort the uses, so that all the uses from a given User are together.
8895   llvm::sort(Uses);
8896 
8897   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8898        UseIndex != UseIndexEnd; ) {
8899     // We know that this user uses some value of From.  If it is the right
8900     // value, update it.
8901     SDNode *User = Uses[UseIndex].User;
8902 
8903     // This node is about to morph, remove its old self from the CSE maps.
8904     RemoveNodeFromCSEMaps(User);
8905 
8906     // The Uses array is sorted, so all the uses for a given User
8907     // are next to each other in the list.
8908     // To help reduce the number of CSE recomputations, process all
8909     // the uses of this user that we can find this way.
8910     do {
8911       unsigned i = Uses[UseIndex].Index;
8912       SDUse &Use = *Uses[UseIndex].Use;
8913       ++UseIndex;
8914 
8915       Use.set(To[i]);
8916     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8917 
8918     // Now that we have modified User, add it back to the CSE maps.  If it
8919     // already exists there, recursively merge the results together.
8920     AddModifiedNodeToCSEMaps(User);
8921   }
8922 }
8923 
8924 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
8925 /// based on their topological order. It returns the maximum id and a vector
8926 /// of the SDNodes* in assigned order by reference.
8927 unsigned SelectionDAG::AssignTopologicalOrder() {
8928   unsigned DAGSize = 0;
8929 
8930   // SortedPos tracks the progress of the algorithm. Nodes before it are
8931   // sorted, nodes after it are unsorted. When the algorithm completes
8932   // it is at the end of the list.
8933   allnodes_iterator SortedPos = allnodes_begin();
8934 
8935   // Visit all the nodes. Move nodes with no operands to the front of
8936   // the list immediately. Annotate nodes that do have operands with their
8937   // operand count. Before we do this, the Node Id fields of the nodes
8938   // may contain arbitrary values. After, the Node Id fields for nodes
8939   // before SortedPos will contain the topological sort index, and the
8940   // Node Id fields for nodes At SortedPos and after will contain the
8941   // count of outstanding operands.
8942   for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
8943     SDNode *N = &*I++;
8944     checkForCycles(N, this);
8945     unsigned Degree = N->getNumOperands();
8946     if (Degree == 0) {
8947       // A node with no uses, add it to the result array immediately.
8948       N->setNodeId(DAGSize++);
8949       allnodes_iterator Q(N);
8950       if (Q != SortedPos)
8951         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
8952       assert(SortedPos != AllNodes.end() && "Overran node list");
8953       ++SortedPos;
8954     } else {
8955       // Temporarily use the Node Id as scratch space for the degree count.
8956       N->setNodeId(Degree);
8957     }
8958   }
8959 
8960   // Visit all the nodes. As we iterate, move nodes into sorted order,
8961   // such that by the time the end is reached all nodes will be sorted.
8962   for (SDNode &Node : allnodes()) {
8963     SDNode *N = &Node;
8964     checkForCycles(N, this);
8965     // N is in sorted position, so all its uses have one less operand
8966     // that needs to be sorted.
8967     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
8968          UI != UE; ++UI) {
8969       SDNode *P = *UI;
8970       unsigned Degree = P->getNodeId();
8971       assert(Degree != 0 && "Invalid node degree");
8972       --Degree;
8973       if (Degree == 0) {
8974         // All of P's operands are sorted, so P may sorted now.
8975         P->setNodeId(DAGSize++);
8976         if (P->getIterator() != SortedPos)
8977           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
8978         assert(SortedPos != AllNodes.end() && "Overran node list");
8979         ++SortedPos;
8980       } else {
8981         // Update P's outstanding operand count.
8982         P->setNodeId(Degree);
8983       }
8984     }
8985     if (Node.getIterator() == SortedPos) {
8986 #ifndef NDEBUG
8987       allnodes_iterator I(N);
8988       SDNode *S = &*++I;
8989       dbgs() << "Overran sorted position:\n";
8990       S->dumprFull(this); dbgs() << "\n";
8991       dbgs() << "Checking if this is due to cycles\n";
8992       checkForCycles(this, true);
8993 #endif
8994       llvm_unreachable(nullptr);
8995     }
8996   }
8997 
8998   assert(SortedPos == AllNodes.end() &&
8999          "Topological sort incomplete!");
9000   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
9001          "First node in topological sort is not the entry token!");
9002   assert(AllNodes.front().getNodeId() == 0 &&
9003          "First node in topological sort has non-zero id!");
9004   assert(AllNodes.front().getNumOperands() == 0 &&
9005          "First node in topological sort has operands!");
9006   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
9007          "Last node in topologic sort has unexpected id!");
9008   assert(AllNodes.back().use_empty() &&
9009          "Last node in topologic sort has users!");
9010   assert(DAGSize == allnodes_size() && "Node count mismatch!");
9011   return DAGSize;
9012 }
9013 
9014 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
9015 /// value is produced by SD.
9016 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
9017   if (SD) {
9018     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
9019     SD->setHasDebugValue(true);
9020   }
9021   DbgInfo->add(DB, SD, isParameter);
9022 }
9023 
9024 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
9025   DbgInfo->add(DB);
9026 }
9027 
9028 SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
9029                                                    SDValue NewMemOpChain) {
9030   assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
9031   assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
9032   // The new memory operation must have the same position as the old load in
9033   // terms of memory dependency. Create a TokenFactor for the old load and new
9034   // memory operation and update uses of the old load's output chain to use that
9035   // TokenFactor.
9036   if (OldChain == NewMemOpChain || OldChain.use_empty())
9037     return NewMemOpChain;
9038 
9039   SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
9040                                 OldChain, NewMemOpChain);
9041   ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
9042   UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
9043   return TokenFactor;
9044 }
9045 
9046 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
9047                                                    SDValue NewMemOp) {
9048   assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
9049   SDValue OldChain = SDValue(OldLoad, 1);
9050   SDValue NewMemOpChain = NewMemOp.getValue(1);
9051   return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
9052 }
9053 
9054 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
9055                                                      Function **OutFunction) {
9056   assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
9057 
9058   auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
9059   auto *Module = MF->getFunction().getParent();
9060   auto *Function = Module->getFunction(Symbol);
9061 
9062   if (OutFunction != nullptr)
9063       *OutFunction = Function;
9064 
9065   if (Function != nullptr) {
9066     auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
9067     return getGlobalAddress(Function, SDLoc(Op), PtrTy);
9068   }
9069 
9070   std::string ErrorStr;
9071   raw_string_ostream ErrorFormatter(ErrorStr);
9072 
9073   ErrorFormatter << "Undefined external symbol ";
9074   ErrorFormatter << '"' << Symbol << '"';
9075   ErrorFormatter.flush();
9076 
9077   report_fatal_error(ErrorStr);
9078 }
9079 
9080 //===----------------------------------------------------------------------===//
9081 //                              SDNode Class
9082 //===----------------------------------------------------------------------===//
9083 
9084 bool llvm::isNullConstant(SDValue V) {
9085   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9086   return Const != nullptr && Const->isNullValue();
9087 }
9088 
9089 bool llvm::isNullFPConstant(SDValue V) {
9090   ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
9091   return Const != nullptr && Const->isZero() && !Const->isNegative();
9092 }
9093 
9094 bool llvm::isAllOnesConstant(SDValue V) {
9095   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9096   return Const != nullptr && Const->isAllOnesValue();
9097 }
9098 
9099 bool llvm::isOneConstant(SDValue V) {
9100   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9101   return Const != nullptr && Const->isOne();
9102 }
9103 
9104 SDValue llvm::peekThroughBitcasts(SDValue V) {
9105   while (V.getOpcode() == ISD::BITCAST)
9106     V = V.getOperand(0);
9107   return V;
9108 }
9109 
9110 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
9111   while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
9112     V = V.getOperand(0);
9113   return V;
9114 }
9115 
9116 SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
9117   while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
9118     V = V.getOperand(0);
9119   return V;
9120 }
9121 
9122 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
9123   if (V.getOpcode() != ISD::XOR)
9124     return false;
9125   V = peekThroughBitcasts(V.getOperand(1));
9126   unsigned NumBits = V.getScalarValueSizeInBits();
9127   ConstantSDNode *C =
9128       isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
9129   return C && (C->getAPIntValue().countTrailingOnes() >= NumBits);
9130 }
9131 
9132 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
9133                                           bool AllowTruncation) {
9134   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
9135     return CN;
9136 
9137   // SplatVectors can truncate their operands. Ignore that case here unless
9138   // AllowTruncation is set.
9139   if (N->getOpcode() == ISD::SPLAT_VECTOR) {
9140     EVT VecEltVT = N->getValueType(0).getVectorElementType();
9141     if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
9142       EVT CVT = CN->getValueType(0);
9143       assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
9144       if (AllowTruncation || CVT == VecEltVT)
9145         return CN;
9146     }
9147   }
9148 
9149   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9150     BitVector UndefElements;
9151     ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
9152 
9153     // BuildVectors can truncate their operands. Ignore that case here unless
9154     // AllowTruncation is set.
9155     if (CN && (UndefElements.none() || AllowUndefs)) {
9156       EVT CVT = CN->getValueType(0);
9157       EVT NSVT = N.getValueType().getScalarType();
9158       assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
9159       if (AllowTruncation || (CVT == NSVT))
9160         return CN;
9161     }
9162   }
9163 
9164   return nullptr;
9165 }
9166 
9167 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
9168                                           bool AllowUndefs,
9169                                           bool AllowTruncation) {
9170   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
9171     return CN;
9172 
9173   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9174     BitVector UndefElements;
9175     ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
9176 
9177     // BuildVectors can truncate their operands. Ignore that case here unless
9178     // AllowTruncation is set.
9179     if (CN && (UndefElements.none() || AllowUndefs)) {
9180       EVT CVT = CN->getValueType(0);
9181       EVT NSVT = N.getValueType().getScalarType();
9182       assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
9183       if (AllowTruncation || (CVT == NSVT))
9184         return CN;
9185     }
9186   }
9187 
9188   return nullptr;
9189 }
9190 
9191 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
9192   if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
9193     return CN;
9194 
9195   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9196     BitVector UndefElements;
9197     ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
9198     if (CN && (UndefElements.none() || AllowUndefs))
9199       return CN;
9200   }
9201 
9202   if (N.getOpcode() == ISD::SPLAT_VECTOR)
9203     if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
9204       return CN;
9205 
9206   return nullptr;
9207 }
9208 
9209 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
9210                                               const APInt &DemandedElts,
9211                                               bool AllowUndefs) {
9212   if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
9213     return CN;
9214 
9215   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9216     BitVector UndefElements;
9217     ConstantFPSDNode *CN =
9218         BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
9219     if (CN && (UndefElements.none() || AllowUndefs))
9220       return CN;
9221   }
9222 
9223   return nullptr;
9224 }
9225 
9226 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
9227   // TODO: may want to use peekThroughBitcast() here.
9228   ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
9229   return C && C->isNullValue();
9230 }
9231 
9232 bool llvm::isOneOrOneSplat(SDValue N) {
9233   // TODO: may want to use peekThroughBitcast() here.
9234   unsigned BitWidth = N.getScalarValueSizeInBits();
9235   ConstantSDNode *C = isConstOrConstSplat(N);
9236   return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
9237 }
9238 
9239 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) {
9240   N = peekThroughBitcasts(N);
9241   unsigned BitWidth = N.getScalarValueSizeInBits();
9242   ConstantSDNode *C = isConstOrConstSplat(N);
9243   return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth;
9244 }
9245 
9246 HandleSDNode::~HandleSDNode() {
9247   DropOperands();
9248 }
9249 
9250 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
9251                                          const DebugLoc &DL,
9252                                          const GlobalValue *GA, EVT VT,
9253                                          int64_t o, unsigned TF)
9254     : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
9255   TheGlobal = GA;
9256 }
9257 
9258 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
9259                                          EVT VT, unsigned SrcAS,
9260                                          unsigned DestAS)
9261     : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
9262       SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
9263 
9264 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
9265                      SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
9266     : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
9267   MemSDNodeBits.IsVolatile = MMO->isVolatile();
9268   MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
9269   MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
9270   MemSDNodeBits.IsInvariant = MMO->isInvariant();
9271 
9272   // We check here that the size of the memory operand fits within the size of
9273   // the MMO. This is because the MMO might indicate only a possible address
9274   // range instead of specifying the affected memory addresses precisely.
9275   // TODO: Make MachineMemOperands aware of scalable vectors.
9276   assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() &&
9277          "Size mismatch!");
9278 }
9279 
9280 /// Profile - Gather unique data for the node.
9281 ///
9282 void SDNode::Profile(FoldingSetNodeID &ID) const {
9283   AddNodeIDNode(ID, this);
9284 }
9285 
9286 namespace {
9287 
9288   struct EVTArray {
9289     std::vector<EVT> VTs;
9290 
9291     EVTArray() {
9292       VTs.reserve(MVT::LAST_VALUETYPE);
9293       for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
9294         VTs.push_back(MVT((MVT::SimpleValueType)i));
9295     }
9296   };
9297 
9298 } // end anonymous namespace
9299 
9300 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
9301 static ManagedStatic<EVTArray> SimpleVTArray;
9302 static ManagedStatic<sys::SmartMutex<true>> VTMutex;
9303 
9304 /// getValueTypeList - Return a pointer to the specified value type.
9305 ///
9306 const EVT *SDNode::getValueTypeList(EVT VT) {
9307   if (VT.isExtended()) {
9308     sys::SmartScopedLock<true> Lock(*VTMutex);
9309     return &(*EVTs->insert(VT).first);
9310   } else {
9311     assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
9312            "Value type out of range!");
9313     return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
9314   }
9315 }
9316 
9317 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
9318 /// indicated value.  This method ignores uses of other values defined by this
9319 /// operation.
9320 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
9321   assert(Value < getNumValues() && "Bad value!");
9322 
9323   // TODO: Only iterate over uses of a given value of the node
9324   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
9325     if (UI.getUse().getResNo() == Value) {
9326       if (NUses == 0)
9327         return false;
9328       --NUses;
9329     }
9330   }
9331 
9332   // Found exactly the right number of uses?
9333   return NUses == 0;
9334 }
9335 
9336 /// hasAnyUseOfValue - Return true if there are any use of the indicated
9337 /// value. This method ignores uses of other values defined by this operation.
9338 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
9339   assert(Value < getNumValues() && "Bad value!");
9340 
9341   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
9342     if (UI.getUse().getResNo() == Value)
9343       return true;
9344 
9345   return false;
9346 }
9347 
9348 /// isOnlyUserOf - Return true if this node is the only use of N.
9349 bool SDNode::isOnlyUserOf(const SDNode *N) const {
9350   bool Seen = false;
9351   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
9352     SDNode *User = *I;
9353     if (User == this)
9354       Seen = true;
9355     else
9356       return false;
9357   }
9358 
9359   return Seen;
9360 }
9361 
9362 /// Return true if the only users of N are contained in Nodes.
9363 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
9364   bool Seen = false;
9365   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
9366     SDNode *User = *I;
9367     if (llvm::is_contained(Nodes, User))
9368       Seen = true;
9369     else
9370       return false;
9371   }
9372 
9373   return Seen;
9374 }
9375 
9376 /// isOperand - Return true if this node is an operand of N.
9377 bool SDValue::isOperandOf(const SDNode *N) const {
9378   return is_contained(N->op_values(), *this);
9379 }
9380 
9381 bool SDNode::isOperandOf(const SDNode *N) const {
9382   return any_of(N->op_values(),
9383                 [this](SDValue Op) { return this == Op.getNode(); });
9384 }
9385 
9386 /// reachesChainWithoutSideEffects - Return true if this operand (which must
9387 /// be a chain) reaches the specified operand without crossing any
9388 /// side-effecting instructions on any chain path.  In practice, this looks
9389 /// through token factors and non-volatile loads.  In order to remain efficient,
9390 /// this only looks a couple of nodes in, it does not do an exhaustive search.
9391 ///
9392 /// Note that we only need to examine chains when we're searching for
9393 /// side-effects; SelectionDAG requires that all side-effects are represented
9394 /// by chains, even if another operand would force a specific ordering. This
9395 /// constraint is necessary to allow transformations like splitting loads.
9396 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
9397                                              unsigned Depth) const {
9398   if (*this == Dest) return true;
9399 
9400   // Don't search too deeply, we just want to be able to see through
9401   // TokenFactor's etc.
9402   if (Depth == 0) return false;
9403 
9404   // If this is a token factor, all inputs to the TF happen in parallel.
9405   if (getOpcode() == ISD::TokenFactor) {
9406     // First, try a shallow search.
9407     if (is_contained((*this)->ops(), Dest)) {
9408       // We found the chain we want as an operand of this TokenFactor.
9409       // Essentially, we reach the chain without side-effects if we could
9410       // serialize the TokenFactor into a simple chain of operations with
9411       // Dest as the last operation. This is automatically true if the
9412       // chain has one use: there are no other ordering constraints.
9413       // If the chain has more than one use, we give up: some other
9414       // use of Dest might force a side-effect between Dest and the current
9415       // node.
9416       if (Dest.hasOneUse())
9417         return true;
9418     }
9419     // Next, try a deep search: check whether every operand of the TokenFactor
9420     // reaches Dest.
9421     return llvm::all_of((*this)->ops(), [=](SDValue Op) {
9422       return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
9423     });
9424   }
9425 
9426   // Loads don't have side effects, look through them.
9427   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
9428     if (Ld->isUnordered())
9429       return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
9430   }
9431   return false;
9432 }
9433 
9434 bool SDNode::hasPredecessor(const SDNode *N) const {
9435   SmallPtrSet<const SDNode *, 32> Visited;
9436   SmallVector<const SDNode *, 16> Worklist;
9437   Worklist.push_back(this);
9438   return hasPredecessorHelper(N, Visited, Worklist);
9439 }
9440 
9441 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
9442   this->Flags.intersectWith(Flags);
9443 }
9444 
9445 SDValue
9446 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
9447                                   ArrayRef<ISD::NodeType> CandidateBinOps,
9448                                   bool AllowPartials) {
9449   // The pattern must end in an extract from index 0.
9450   if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9451       !isNullConstant(Extract->getOperand(1)))
9452     return SDValue();
9453 
9454   // Match against one of the candidate binary ops.
9455   SDValue Op = Extract->getOperand(0);
9456   if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
9457         return Op.getOpcode() == unsigned(BinOp);
9458       }))
9459     return SDValue();
9460 
9461   // Floating-point reductions may require relaxed constraints on the final step
9462   // of the reduction because they may reorder intermediate operations.
9463   unsigned CandidateBinOp = Op.getOpcode();
9464   if (Op.getValueType().isFloatingPoint()) {
9465     SDNodeFlags Flags = Op->getFlags();
9466     switch (CandidateBinOp) {
9467     case ISD::FADD:
9468       if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
9469         return SDValue();
9470       break;
9471     default:
9472       llvm_unreachable("Unhandled FP opcode for binop reduction");
9473     }
9474   }
9475 
9476   // Matching failed - attempt to see if we did enough stages that a partial
9477   // reduction from a subvector is possible.
9478   auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
9479     if (!AllowPartials || !Op)
9480       return SDValue();
9481     EVT OpVT = Op.getValueType();
9482     EVT OpSVT = OpVT.getScalarType();
9483     EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
9484     if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
9485       return SDValue();
9486     BinOp = (ISD::NodeType)CandidateBinOp;
9487     return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
9488                    getVectorIdxConstant(0, SDLoc(Op)));
9489   };
9490 
9491   // At each stage, we're looking for something that looks like:
9492   // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
9493   //                    <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
9494   //                               i32 undef, i32 undef, i32 undef, i32 undef>
9495   // %a = binop <8 x i32> %op, %s
9496   // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
9497   // we expect something like:
9498   // <4,5,6,7,u,u,u,u>
9499   // <2,3,u,u,u,u,u,u>
9500   // <1,u,u,u,u,u,u,u>
9501   // While a partial reduction match would be:
9502   // <2,3,u,u,u,u,u,u>
9503   // <1,u,u,u,u,u,u,u>
9504   unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
9505   SDValue PrevOp;
9506   for (unsigned i = 0; i < Stages; ++i) {
9507     unsigned MaskEnd = (1 << i);
9508 
9509     if (Op.getOpcode() != CandidateBinOp)
9510       return PartialReduction(PrevOp, MaskEnd);
9511 
9512     SDValue Op0 = Op.getOperand(0);
9513     SDValue Op1 = Op.getOperand(1);
9514 
9515     ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
9516     if (Shuffle) {
9517       Op = Op1;
9518     } else {
9519       Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
9520       Op = Op0;
9521     }
9522 
9523     // The first operand of the shuffle should be the same as the other operand
9524     // of the binop.
9525     if (!Shuffle || Shuffle->getOperand(0) != Op)
9526       return PartialReduction(PrevOp, MaskEnd);
9527 
9528     // Verify the shuffle has the expected (at this stage of the pyramid) mask.
9529     for (int Index = 0; Index < (int)MaskEnd; ++Index)
9530       if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
9531         return PartialReduction(PrevOp, MaskEnd);
9532 
9533     PrevOp = Op;
9534   }
9535 
9536   // Handle subvector reductions, which tend to appear after the shuffle
9537   // reduction stages.
9538   while (Op.getOpcode() == CandidateBinOp) {
9539     unsigned NumElts = Op.getValueType().getVectorNumElements();
9540     SDValue Op0 = Op.getOperand(0);
9541     SDValue Op1 = Op.getOperand(1);
9542     if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
9543         Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
9544         Op0.getOperand(0) != Op1.getOperand(0))
9545       break;
9546     SDValue Src = Op0.getOperand(0);
9547     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
9548     if (NumSrcElts != (2 * NumElts))
9549       break;
9550     if (!(Op0.getConstantOperandAPInt(1) == 0 &&
9551           Op1.getConstantOperandAPInt(1) == NumElts) &&
9552         !(Op1.getConstantOperandAPInt(1) == 0 &&
9553           Op0.getConstantOperandAPInt(1) == NumElts))
9554       break;
9555     Op = Src;
9556   }
9557 
9558   BinOp = (ISD::NodeType)CandidateBinOp;
9559   return Op;
9560 }
9561 
9562 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
9563   assert(N->getNumValues() == 1 &&
9564          "Can't unroll a vector with multiple results!");
9565 
9566   EVT VT = N->getValueType(0);
9567   unsigned NE = VT.getVectorNumElements();
9568   EVT EltVT = VT.getVectorElementType();
9569   SDLoc dl(N);
9570 
9571   SmallVector<SDValue, 8> Scalars;
9572   SmallVector<SDValue, 4> Operands(N->getNumOperands());
9573 
9574   // If ResNE is 0, fully unroll the vector op.
9575   if (ResNE == 0)
9576     ResNE = NE;
9577   else if (NE > ResNE)
9578     NE = ResNE;
9579 
9580   unsigned i;
9581   for (i= 0; i != NE; ++i) {
9582     for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
9583       SDValue Operand = N->getOperand(j);
9584       EVT OperandVT = Operand.getValueType();
9585       if (OperandVT.isVector()) {
9586         // A vector operand; extract a single element.
9587         EVT OperandEltVT = OperandVT.getVectorElementType();
9588         Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT,
9589                               Operand, getVectorIdxConstant(i, dl));
9590       } else {
9591         // A scalar operand; just use it as is.
9592         Operands[j] = Operand;
9593       }
9594     }
9595 
9596     switch (N->getOpcode()) {
9597     default: {
9598       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
9599                                 N->getFlags()));
9600       break;
9601     }
9602     case ISD::VSELECT:
9603       Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
9604       break;
9605     case ISD::SHL:
9606     case ISD::SRA:
9607     case ISD::SRL:
9608     case ISD::ROTL:
9609     case ISD::ROTR:
9610       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
9611                                getShiftAmountOperand(Operands[0].getValueType(),
9612                                                      Operands[1])));
9613       break;
9614     case ISD::SIGN_EXTEND_INREG: {
9615       EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
9616       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
9617                                 Operands[0],
9618                                 getValueType(ExtVT)));
9619     }
9620     }
9621   }
9622 
9623   for (; i < ResNE; ++i)
9624     Scalars.push_back(getUNDEF(EltVT));
9625 
9626   EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
9627   return getBuildVector(VecVT, dl, Scalars);
9628 }
9629 
9630 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
9631     SDNode *N, unsigned ResNE) {
9632   unsigned Opcode = N->getOpcode();
9633   assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
9634           Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
9635           Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
9636          "Expected an overflow opcode");
9637 
9638   EVT ResVT = N->getValueType(0);
9639   EVT OvVT = N->getValueType(1);
9640   EVT ResEltVT = ResVT.getVectorElementType();
9641   EVT OvEltVT = OvVT.getVectorElementType();
9642   SDLoc dl(N);
9643 
9644   // If ResNE is 0, fully unroll the vector op.
9645   unsigned NE = ResVT.getVectorNumElements();
9646   if (ResNE == 0)
9647     ResNE = NE;
9648   else if (NE > ResNE)
9649     NE = ResNE;
9650 
9651   SmallVector<SDValue, 8> LHSScalars;
9652   SmallVector<SDValue, 8> RHSScalars;
9653   ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
9654   ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);
9655 
9656   EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
9657   SDVTList VTs = getVTList(ResEltVT, SVT);
9658   SmallVector<SDValue, 8> ResScalars;
9659   SmallVector<SDValue, 8> OvScalars;
9660   for (unsigned i = 0; i < NE; ++i) {
9661     SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
9662     SDValue Ov =
9663         getSelect(dl, OvEltVT, Res.getValue(1),
9664                   getBoolConstant(true, dl, OvEltVT, ResVT),
9665                   getConstant(0, dl, OvEltVT));
9666 
9667     ResScalars.push_back(Res);
9668     OvScalars.push_back(Ov);
9669   }
9670 
9671   ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
9672   OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));
9673 
9674   EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
9675   EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
9676   return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
9677                         getBuildVector(NewOvVT, dl, OvScalars));
9678 }
9679 
9680 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
9681                                                   LoadSDNode *Base,
9682                                                   unsigned Bytes,
9683                                                   int Dist) const {
9684   if (LD->isVolatile() || Base->isVolatile())
9685     return false;
9686   // TODO: probably too restrictive for atomics, revisit
9687   if (!LD->isSimple())
9688     return false;
9689   if (LD->isIndexed() || Base->isIndexed())
9690     return false;
9691   if (LD->getChain() != Base->getChain())
9692     return false;
9693   EVT VT = LD->getValueType(0);
9694   if (VT.getSizeInBits() / 8 != Bytes)
9695     return false;
9696 
9697   auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
9698   auto LocDecomp = BaseIndexOffset::match(LD, *this);
9699 
9700   int64_t Offset = 0;
9701   if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
9702     return (Dist * Bytes == Offset);
9703   return false;
9704 }
9705 
9706 /// InferPtrAlignment - Infer alignment of a load / store address. Return None
9707 /// if it cannot be inferred.
9708 MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
9709   // If this is a GlobalAddress + cst, return the alignment.
9710   const GlobalValue *GV = nullptr;
9711   int64_t GVOffset = 0;
9712   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
9713     unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
9714     KnownBits Known(PtrWidth);
9715     llvm::computeKnownBits(GV, Known, getDataLayout());
9716     unsigned AlignBits = Known.countMinTrailingZeros();
9717     if (AlignBits)
9718       return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
9719   }
9720 
9721   // If this is a direct reference to a stack slot, use information about the
9722   // stack slot's alignment.
9723   int FrameIdx = INT_MIN;
9724   int64_t FrameOffset = 0;
9725   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
9726     FrameIdx = FI->getIndex();
9727   } else if (isBaseWithConstantOffset(Ptr) &&
9728              isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
9729     // Handle FI+Cst
9730     FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
9731     FrameOffset = Ptr.getConstantOperandVal(1);
9732   }
9733 
9734   if (FrameIdx != INT_MIN) {
9735     const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
9736     return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
9737   }
9738 
9739   return None;
9740 }
9741 
9742 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
9743 /// which is split (or expanded) into two not necessarily identical pieces.
9744 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
9745   // Currently all types are split in half.
9746   EVT LoVT, HiVT;
9747   if (!VT.isVector())
9748     LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
9749   else
9750     LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
9751 
9752   return std::make_pair(LoVT, HiVT);
9753 }
9754 
9755 /// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
9756 /// type, dependent on an enveloping VT that has been split into two identical
9757 /// pieces. Sets the HiIsEmpty flag when hi type has zero storage size.
9758 std::pair<EVT, EVT>
9759 SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
9760                                        bool *HiIsEmpty) const {
9761   EVT EltTp = VT.getVectorElementType();
9762   // Examples:
9763   //   custom VL=8  with enveloping VL=8/8 yields 8/0 (hi empty)
9764   //   custom VL=9  with enveloping VL=8/8 yields 8/1
9765   //   custom VL=10 with enveloping VL=8/8 yields 8/2
9766   //   etc.
9767   ElementCount VTNumElts = VT.getVectorElementCount();
9768   ElementCount EnvNumElts = EnvVT.getVectorElementCount();
9769   assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
9770          "Mixing fixed width and scalable vectors when enveloping a type");
9771   EVT LoVT, HiVT;
9772   if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
9773     LoVT = EnvVT;
9774     HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
9775     *HiIsEmpty = false;
9776   } else {
9777     // Flag that hi type has zero storage size, but return split envelop type
9778     // (this would be easier if vector types with zero elements were allowed).
9779     LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
9780     HiVT = EnvVT;
9781     *HiIsEmpty = true;
9782   }
9783   return std::make_pair(LoVT, HiVT);
9784 }
9785 
9786 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
9787 /// low/high part.
9788 std::pair<SDValue, SDValue>
9789 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
9790                           const EVT &HiVT) {
9791   assert(LoVT.isScalableVector() == HiVT.isScalableVector() &&
9792          LoVT.isScalableVector() == N.getValueType().isScalableVector() &&
9793          "Splitting vector with an invalid mixture of fixed and scalable "
9794          "vector types");
9795   assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <=
9796              N.getValueType().getVectorMinNumElements() &&
9797          "More vector elements requested than available!");
9798   SDValue Lo, Hi;
9799   Lo =
9800       getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL));
9801   // For scalable vectors it is safe to use LoVT.getVectorMinNumElements()
9802   // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales
9803   // IDX with the runtime scaling factor of the result vector type. For
9804   // fixed-width result vectors, that runtime scaling factor is 1.
9805   Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
9806                getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
9807   return std::make_pair(Lo, Hi);
9808 }
9809 
9810 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
9811 SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
9812   EVT VT = N.getValueType();
9813   EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
9814                                 NextPowerOf2(VT.getVectorNumElements()));
9815   return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
9816                  getVectorIdxConstant(0, DL));
9817 }
9818 
9819 void SelectionDAG::ExtractVectorElements(SDValue Op,
9820                                          SmallVectorImpl<SDValue> &Args,
9821                                          unsigned Start, unsigned Count,
9822                                          EVT EltVT) {
9823   EVT VT = Op.getValueType();
9824   if (Count == 0)
9825     Count = VT.getVectorNumElements();
9826   if (EltVT == EVT())
9827     EltVT = VT.getVectorElementType();
9828   SDLoc SL(Op);
9829   for (unsigned i = Start, e = Start + Count; i != e; ++i) {
9830     Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op,
9831                            getVectorIdxConstant(i, SL)));
9832   }
9833 }
9834 
9835 // getAddressSpace - Return the address space this GlobalAddress belongs to.
9836 unsigned GlobalAddressSDNode::getAddressSpace() const {
9837   return getGlobal()->getType()->getAddressSpace();
9838 }
9839 
9840 Type *ConstantPoolSDNode::getType() const {
9841   if (isMachineConstantPoolEntry())
9842     return Val.MachineCPVal->getType();
9843   return Val.ConstVal->getType();
9844 }
9845 
9846 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
9847                                         unsigned &SplatBitSize,
9848                                         bool &HasAnyUndefs,
9849                                         unsigned MinSplatBits,
9850                                         bool IsBigEndian) const {
9851   EVT VT = getValueType(0);
9852   assert(VT.isVector() && "Expected a vector type");
9853   unsigned VecWidth = VT.getSizeInBits();
9854   if (MinSplatBits > VecWidth)
9855     return false;
9856 
9857   // FIXME: The widths are based on this node's type, but build vectors can
9858   // truncate their operands.
9859   SplatValue = APInt(VecWidth, 0);
9860   SplatUndef = APInt(VecWidth, 0);
9861 
9862   // Get the bits. Bits with undefined values (when the corresponding element
9863   // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
9864   // in SplatValue. If any of the values are not constant, give up and return
9865   // false.
9866   unsigned int NumOps = getNumOperands();
9867   assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
9868   unsigned EltWidth = VT.getScalarSizeInBits();
9869 
9870   for (unsigned j = 0; j < NumOps; ++j) {
9871     unsigned i = IsBigEndian ? NumOps - 1 - j : j;
9872     SDValue OpVal = getOperand(i);
9873     unsigned BitPos = j * EltWidth;
9874 
9875     if (OpVal.isUndef())
9876       SplatUndef.setBits(BitPos, BitPos + EltWidth);
9877     else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
9878       SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
9879     else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
9880       SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
9881     else
9882       return false;
9883   }
9884 
9885   // The build_vector is all constants or undefs. Find the smallest element
9886   // size that splats the vector.
9887   HasAnyUndefs = (SplatUndef != 0);
9888 
9889   // FIXME: This does not work for vectors with elements less than 8 bits.
9890   while (VecWidth > 8) {
9891     unsigned HalfSize = VecWidth / 2;
9892     APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
9893     APInt LowValue = SplatValue.trunc(HalfSize);
9894     APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
9895     APInt LowUndef = SplatUndef.trunc(HalfSize);
9896 
9897     // If the two halves do not match (ignoring undef bits), stop here.
9898     if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
9899         MinSplatBits > HalfSize)
9900       break;
9901 
9902     SplatValue = HighValue | LowValue;
9903     SplatUndef = HighUndef & LowUndef;
9904 
9905     VecWidth = HalfSize;
9906   }
9907 
9908   SplatBitSize = VecWidth;
9909   return true;
9910 }
9911 
9912 SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
9913                                          BitVector *UndefElements) const {
9914   unsigned NumOps = getNumOperands();
9915   if (UndefElements) {
9916     UndefElements->clear();
9917     UndefElements->resize(NumOps);
9918   }
9919   assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
9920   if (!DemandedElts)
9921     return SDValue();
9922   SDValue Splatted;
9923   for (unsigned i = 0; i != NumOps; ++i) {
9924     if (!DemandedElts[i])
9925       continue;
9926     SDValue Op = getOperand(i);
9927     if (Op.isUndef()) {
9928       if (UndefElements)
9929         (*UndefElements)[i] = true;
9930     } else if (!Splatted) {
9931       Splatted = Op;
9932     } else if (Splatted != Op) {
9933       return SDValue();
9934     }
9935   }
9936 
9937   if (!Splatted) {
9938     unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
9939     assert(getOperand(FirstDemandedIdx).isUndef() &&
9940            "Can only have a splat without a constant for all undefs.");
9941     return getOperand(FirstDemandedIdx);
9942   }
9943 
9944   return Splatted;
9945 }
9946 
9947 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
9948   APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
9949   return getSplatValue(DemandedElts, UndefElements);
9950 }
9951 
9952 bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
9953                                             SmallVectorImpl<SDValue> &Sequence,
9954                                             BitVector *UndefElements) const {
9955   unsigned NumOps = getNumOperands();
9956   Sequence.clear();
9957   if (UndefElements) {
9958     UndefElements->clear();
9959     UndefElements->resize(NumOps);
9960   }
9961   assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
9962   if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
9963     return false;
9964 
9965   // Set the undefs even if we don't find a sequence (like getSplatValue).
9966   if (UndefElements)
9967     for (unsigned I = 0; I != NumOps; ++I)
9968       if (DemandedElts[I] && getOperand(I).isUndef())
9969         (*UndefElements)[I] = true;
9970 
9971   // Iteratively widen the sequence length looking for repetitions.
9972   for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
9973     Sequence.append(SeqLen, SDValue());
9974     for (unsigned I = 0; I != NumOps; ++I) {
9975       if (!DemandedElts[I])
9976         continue;
9977       SDValue &SeqOp = Sequence[I % SeqLen];
9978       SDValue Op = getOperand(I);
9979       if (Op.isUndef()) {
9980         if (!SeqOp)
9981           SeqOp = Op;
9982         continue;
9983       }
9984       if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
9985         Sequence.clear();
9986         break;
9987       }
9988       SeqOp = Op;
9989     }
9990     if (!Sequence.empty())
9991       return true;
9992   }
9993 
9994   assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
9995   return false;
9996 }
9997 
9998 bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
9999                                             BitVector *UndefElements) const {
10000   APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
10001   return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
10002 }
10003 
10004 ConstantSDNode *
10005 BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
10006                                         BitVector *UndefElements) const {
10007   return dyn_cast_or_null<ConstantSDNode>(
10008       getSplatValue(DemandedElts, UndefElements));
10009 }
10010 
10011 ConstantSDNode *
10012 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
10013   return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
10014 }
10015 
10016 ConstantFPSDNode *
10017 BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
10018                                           BitVector *UndefElements) const {
10019   return dyn_cast_or_null<ConstantFPSDNode>(
10020       getSplatValue(DemandedElts, UndefElements));
10021 }
10022 
10023 ConstantFPSDNode *
10024 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
10025   return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
10026 }
10027 
10028 int32_t
10029 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
10030                                                    uint32_t BitWidth) const {
10031   if (ConstantFPSDNode *CN =
10032           dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
10033     bool IsExact;
10034     APSInt IntVal(BitWidth);
10035     const APFloat &APF = CN->getValueAPF();
10036     if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
10037             APFloat::opOK ||
10038         !IsExact)
10039       return -1;
10040 
10041     return IntVal.exactLogBase2();
10042   }
10043   return -1;
10044 }
10045 
10046 bool BuildVectorSDNode::isConstant() const {
10047   for (const SDValue &Op : op_values()) {
10048     unsigned Opc = Op.getOpcode();
10049     if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
10050       return false;
10051   }
10052   return true;
10053 }
10054 
10055 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
10056   // Find the first non-undef value in the shuffle mask.
10057   unsigned i, e;
10058   for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
10059     /* search */;
10060 
10061   // If all elements are undefined, this shuffle can be considered a splat
10062   // (although it should eventually get simplified away completely).
10063   if (i == e)
10064     return true;
10065 
10066   // Make sure all remaining elements are either undef or the same as the first
10067   // non-undef value.
10068   for (int Idx = Mask[i]; i != e; ++i)
10069     if (Mask[i] >= 0 && Mask[i] != Idx)
10070       return false;
10071   return true;
10072 }
10073 
10074 // Returns the SDNode if it is a constant integer BuildVector
10075 // or constant integer.
10076 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
10077   if (isa<ConstantSDNode>(N))
10078     return N.getNode();
10079   if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
10080     return N.getNode();
10081   // Treat a GlobalAddress supporting constant offset folding as a
10082   // constant integer.
10083   if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
10084     if (GA->getOpcode() == ISD::GlobalAddress &&
10085         TLI->isOffsetFoldingLegal(GA))
10086       return GA;
10087   if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
10088       isa<ConstantSDNode>(N.getOperand(0)))
10089     return N.getNode();
10090   return nullptr;
10091 }
10092 
10093 // Returns the SDNode if it is a constant float BuildVector
10094 // or constant float.
10095 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
10096   if (isa<ConstantFPSDNode>(N))
10097     return N.getNode();
10098 
10099   if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
10100     return N.getNode();
10101 
10102   return nullptr;
10103 }
10104 
10105 void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
10106   assert(!Node->OperandList && "Node already has operands");
10107   assert(SDNode::getMaxNumOperands() >= Vals.size() &&
10108          "too many operands to fit into SDNode");
10109   SDUse *Ops = OperandRecycler.allocate(
10110       ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);
10111 
10112   bool IsDivergent = false;
10113   for (unsigned I = 0; I != Vals.size(); ++I) {
10114     Ops[I].setUser(Node);
10115     Ops[I].setInitial(Vals[I]);
10116     if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
10117       IsDivergent |= Ops[I].getNode()->isDivergent();
10118   }
10119   Node->NumOperands = Vals.size();
10120   Node->OperandList = Ops;
10121   if (!TLI->isSDNodeAlwaysUniform(Node)) {
10122     IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
10123     Node->SDNodeBits.IsDivergent = IsDivergent;
10124   }
10125   checkForCycles(Node);
10126 }
10127 
10128 SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
10129                                      SmallVectorImpl<SDValue> &Vals) {
10130   size_t Limit = SDNode::getMaxNumOperands();
10131   while (Vals.size() > Limit) {
10132     unsigned SliceIdx = Vals.size() - Limit;
10133     auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
10134     SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
10135     Vals.erase(Vals.begin() + SliceIdx, Vals.end());
10136     Vals.emplace_back(NewTF);
10137   }
10138   return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
10139 }
10140 
10141 SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL,
10142                                         EVT VT, SDNodeFlags Flags) {
10143   switch (Opcode) {
10144   default:
10145     return SDValue();
10146   case ISD::ADD:
10147   case ISD::OR:
10148   case ISD::XOR:
10149   case ISD::UMAX:
10150     return getConstant(0, DL, VT);
10151   case ISD::MUL:
10152     return getConstant(1, DL, VT);
10153   case ISD::AND:
10154   case ISD::UMIN:
10155     return getAllOnesConstant(DL, VT);
10156   case ISD::SMAX:
10157     return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT);
10158   case ISD::SMIN:
10159     return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT);
10160   case ISD::FADD:
10161     return getConstantFP(-0.0, DL, VT);
10162   case ISD::FMUL:
10163     return getConstantFP(1.0, DL, VT);
10164   case ISD::FMINNUM:
10165   case ISD::FMAXNUM: {
10166     // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
10167     const fltSemantics &Semantics = EVTToAPFloatSemantics(VT);
10168     APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
10169                         !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
10170                         APFloat::getLargest(Semantics);
10171     if (Opcode == ISD::FMAXNUM)
10172       NeutralAF.changeSign();
10173 
10174     return getConstantFP(NeutralAF, DL, VT);
10175   }
10176   }
10177 }
10178 
10179 #ifndef NDEBUG
10180 static void checkForCyclesHelper(const SDNode *N,
10181                                  SmallPtrSetImpl<const SDNode*> &Visited,
10182                                  SmallPtrSetImpl<const SDNode*> &Checked,
10183                                  const llvm::SelectionDAG *DAG) {
10184   // If this node has already been checked, don't check it again.
10185   if (Checked.count(N))
10186     return;
10187 
10188   // If a node has already been visited on this depth-first walk, reject it as
10189   // a cycle.
10190   if (!Visited.insert(N).second) {
10191     errs() << "Detected cycle in SelectionDAG\n";
10192     dbgs() << "Offending node:\n";
10193     N->dumprFull(DAG); dbgs() << "\n";
10194     abort();
10195   }
10196 
10197   for (const SDValue &Op : N->op_values())
10198     checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
10199 
10200   Checked.insert(N);
10201   Visited.erase(N);
10202 }
10203 #endif
10204 
10205 void llvm::checkForCycles(const llvm::SDNode *N,
10206                           const llvm::SelectionDAG *DAG,
10207                           bool force) {
10208 #ifndef NDEBUG
10209   bool check = force;
10210 #ifdef EXPENSIVE_CHECKS
10211   check = true;
10212 #endif  // EXPENSIVE_CHECKS
10213   if (check) {
10214     assert(N && "Checking nonexistent SDNode");
10215     SmallPtrSet<const SDNode*, 32> visited;
10216     SmallPtrSet<const SDNode*, 32> checked;
10217     checkForCyclesHelper(N, visited, checked, DAG);
10218   }
10219 #endif  // !NDEBUG
10220 }
10221 
10222 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
10223   checkForCycles(DAG->getRoot().getNode(), DAG, force);
10224 }
10225