1 //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the SelectionDAG class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/SelectionDAG.h"
14 #include "SDNodeDbgValue.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/APSInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/FoldingSet.h"
21 #include "llvm/ADT/None.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/ADT/Twine.h"
27 #include "llvm/Analysis/BlockFrequencyInfo.h"
28 #include "llvm/Analysis/MemoryLocation.h"
29 #include "llvm/Analysis/ProfileSummaryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/CodeGen/Analysis.h"
32 #include "llvm/CodeGen/FunctionLoweringInfo.h"
33 #include "llvm/CodeGen/ISDOpcodes.h"
34 #include "llvm/CodeGen/MachineBasicBlock.h"
35 #include "llvm/CodeGen/MachineConstantPool.h"
36 #include "llvm/CodeGen/MachineFrameInfo.h"
37 #include "llvm/CodeGen/MachineFunction.h"
38 #include "llvm/CodeGen/MachineMemOperand.h"
39 #include "llvm/CodeGen/RuntimeLibcalls.h"
40 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
41 #include "llvm/CodeGen/SelectionDAGNodes.h"
42 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
43 #include "llvm/CodeGen/TargetFrameLowering.h"
44 #include "llvm/CodeGen/TargetLowering.h"
45 #include "llvm/CodeGen/TargetRegisterInfo.h"
46 #include "llvm/CodeGen/TargetSubtargetInfo.h"
47 #include "llvm/CodeGen/ValueTypes.h"
48 #include "llvm/IR/Constant.h"
49 #include "llvm/IR/Constants.h"
50 #include "llvm/IR/DataLayout.h"
51 #include "llvm/IR/DebugInfoMetadata.h"
52 #include "llvm/IR/DebugLoc.h"
53 #include "llvm/IR/DerivedTypes.h"
54 #include "llvm/IR/Function.h"
55 #include "llvm/IR/GlobalValue.h"
56 #include "llvm/IR/Metadata.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CodeGen.h"
61 #include "llvm/Support/Compiler.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Support/KnownBits.h"
65 #include "llvm/Support/MachineValueType.h"
66 #include "llvm/Support/ManagedStatic.h"
67 #include "llvm/Support/MathExtras.h"
68 #include "llvm/Support/Mutex.h"
69 #include "llvm/Support/raw_ostream.h"
70 #include "llvm/Target/TargetMachine.h"
71 #include "llvm/Target/TargetOptions.h"
72 #include "llvm/Transforms/Utils/SizeOpts.h"
73 #include <algorithm>
74 #include <cassert>
75 #include <cstdint>
76 #include <cstdlib>
77 #include <limits>
78 #include <set>
79 #include <string>
80 #include <utility>
81 #include <vector>
82 
83 using namespace llvm;
84 
85 /// makeVTList - Return an instance of the SDVTList struct initialized with the
86 /// specified members.
87 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
88   SDVTList Res = {VTs, NumVTs};
89   return Res;
90 }
91 
92 // Default null implementations of the callbacks.
93 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
94 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
95 void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}
96 
97 void SelectionDAG::DAGNodeDeletedListener::anchor() {}
98 
99 #define DEBUG_TYPE "selectiondag"
100 
101 static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
102        cl::Hidden, cl::init(true),
103        cl::desc("Gang up loads and stores generated by inlining of memcpy"));
104 
105 static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
106        cl::desc("Number limit for gluing ld/st of memcpy."),
107        cl::Hidden, cl::init(0));
108 
109 static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
110   LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
111 }
112 
113 //===----------------------------------------------------------------------===//
114 //                              ConstantFPSDNode Class
115 //===----------------------------------------------------------------------===//
116 
117 /// isExactlyValue - We don't rely on operator== working on double values, as
118 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
119 /// As such, this method can be used to do an exact bit-for-bit comparison of
120 /// two floating point values.
121 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
122   return getValueAPF().bitwiseIsEqual(V);
123 }
124 
125 bool ConstantFPSDNode::isValueValidForType(EVT VT,
126                                            const APFloat& Val) {
127   assert(VT.isFloatingPoint() && "Can only convert between FP types");
128 
129   // convert modifies in place, so make a copy.
130   APFloat Val2 = APFloat(Val);
131   bool losesInfo;
132   (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
133                       APFloat::rmNearestTiesToEven,
134                       &losesInfo);
135   return !losesInfo;
136 }
137 
138 //===----------------------------------------------------------------------===//
139 //                              ISD Namespace
140 //===----------------------------------------------------------------------===//
141 
142 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
143   if (N->getOpcode() == ISD::SPLAT_VECTOR) {
144     unsigned EltSize =
145         N->getValueType(0).getVectorElementType().getSizeInBits();
146     if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
147       SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
148       return true;
149     }
150     if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
151       SplatVal = Op0->getValueAPF().bitcastToAPInt().truncOrSelf(EltSize);
152       return true;
153     }
154   }
155 
156   auto *BV = dyn_cast<BuildVectorSDNode>(N);
157   if (!BV)
158     return false;
159 
160   APInt SplatUndef;
161   unsigned SplatBitSize;
162   bool HasUndefs;
163   unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
164   return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
165                              EltSize) &&
166          EltSize == SplatBitSize;
167 }
168 
169 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
170 // specializations of the more general isConstantSplatVector()?
171 
172 bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
173   // Look through a bit convert.
174   while (N->getOpcode() == ISD::BITCAST)
175     N = N->getOperand(0).getNode();
176 
177   if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
178     APInt SplatVal;
179     return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnes();
180   }
181 
182   if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
183 
184   unsigned i = 0, e = N->getNumOperands();
185 
186   // Skip over all of the undef values.
187   while (i != e && N->getOperand(i).isUndef())
188     ++i;
189 
190   // Do not accept an all-undef vector.
191   if (i == e) return false;
192 
193   // Do not accept build_vectors that aren't all constants or which have non-~0
194   // elements. We have to be a bit careful here, as the type of the constant
195   // may not be the same as the type of the vector elements due to type
196   // legalization (the elements are promoted to a legal type for the target and
197   // a vector of a type may be legal when the base element type is not).
198   // We only want to check enough bits to cover the vector elements, because
199   // we care if the resultant vector is all ones, not whether the individual
200   // constants are.
201   SDValue NotZero = N->getOperand(i);
202   unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
203   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
204     if (CN->getAPIntValue().countTrailingOnes() < EltSize)
205       return false;
206   } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
207     if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
208       return false;
209   } else
210     return false;
211 
212   // Okay, we have at least one ~0 value, check to see if the rest match or are
213   // undefs. Even with the above element type twiddling, this should be OK, as
214   // the same type legalization should have applied to all the elements.
215   for (++i; i != e; ++i)
216     if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
217       return false;
218   return true;
219 }
220 
221 bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
222   // Look through a bit convert.
223   while (N->getOpcode() == ISD::BITCAST)
224     N = N->getOperand(0).getNode();
225 
226   if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
227     APInt SplatVal;
228     return isConstantSplatVector(N, SplatVal) && SplatVal.isZero();
229   }
230 
231   if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
232 
233   bool IsAllUndef = true;
234   for (const SDValue &Op : N->op_values()) {
235     if (Op.isUndef())
236       continue;
237     IsAllUndef = false;
238     // Do not accept build_vectors that aren't all constants or which have non-0
239     // elements. We have to be a bit careful here, as the type of the constant
240     // may not be the same as the type of the vector elements due to type
241     // legalization (the elements are promoted to a legal type for the target
242     // and a vector of a type may be legal when the base element type is not).
243     // We only want to check enough bits to cover the vector elements, because
244     // we care if the resultant vector is all zeros, not whether the individual
245     // constants are.
246     unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
247     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
248       if (CN->getAPIntValue().countTrailingZeros() < EltSize)
249         return false;
250     } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
251       if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
252         return false;
253     } else
254       return false;
255   }
256 
257   // Do not accept an all-undef vector.
258   if (IsAllUndef)
259     return false;
260   return true;
261 }
262 
263 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
264   return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
265 }
266 
267 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
268   return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
269 }
270 
271 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
272   if (N->getOpcode() != ISD::BUILD_VECTOR)
273     return false;
274 
275   for (const SDValue &Op : N->op_values()) {
276     if (Op.isUndef())
277       continue;
278     if (!isa<ConstantSDNode>(Op))
279       return false;
280   }
281   return true;
282 }
283 
284 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
285   if (N->getOpcode() != ISD::BUILD_VECTOR)
286     return false;
287 
288   for (const SDValue &Op : N->op_values()) {
289     if (Op.isUndef())
290       continue;
291     if (!isa<ConstantFPSDNode>(Op))
292       return false;
293   }
294   return true;
295 }
296 
297 bool ISD::allOperandsUndef(const SDNode *N) {
298   // Return false if the node has no operands.
299   // This is "logically inconsistent" with the definition of "all" but
300   // is probably the desired behavior.
301   if (N->getNumOperands() == 0)
302     return false;
303   return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
304 }
305 
306 bool ISD::matchUnaryPredicate(SDValue Op,
307                               std::function<bool(ConstantSDNode *)> Match,
308                               bool AllowUndefs) {
309   // FIXME: Add support for scalar UNDEF cases?
310   if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
311     return Match(Cst);
312 
313   // FIXME: Add support for vector UNDEF cases?
314   if (ISD::BUILD_VECTOR != Op.getOpcode() &&
315       ISD::SPLAT_VECTOR != Op.getOpcode())
316     return false;
317 
318   EVT SVT = Op.getValueType().getScalarType();
319   for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
320     if (AllowUndefs && Op.getOperand(i).isUndef()) {
321       if (!Match(nullptr))
322         return false;
323       continue;
324     }
325 
326     auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
327     if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
328       return false;
329   }
330   return true;
331 }
332 
333 bool ISD::matchBinaryPredicate(
334     SDValue LHS, SDValue RHS,
335     std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
336     bool AllowUndefs, bool AllowTypeMismatch) {
337   if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
338     return false;
339 
340   // TODO: Add support for scalar UNDEF cases?
341   if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
342     if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
343       return Match(LHSCst, RHSCst);
344 
345   // TODO: Add support for vector UNDEF cases?
346   if (LHS.getOpcode() != RHS.getOpcode() ||
347       (LHS.getOpcode() != ISD::BUILD_VECTOR &&
348        LHS.getOpcode() != ISD::SPLAT_VECTOR))
349     return false;
350 
351   EVT SVT = LHS.getValueType().getScalarType();
352   for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
353     SDValue LHSOp = LHS.getOperand(i);
354     SDValue RHSOp = RHS.getOperand(i);
355     bool LHSUndef = AllowUndefs && LHSOp.isUndef();
356     bool RHSUndef = AllowUndefs && RHSOp.isUndef();
357     auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
358     auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
359     if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
360       return false;
361     if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
362                                LHSOp.getValueType() != RHSOp.getValueType()))
363       return false;
364     if (!Match(LHSCst, RHSCst))
365       return false;
366   }
367   return true;
368 }
369 
370 ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
371   switch (VecReduceOpcode) {
372   default:
373     llvm_unreachable("Expected VECREDUCE opcode");
374   case ISD::VECREDUCE_FADD:
375   case ISD::VECREDUCE_SEQ_FADD:
376   case ISD::VP_REDUCE_FADD:
377   case ISD::VP_REDUCE_SEQ_FADD:
378     return ISD::FADD;
379   case ISD::VECREDUCE_FMUL:
380   case ISD::VECREDUCE_SEQ_FMUL:
381   case ISD::VP_REDUCE_FMUL:
382   case ISD::VP_REDUCE_SEQ_FMUL:
383     return ISD::FMUL;
384   case ISD::VECREDUCE_ADD:
385   case ISD::VP_REDUCE_ADD:
386     return ISD::ADD;
387   case ISD::VECREDUCE_MUL:
388   case ISD::VP_REDUCE_MUL:
389     return ISD::MUL;
390   case ISD::VECREDUCE_AND:
391   case ISD::VP_REDUCE_AND:
392     return ISD::AND;
393   case ISD::VECREDUCE_OR:
394   case ISD::VP_REDUCE_OR:
395     return ISD::OR;
396   case ISD::VECREDUCE_XOR:
397   case ISD::VP_REDUCE_XOR:
398     return ISD::XOR;
399   case ISD::VECREDUCE_SMAX:
400   case ISD::VP_REDUCE_SMAX:
401     return ISD::SMAX;
402   case ISD::VECREDUCE_SMIN:
403   case ISD::VP_REDUCE_SMIN:
404     return ISD::SMIN;
405   case ISD::VECREDUCE_UMAX:
406   case ISD::VP_REDUCE_UMAX:
407     return ISD::UMAX;
408   case ISD::VECREDUCE_UMIN:
409   case ISD::VP_REDUCE_UMIN:
410     return ISD::UMIN;
411   case ISD::VECREDUCE_FMAX:
412   case ISD::VP_REDUCE_FMAX:
413     return ISD::FMAXNUM;
414   case ISD::VECREDUCE_FMIN:
415   case ISD::VP_REDUCE_FMIN:
416     return ISD::FMINNUM;
417   }
418 }
419 
420 bool ISD::isVPOpcode(unsigned Opcode) {
421   switch (Opcode) {
422   default:
423     return false;
424 #define BEGIN_REGISTER_VP_SDNODE(VPSD, ...)                                    \
425   case ISD::VPSD:                                                              \
426     return true;
427 #include "llvm/IR/VPIntrinsics.def"
428   }
429 }
430 
431 bool ISD::isVPBinaryOp(unsigned Opcode) {
432   switch (Opcode) {
433   default:
434     break;
435 #define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
436 #define VP_PROPERTY_BINARYOP return true;
437 #define END_REGISTER_VP_SDNODE(VPSD) break;
438 #include "llvm/IR/VPIntrinsics.def"
439   }
440   return false;
441 }
442 
443 bool ISD::isVPReduction(unsigned Opcode) {
444   switch (Opcode) {
445   default:
446     break;
447 #define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
448 #define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
449 #define END_REGISTER_VP_SDNODE(VPSD) break;
450 #include "llvm/IR/VPIntrinsics.def"
451   }
452   return false;
453 }
454 
455 /// The operand position of the vector mask.
456 Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
457   switch (Opcode) {
458   default:
459     return None;
460 #define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...)         \
461   case ISD::VPSD:                                                              \
462     return MASKPOS;
463 #include "llvm/IR/VPIntrinsics.def"
464   }
465 }
466 
467 /// The operand position of the explicit vector length parameter.
468 Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
469   switch (Opcode) {
470   default:
471     return None;
472 #define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS)      \
473   case ISD::VPSD:                                                              \
474     return EVLPOS;
475 #include "llvm/IR/VPIntrinsics.def"
476   }
477 }
478 
479 ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
480   switch (ExtType) {
481   case ISD::EXTLOAD:
482     return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
483   case ISD::SEXTLOAD:
484     return ISD::SIGN_EXTEND;
485   case ISD::ZEXTLOAD:
486     return ISD::ZERO_EXTEND;
487   default:
488     break;
489   }
490 
491   llvm_unreachable("Invalid LoadExtType");
492 }
493 
494 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
495   // To perform this operation, we just need to swap the L and G bits of the
496   // operation.
497   unsigned OldL = (Operation >> 2) & 1;
498   unsigned OldG = (Operation >> 1) & 1;
499   return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
500                        (OldL << 1) |       // New G bit
501                        (OldG << 2));       // New L bit.
502 }
503 
504 static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
505   unsigned Operation = Op;
506   if (isIntegerLike)
507     Operation ^= 7;   // Flip L, G, E bits, but not U.
508   else
509     Operation ^= 15;  // Flip all of the condition bits.
510 
511   if (Operation > ISD::SETTRUE2)
512     Operation &= ~8;  // Don't let N and U bits get set.
513 
514   return ISD::CondCode(Operation);
515 }
516 
517 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
518   return getSetCCInverseImpl(Op, Type.isInteger());
519 }
520 
521 ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
522                                                bool isIntegerLike) {
523   return getSetCCInverseImpl(Op, isIntegerLike);
524 }
525 
526 /// For an integer comparison, return 1 if the comparison is a signed operation
527 /// and 2 if the result is an unsigned comparison. Return zero if the operation
528 /// does not depend on the sign of the input (setne and seteq).
529 static int isSignedOp(ISD::CondCode Opcode) {
530   switch (Opcode) {
531   default: llvm_unreachable("Illegal integer setcc operation!");
532   case ISD::SETEQ:
533   case ISD::SETNE: return 0;
534   case ISD::SETLT:
535   case ISD::SETLE:
536   case ISD::SETGT:
537   case ISD::SETGE: return 1;
538   case ISD::SETULT:
539   case ISD::SETULE:
540   case ISD::SETUGT:
541   case ISD::SETUGE: return 2;
542   }
543 }
544 
545 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
546                                        EVT Type) {
547   bool IsInteger = Type.isInteger();
548   if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
549     // Cannot fold a signed integer setcc with an unsigned integer setcc.
550     return ISD::SETCC_INVALID;
551 
552   unsigned Op = Op1 | Op2;  // Combine all of the condition bits.
553 
554   // If the N and U bits get set, then the resultant comparison DOES suddenly
555   // care about orderedness, and it is true when ordered.
556   if (Op > ISD::SETTRUE2)
557     Op &= ~16;     // Clear the U bit if the N bit is set.
558 
559   // Canonicalize illegal integer setcc's.
560   if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
561     Op = ISD::SETNE;
562 
563   return ISD::CondCode(Op);
564 }
565 
566 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
567                                         EVT Type) {
568   bool IsInteger = Type.isInteger();
569   if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
570     // Cannot fold a signed setcc with an unsigned setcc.
571     return ISD::SETCC_INVALID;
572 
573   // Combine all of the condition bits.
574   ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
575 
576   // Canonicalize illegal integer setcc's.
577   if (IsInteger) {
578     switch (Result) {
579     default: break;
580     case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
581     case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
582     case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
583     case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
584     case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
585     }
586   }
587 
588   return Result;
589 }
590 
591 //===----------------------------------------------------------------------===//
592 //                           SDNode Profile Support
593 //===----------------------------------------------------------------------===//
594 
595 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
596 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
597   ID.AddInteger(OpC);
598 }
599 
600 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
601 /// solely with their pointer.
602 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
603   ID.AddPointer(VTList.VTs);
604 }
605 
606 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
607 static void AddNodeIDOperands(FoldingSetNodeID &ID,
608                               ArrayRef<SDValue> Ops) {
609   for (auto& Op : Ops) {
610     ID.AddPointer(Op.getNode());
611     ID.AddInteger(Op.getResNo());
612   }
613 }
614 
615 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
616 static void AddNodeIDOperands(FoldingSetNodeID &ID,
617                               ArrayRef<SDUse> Ops) {
618   for (auto& Op : Ops) {
619     ID.AddPointer(Op.getNode());
620     ID.AddInteger(Op.getResNo());
621   }
622 }
623 
624 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
625                           SDVTList VTList, ArrayRef<SDValue> OpList) {
626   AddNodeIDOpcode(ID, OpC);
627   AddNodeIDValueTypes(ID, VTList);
628   AddNodeIDOperands(ID, OpList);
629 }
630 
631 /// If this is an SDNode with special info, add this info to the NodeID data.
632 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
633   switch (N->getOpcode()) {
634   case ISD::TargetExternalSymbol:
635   case ISD::ExternalSymbol:
636   case ISD::MCSymbol:
637     llvm_unreachable("Should only be used on nodes with operands");
638   default: break;  // Normal nodes don't need extra info.
639   case ISD::TargetConstant:
640   case ISD::Constant: {
641     const ConstantSDNode *C = cast<ConstantSDNode>(N);
642     ID.AddPointer(C->getConstantIntValue());
643     ID.AddBoolean(C->isOpaque());
644     break;
645   }
646   case ISD::TargetConstantFP:
647   case ISD::ConstantFP:
648     ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
649     break;
650   case ISD::TargetGlobalAddress:
651   case ISD::GlobalAddress:
652   case ISD::TargetGlobalTLSAddress:
653   case ISD::GlobalTLSAddress: {
654     const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
655     ID.AddPointer(GA->getGlobal());
656     ID.AddInteger(GA->getOffset());
657     ID.AddInteger(GA->getTargetFlags());
658     break;
659   }
660   case ISD::BasicBlock:
661     ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
662     break;
663   case ISD::Register:
664     ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
665     break;
666   case ISD::RegisterMask:
667     ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
668     break;
669   case ISD::SRCVALUE:
670     ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
671     break;
672   case ISD::FrameIndex:
673   case ISD::TargetFrameIndex:
674     ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
675     break;
676   case ISD::LIFETIME_START:
677   case ISD::LIFETIME_END:
678     if (cast<LifetimeSDNode>(N)->hasOffset()) {
679       ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
680       ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
681     }
682     break;
683   case ISD::PSEUDO_PROBE:
684     ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
685     ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
686     ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
687     break;
688   case ISD::JumpTable:
689   case ISD::TargetJumpTable:
690     ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
691     ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
692     break;
693   case ISD::ConstantPool:
694   case ISD::TargetConstantPool: {
695     const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
696     ID.AddInteger(CP->getAlign().value());
697     ID.AddInteger(CP->getOffset());
698     if (CP->isMachineConstantPoolEntry())
699       CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
700     else
701       ID.AddPointer(CP->getConstVal());
702     ID.AddInteger(CP->getTargetFlags());
703     break;
704   }
705   case ISD::TargetIndex: {
706     const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
707     ID.AddInteger(TI->getIndex());
708     ID.AddInteger(TI->getOffset());
709     ID.AddInteger(TI->getTargetFlags());
710     break;
711   }
712   case ISD::LOAD: {
713     const LoadSDNode *LD = cast<LoadSDNode>(N);
714     ID.AddInteger(LD->getMemoryVT().getRawBits());
715     ID.AddInteger(LD->getRawSubclassData());
716     ID.AddInteger(LD->getPointerInfo().getAddrSpace());
717     break;
718   }
719   case ISD::STORE: {
720     const StoreSDNode *ST = cast<StoreSDNode>(N);
721     ID.AddInteger(ST->getMemoryVT().getRawBits());
722     ID.AddInteger(ST->getRawSubclassData());
723     ID.AddInteger(ST->getPointerInfo().getAddrSpace());
724     break;
725   }
726   case ISD::VP_LOAD: {
727     const VPLoadSDNode *ELD = cast<VPLoadSDNode>(N);
728     ID.AddInteger(ELD->getMemoryVT().getRawBits());
729     ID.AddInteger(ELD->getRawSubclassData());
730     ID.AddInteger(ELD->getPointerInfo().getAddrSpace());
731     break;
732   }
733   case ISD::VP_STORE: {
734     const VPStoreSDNode *EST = cast<VPStoreSDNode>(N);
735     ID.AddInteger(EST->getMemoryVT().getRawBits());
736     ID.AddInteger(EST->getRawSubclassData());
737     ID.AddInteger(EST->getPointerInfo().getAddrSpace());
738     break;
739   }
740   case ISD::VP_GATHER: {
741     const VPGatherSDNode *EG = cast<VPGatherSDNode>(N);
742     ID.AddInteger(EG->getMemoryVT().getRawBits());
743     ID.AddInteger(EG->getRawSubclassData());
744     ID.AddInteger(EG->getPointerInfo().getAddrSpace());
745     break;
746   }
747   case ISD::VP_SCATTER: {
748     const VPScatterSDNode *ES = cast<VPScatterSDNode>(N);
749     ID.AddInteger(ES->getMemoryVT().getRawBits());
750     ID.AddInteger(ES->getRawSubclassData());
751     ID.AddInteger(ES->getPointerInfo().getAddrSpace());
752     break;
753   }
754   case ISD::MLOAD: {
755     const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
756     ID.AddInteger(MLD->getMemoryVT().getRawBits());
757     ID.AddInteger(MLD->getRawSubclassData());
758     ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
759     break;
760   }
761   case ISD::MSTORE: {
762     const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
763     ID.AddInteger(MST->getMemoryVT().getRawBits());
764     ID.AddInteger(MST->getRawSubclassData());
765     ID.AddInteger(MST->getPointerInfo().getAddrSpace());
766     break;
767   }
768   case ISD::MGATHER: {
769     const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
770     ID.AddInteger(MG->getMemoryVT().getRawBits());
771     ID.AddInteger(MG->getRawSubclassData());
772     ID.AddInteger(MG->getPointerInfo().getAddrSpace());
773     break;
774   }
775   case ISD::MSCATTER: {
776     const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
777     ID.AddInteger(MS->getMemoryVT().getRawBits());
778     ID.AddInteger(MS->getRawSubclassData());
779     ID.AddInteger(MS->getPointerInfo().getAddrSpace());
780     break;
781   }
782   case ISD::ATOMIC_CMP_SWAP:
783   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
784   case ISD::ATOMIC_SWAP:
785   case ISD::ATOMIC_LOAD_ADD:
786   case ISD::ATOMIC_LOAD_SUB:
787   case ISD::ATOMIC_LOAD_AND:
788   case ISD::ATOMIC_LOAD_CLR:
789   case ISD::ATOMIC_LOAD_OR:
790   case ISD::ATOMIC_LOAD_XOR:
791   case ISD::ATOMIC_LOAD_NAND:
792   case ISD::ATOMIC_LOAD_MIN:
793   case ISD::ATOMIC_LOAD_MAX:
794   case ISD::ATOMIC_LOAD_UMIN:
795   case ISD::ATOMIC_LOAD_UMAX:
796   case ISD::ATOMIC_LOAD:
797   case ISD::ATOMIC_STORE: {
798     const AtomicSDNode *AT = cast<AtomicSDNode>(N);
799     ID.AddInteger(AT->getMemoryVT().getRawBits());
800     ID.AddInteger(AT->getRawSubclassData());
801     ID.AddInteger(AT->getPointerInfo().getAddrSpace());
802     break;
803   }
804   case ISD::PREFETCH: {
805     const MemSDNode *PF = cast<MemSDNode>(N);
806     ID.AddInteger(PF->getPointerInfo().getAddrSpace());
807     break;
808   }
809   case ISD::VECTOR_SHUFFLE: {
810     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
811     for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
812          i != e; ++i)
813       ID.AddInteger(SVN->getMaskElt(i));
814     break;
815   }
816   case ISD::TargetBlockAddress:
817   case ISD::BlockAddress: {
818     const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
819     ID.AddPointer(BA->getBlockAddress());
820     ID.AddInteger(BA->getOffset());
821     ID.AddInteger(BA->getTargetFlags());
822     break;
823   }
824   } // end switch (N->getOpcode())
825 
826   // Target specific memory nodes could also have address spaces to check.
827   if (N->isTargetMemoryOpcode())
828     ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
829 }
830 
831 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
832 /// data.
833 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
834   AddNodeIDOpcode(ID, N->getOpcode());
835   // Add the return value info.
836   AddNodeIDValueTypes(ID, N->getVTList());
837   // Add the operand info.
838   AddNodeIDOperands(ID, N->ops());
839 
840   // Handle SDNode leafs with special info.
841   AddNodeIDCustom(ID, N);
842 }
843 
844 //===----------------------------------------------------------------------===//
845 //                              SelectionDAG Class
846 //===----------------------------------------------------------------------===//
847 
848 /// doNotCSE - Return true if CSE should not be performed for this node.
849 static bool doNotCSE(SDNode *N) {
850   if (N->getValueType(0) == MVT::Glue)
851     return true; // Never CSE anything that produces a flag.
852 
853   switch (N->getOpcode()) {
854   default: break;
855   case ISD::HANDLENODE:
856   case ISD::EH_LABEL:
857     return true;   // Never CSE these nodes.
858   }
859 
860   // Check that remaining values produced are not flags.
861   for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
862     if (N->getValueType(i) == MVT::Glue)
863       return true; // Never CSE anything that produces a flag.
864 
865   return false;
866 }
867 
868 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
869 /// SelectionDAG.
870 void SelectionDAG::RemoveDeadNodes() {
871   // Create a dummy node (which is not added to allnodes), that adds a reference
872   // to the root node, preventing it from being deleted.
873   HandleSDNode Dummy(getRoot());
874 
875   SmallVector<SDNode*, 128> DeadNodes;
876 
877   // Add all obviously-dead nodes to the DeadNodes worklist.
878   for (SDNode &Node : allnodes())
879     if (Node.use_empty())
880       DeadNodes.push_back(&Node);
881 
882   RemoveDeadNodes(DeadNodes);
883 
884   // If the root changed (e.g. it was a dead load, update the root).
885   setRoot(Dummy.getValue());
886 }
887 
888 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
889 /// given list, and any nodes that become unreachable as a result.
890 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
891 
892   // Process the worklist, deleting the nodes and adding their uses to the
893   // worklist.
894   while (!DeadNodes.empty()) {
895     SDNode *N = DeadNodes.pop_back_val();
896     // Skip to next node if we've already managed to delete the node. This could
897     // happen if replacing a node causes a node previously added to the node to
898     // be deleted.
899     if (N->getOpcode() == ISD::DELETED_NODE)
900       continue;
901 
902     for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
903       DUL->NodeDeleted(N, nullptr);
904 
905     // Take the node out of the appropriate CSE map.
906     RemoveNodeFromCSEMaps(N);
907 
908     // Next, brutally remove the operand list.  This is safe to do, as there are
909     // no cycles in the graph.
910     for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
911       SDUse &Use = *I++;
912       SDNode *Operand = Use.getNode();
913       Use.set(SDValue());
914 
915       // Now that we removed this operand, see if there are no uses of it left.
916       if (Operand->use_empty())
917         DeadNodes.push_back(Operand);
918     }
919 
920     DeallocateNode(N);
921   }
922 }
923 
924 void SelectionDAG::RemoveDeadNode(SDNode *N){
925   SmallVector<SDNode*, 16> DeadNodes(1, N);
926 
927   // Create a dummy node that adds a reference to the root node, preventing
928   // it from being deleted.  (This matters if the root is an operand of the
929   // dead node.)
930   HandleSDNode Dummy(getRoot());
931 
932   RemoveDeadNodes(DeadNodes);
933 }
934 
935 void SelectionDAG::DeleteNode(SDNode *N) {
936   // First take this out of the appropriate CSE map.
937   RemoveNodeFromCSEMaps(N);
938 
939   // Finally, remove uses due to operands of this node, remove from the
940   // AllNodes list, and delete the node.
941   DeleteNodeNotInCSEMaps(N);
942 }
943 
944 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
945   assert(N->getIterator() != AllNodes.begin() &&
946          "Cannot delete the entry node!");
947   assert(N->use_empty() && "Cannot delete a node that is not dead!");
948 
949   // Drop all of the operands and decrement used node's use counts.
950   N->DropOperands();
951 
952   DeallocateNode(N);
953 }
954 
955 void SDDbgInfo::add(SDDbgValue *V, bool isParameter) {
956   assert(!(V->isVariadic() && isParameter));
957   if (isParameter)
958     ByvalParmDbgValues.push_back(V);
959   else
960     DbgValues.push_back(V);
961   for (const SDNode *Node : V->getSDNodes())
962     if (Node)
963       DbgValMap[Node].push_back(V);
964 }
965 
966 void SDDbgInfo::erase(const SDNode *Node) {
967   DbgValMapType::iterator I = DbgValMap.find(Node);
968   if (I == DbgValMap.end())
969     return;
970   for (auto &Val: I->second)
971     Val->setIsInvalidated();
972   DbgValMap.erase(I);
973 }
974 
975 void SelectionDAG::DeallocateNode(SDNode *N) {
976   // If we have operands, deallocate them.
977   removeOperands(N);
978 
979   NodeAllocator.Deallocate(AllNodes.remove(N));
980 
981   // Set the opcode to DELETED_NODE to help catch bugs when node
982   // memory is reallocated.
983   // FIXME: There are places in SDag that have grown a dependency on the opcode
984   // value in the released node.
985   __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
986   N->NodeType = ISD::DELETED_NODE;
987 
988   // If any of the SDDbgValue nodes refer to this SDNode, invalidate
989   // them and forget about that node.
990   DbgInfo->erase(N);
991 }
992 
993 #ifndef NDEBUG
994 /// VerifySDNode - Check the given SDNode.  Aborts if it is invalid.
995 static void VerifySDNode(SDNode *N) {
996   switch (N->getOpcode()) {
997   default:
998     break;
999   case ISD::BUILD_PAIR: {
1000     EVT VT = N->getValueType(0);
1001     assert(N->getNumValues() == 1 && "Too many results!");
1002     assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
1003            "Wrong return type!");
1004     assert(N->getNumOperands() == 2 && "Wrong number of operands!");
1005     assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
1006            "Mismatched operand types!");
1007     assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
1008            "Wrong operand type!");
1009     assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
1010            "Wrong return type size");
1011     break;
1012   }
1013   case ISD::BUILD_VECTOR: {
1014     assert(N->getNumValues() == 1 && "Too many results!");
1015     assert(N->getValueType(0).isVector() && "Wrong return type!");
1016     assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
1017            "Wrong number of operands!");
1018     EVT EltVT = N->getValueType(0).getVectorElementType();
1019     for (const SDUse &Op : N->ops()) {
1020       assert((Op.getValueType() == EltVT ||
1021               (EltVT.isInteger() && Op.getValueType().isInteger() &&
1022                EltVT.bitsLE(Op.getValueType()))) &&
1023              "Wrong operand type!");
1024       assert(Op.getValueType() == N->getOperand(0).getValueType() &&
1025              "Operands must all have the same type");
1026     }
1027     break;
1028   }
1029   }
1030 }
1031 #endif // NDEBUG
1032 
1033 /// Insert a newly allocated node into the DAG.
1034 ///
1035 /// Handles insertion into the all nodes list and CSE map, as well as
1036 /// verification and other common operations when a new node is allocated.
1037 void SelectionDAG::InsertNode(SDNode *N) {
1038   AllNodes.push_back(N);
1039 #ifndef NDEBUG
1040   N->PersistentId = NextPersistentId++;
1041   VerifySDNode(N);
1042 #endif
1043   for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1044     DUL->NodeInserted(N);
1045 }
1046 
1047 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
1048 /// correspond to it.  This is useful when we're about to delete or repurpose
1049 /// the node.  We don't want future request for structurally identical nodes
1050 /// to return N anymore.
1051 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
1052   bool Erased = false;
1053   switch (N->getOpcode()) {
1054   case ISD::HANDLENODE: return false;  // noop.
1055   case ISD::CONDCODE:
1056     assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
1057            "Cond code doesn't exist!");
1058     Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
1059     CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
1060     break;
1061   case ISD::ExternalSymbol:
1062     Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
1063     break;
1064   case ISD::TargetExternalSymbol: {
1065     ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
1066     Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1067         ESN->getSymbol(), ESN->getTargetFlags()));
1068     break;
1069   }
1070   case ISD::MCSymbol: {
1071     auto *MCSN = cast<MCSymbolSDNode>(N);
1072     Erased = MCSymbols.erase(MCSN->getMCSymbol());
1073     break;
1074   }
1075   case ISD::VALUETYPE: {
1076     EVT VT = cast<VTSDNode>(N)->getVT();
1077     if (VT.isExtended()) {
1078       Erased = ExtendedValueTypeNodes.erase(VT);
1079     } else {
1080       Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
1081       ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
1082     }
1083     break;
1084   }
1085   default:
1086     // Remove it from the CSE Map.
1087     assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
1088     assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
1089     Erased = CSEMap.RemoveNode(N);
1090     break;
1091   }
1092 #ifndef NDEBUG
1093   // Verify that the node was actually in one of the CSE maps, unless it has a
1094   // flag result (which cannot be CSE'd) or is one of the special cases that are
1095   // not subject to CSE.
1096   if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
1097       !N->isMachineOpcode() && !doNotCSE(N)) {
1098     N->dump(this);
1099     dbgs() << "\n";
1100     llvm_unreachable("Node is not in map!");
1101   }
1102 #endif
1103   return Erased;
1104 }
1105 
1106 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
1107 /// maps and modified in place. Add it back to the CSE maps, unless an identical
1108 /// node already exists, in which case transfer all its users to the existing
1109 /// node. This transfer can potentially trigger recursive merging.
1110 void
1111 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
1112   // For node types that aren't CSE'd, just act as if no identical node
1113   // already exists.
1114   if (!doNotCSE(N)) {
1115     SDNode *Existing = CSEMap.GetOrInsertNode(N);
1116     if (Existing != N) {
1117       // If there was already an existing matching node, use ReplaceAllUsesWith
1118       // to replace the dead one with the existing one.  This can cause
1119       // recursive merging of other unrelated nodes down the line.
1120       ReplaceAllUsesWith(N, Existing);
1121 
1122       // N is now dead. Inform the listeners and delete it.
1123       for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1124         DUL->NodeDeleted(N, Existing);
1125       DeleteNodeNotInCSEMaps(N);
1126       return;
1127     }
1128   }
1129 
1130   // If the node doesn't already exist, we updated it.  Inform listeners.
1131   for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1132     DUL->NodeUpdated(N);
1133 }
1134 
1135 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1136 /// were replaced with those specified.  If this node is never memoized,
1137 /// return null, otherwise return a pointer to the slot it would take.  If a
1138 /// node already exists with these operands, the slot will be non-null.
1139 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
1140                                            void *&InsertPos) {
1141   if (doNotCSE(N))
1142     return nullptr;
1143 
1144   SDValue Ops[] = { Op };
1145   FoldingSetNodeID ID;
1146   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1147   AddNodeIDCustom(ID, N);
1148   SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1149   if (Node)
1150     Node->intersectFlagsWith(N->getFlags());
1151   return Node;
1152 }
1153 
1154 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1155 /// were replaced with those specified.  If this node is never memoized,
1156 /// return null, otherwise return a pointer to the slot it would take.  If a
1157 /// node already exists with these operands, the slot will be non-null.
1158 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
1159                                            SDValue Op1, SDValue Op2,
1160                                            void *&InsertPos) {
1161   if (doNotCSE(N))
1162     return nullptr;
1163 
1164   SDValue Ops[] = { Op1, Op2 };
1165   FoldingSetNodeID ID;
1166   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1167   AddNodeIDCustom(ID, N);
1168   SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1169   if (Node)
1170     Node->intersectFlagsWith(N->getFlags());
1171   return Node;
1172 }
1173 
1174 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1175 /// were replaced with those specified.  If this node is never memoized,
1176 /// return null, otherwise return a pointer to the slot it would take.  If a
1177 /// node already exists with these operands, the slot will be non-null.
1178 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
1179                                            void *&InsertPos) {
1180   if (doNotCSE(N))
1181     return nullptr;
1182 
1183   FoldingSetNodeID ID;
1184   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1185   AddNodeIDCustom(ID, N);
1186   SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1187   if (Node)
1188     Node->intersectFlagsWith(N->getFlags());
1189   return Node;
1190 }
1191 
1192 Align SelectionDAG::getEVTAlign(EVT VT) const {
1193   Type *Ty = VT == MVT::iPTR ?
1194                    PointerType::get(Type::getInt8Ty(*getContext()), 0) :
1195                    VT.getTypeForEVT(*getContext());
1196 
1197   return getDataLayout().getABITypeAlign(Ty);
1198 }
1199 
1200 // EntryNode could meaningfully have debug info if we can find it...
1201 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
1202     : TM(tm), OptLevel(OL),
1203       EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
1204       Root(getEntryNode()) {
1205   InsertNode(&EntryNode);
1206   DbgInfo = new SDDbgInfo();
1207 }
1208 
1209 void SelectionDAG::init(MachineFunction &NewMF,
1210                         OptimizationRemarkEmitter &NewORE,
1211                         Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
1212                         LegacyDivergenceAnalysis * Divergence,
1213                         ProfileSummaryInfo *PSIin,
1214                         BlockFrequencyInfo *BFIin) {
1215   MF = &NewMF;
1216   SDAGISelPass = PassPtr;
1217   ORE = &NewORE;
1218   TLI = getSubtarget().getTargetLowering();
1219   TSI = getSubtarget().getSelectionDAGInfo();
1220   LibInfo = LibraryInfo;
1221   Context = &MF->getFunction().getContext();
1222   DA = Divergence;
1223   PSI = PSIin;
1224   BFI = BFIin;
1225 }
1226 
1227 SelectionDAG::~SelectionDAG() {
1228   assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1229   allnodes_clear();
1230   OperandRecycler.clear(OperandAllocator);
1231   delete DbgInfo;
1232 }
1233 
1234 bool SelectionDAG::shouldOptForSize() const {
1235   return MF->getFunction().hasOptSize() ||
1236       llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
1237 }
1238 
1239 void SelectionDAG::allnodes_clear() {
1240   assert(&*AllNodes.begin() == &EntryNode);
1241   AllNodes.remove(AllNodes.begin());
1242   while (!AllNodes.empty())
1243     DeallocateNode(&AllNodes.front());
1244 #ifndef NDEBUG
1245   NextPersistentId = 0;
1246 #endif
1247 }
1248 
1249 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1250                                           void *&InsertPos) {
1251   SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1252   if (N) {
1253     switch (N->getOpcode()) {
1254     default: break;
1255     case ISD::Constant:
1256     case ISD::ConstantFP:
1257       llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1258                        "debug location.  Use another overload.");
1259     }
1260   }
1261   return N;
1262 }
1263 
1264 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1265                                           const SDLoc &DL, void *&InsertPos) {
1266   SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1267   if (N) {
1268     switch (N->getOpcode()) {
1269     case ISD::Constant:
1270     case ISD::ConstantFP:
1271       // Erase debug location from the node if the node is used at several
1272       // different places. Do not propagate one location to all uses as it
1273       // will cause a worse single stepping debugging experience.
1274       if (N->getDebugLoc() != DL.getDebugLoc())
1275         N->setDebugLoc(DebugLoc());
1276       break;
1277     default:
1278       // When the node's point of use is located earlier in the instruction
1279       // sequence than its prior point of use, update its debug info to the
1280       // earlier location.
1281       if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
1282         N->setDebugLoc(DL.getDebugLoc());
1283       break;
1284     }
1285   }
1286   return N;
1287 }
1288 
1289 void SelectionDAG::clear() {
1290   allnodes_clear();
1291   OperandRecycler.clear(OperandAllocator);
1292   OperandAllocator.Reset();
1293   CSEMap.clear();
1294 
1295   ExtendedValueTypeNodes.clear();
1296   ExternalSymbols.clear();
1297   TargetExternalSymbols.clear();
1298   MCSymbols.clear();
1299   SDCallSiteDbgInfo.clear();
1300   std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
1301             static_cast<CondCodeSDNode*>(nullptr));
1302   std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
1303             static_cast<SDNode*>(nullptr));
1304 
1305   EntryNode.UseList = nullptr;
1306   InsertNode(&EntryNode);
1307   Root = getEntryNode();
1308   DbgInfo->clear();
1309 }
1310 
1311 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
1312   return VT.bitsGT(Op.getValueType())
1313              ? getNode(ISD::FP_EXTEND, DL, VT, Op)
1314              : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
1315 }
1316 
1317 std::pair<SDValue, SDValue>
1318 SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
1319                                        const SDLoc &DL, EVT VT) {
1320   assert(!VT.bitsEq(Op.getValueType()) &&
1321          "Strict no-op FP extend/round not allowed.");
1322   SDValue Res =
1323       VT.bitsGT(Op.getValueType())
1324           ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
1325           : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
1326                     {Chain, Op, getIntPtrConstant(0, DL)});
1327 
1328   return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
1329 }
1330 
1331 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1332   return VT.bitsGT(Op.getValueType()) ?
1333     getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1334     getNode(ISD::TRUNCATE, DL, VT, Op);
1335 }
1336 
1337 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1338   return VT.bitsGT(Op.getValueType()) ?
1339     getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1340     getNode(ISD::TRUNCATE, DL, VT, Op);
1341 }
1342 
1343 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1344   return VT.bitsGT(Op.getValueType()) ?
1345     getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1346     getNode(ISD::TRUNCATE, DL, VT, Op);
1347 }
1348 
1349 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
1350                                         EVT OpVT) {
1351   if (VT.bitsLE(Op.getValueType()))
1352     return getNode(ISD::TRUNCATE, SL, VT, Op);
1353 
1354   TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1355   return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1356 }
1357 
1358 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1359   EVT OpVT = Op.getValueType();
1360   assert(VT.isInteger() && OpVT.isInteger() &&
1361          "Cannot getZeroExtendInReg FP types");
1362   assert(VT.isVector() == OpVT.isVector() &&
1363          "getZeroExtendInReg type should be vector iff the operand "
1364          "type is vector!");
1365   assert((!VT.isVector() ||
1366           VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
1367          "Vector element counts must match in getZeroExtendInReg");
1368   assert(VT.bitsLE(OpVT) && "Not extending!");
1369   if (OpVT == VT)
1370     return Op;
1371   APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
1372                                    VT.getScalarSizeInBits());
1373   return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
1374 }
1375 
1376 SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1377   // Only unsigned pointer semantics are supported right now. In the future this
1378   // might delegate to TLI to check pointer signedness.
1379   return getZExtOrTrunc(Op, DL, VT);
1380 }
1381 
1382 SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1383   // Only unsigned pointer semantics are supported right now. In the future this
1384   // might delegate to TLI to check pointer signedness.
1385   return getZeroExtendInReg(Op, DL, VT);
1386 }
1387 
1388 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1389 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1390   return getNode(ISD::XOR, DL, VT, Val, getAllOnesConstant(DL, VT));
1391 }
1392 
1393 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1394   SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1395   return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1396 }
1397 
1398 SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
1399                                       EVT OpVT) {
1400   if (!V)
1401     return getConstant(0, DL, VT);
1402 
1403   switch (TLI->getBooleanContents(OpVT)) {
1404   case TargetLowering::ZeroOrOneBooleanContent:
1405   case TargetLowering::UndefinedBooleanContent:
1406     return getConstant(1, DL, VT);
1407   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1408     return getAllOnesConstant(DL, VT);
1409   }
1410   llvm_unreachable("Unexpected boolean content enum!");
1411 }
1412 
1413 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1414                                   bool isT, bool isO) {
1415   EVT EltVT = VT.getScalarType();
1416   assert((EltVT.getSizeInBits() >= 64 ||
1417           (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1418          "getConstant with a uint64_t value that doesn't fit in the type!");
1419   return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1420 }
1421 
1422 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1423                                   bool isT, bool isO) {
1424   return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1425 }
1426 
1427 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1428                                   EVT VT, bool isT, bool isO) {
1429   assert(VT.isInteger() && "Cannot create FP integer constant!");
1430 
1431   EVT EltVT = VT.getScalarType();
1432   const ConstantInt *Elt = &Val;
1433 
1434   // In some cases the vector type is legal but the element type is illegal and
1435   // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
1436   // inserted value (the type does not need to match the vector element type).
1437   // Any extra bits introduced will be truncated away.
1438   if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1439                            TargetLowering::TypePromoteInteger) {
1440     EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1441     APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1442     Elt = ConstantInt::get(*getContext(), NewVal);
1443   }
1444   // In other cases the element type is illegal and needs to be expanded, for
1445   // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1446   // the value into n parts and use a vector type with n-times the elements.
1447   // Then bitcast to the type requested.
1448   // Legalizing constants too early makes the DAGCombiner's job harder so we
1449   // only legalize if the DAG tells us we must produce legal types.
1450   else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1451            TLI->getTypeAction(*getContext(), EltVT) ==
1452                TargetLowering::TypeExpandInteger) {
1453     const APInt &NewVal = Elt->getValue();
1454     EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1455     unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1456 
1457     // For scalable vectors, try to use a SPLAT_VECTOR_PARTS node.
1458     if (VT.isScalableVector()) {
1459       assert(EltVT.getSizeInBits() % ViaEltSizeInBits == 0 &&
1460              "Can only handle an even split!");
1461       unsigned Parts = EltVT.getSizeInBits() / ViaEltSizeInBits;
1462 
1463       SmallVector<SDValue, 2> ScalarParts;
1464       for (unsigned i = 0; i != Parts; ++i)
1465         ScalarParts.push_back(getConstant(
1466             NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1467             ViaEltVT, isT, isO));
1468 
1469       return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts);
1470     }
1471 
1472     unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1473     EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1474 
1475     // Check the temporary vector is the correct size. If this fails then
1476     // getTypeToTransformTo() probably returned a type whose size (in bits)
1477     // isn't a power-of-2 factor of the requested type size.
1478     assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1479 
1480     SmallVector<SDValue, 2> EltParts;
1481     for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i)
1482       EltParts.push_back(getConstant(
1483           NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1484           ViaEltVT, isT, isO));
1485 
1486     // EltParts is currently in little endian order. If we actually want
1487     // big-endian order then reverse it now.
1488     if (getDataLayout().isBigEndian())
1489       std::reverse(EltParts.begin(), EltParts.end());
1490 
1491     // The elements must be reversed when the element order is different
1492     // to the endianness of the elements (because the BITCAST is itself a
1493     // vector shuffle in this situation). However, we do not need any code to
1494     // perform this reversal because getConstant() is producing a vector
1495     // splat.
1496     // This situation occurs in MIPS MSA.
1497 
1498     SmallVector<SDValue, 8> Ops;
1499     for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1500       llvm::append_range(Ops, EltParts);
1501 
1502     SDValue V =
1503         getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1504     return V;
1505   }
1506 
1507   assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1508          "APInt size does not match type size!");
1509   unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1510   FoldingSetNodeID ID;
1511   AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1512   ID.AddPointer(Elt);
1513   ID.AddBoolean(isO);
1514   void *IP = nullptr;
1515   SDNode *N = nullptr;
1516   if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1517     if (!VT.isVector())
1518       return SDValue(N, 0);
1519 
1520   if (!N) {
1521     N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
1522     CSEMap.InsertNode(N, IP);
1523     InsertNode(N);
1524     NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1525   }
1526 
1527   SDValue Result(N, 0);
1528   if (VT.isScalableVector())
1529     Result = getSplatVector(VT, DL, Result);
1530   else if (VT.isVector())
1531     Result = getSplatBuildVector(VT, DL, Result);
1532 
1533   return Result;
1534 }
1535 
1536 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1537                                         bool isTarget) {
1538   return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1539 }
1540 
1541 SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
1542                                              const SDLoc &DL, bool LegalTypes) {
1543   assert(VT.isInteger() && "Shift amount is not an integer type!");
1544   EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
1545   return getConstant(Val, DL, ShiftVT);
1546 }
1547 
1548 SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
1549                                            bool isTarget) {
1550   return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
1551 }
1552 
1553 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1554                                     bool isTarget) {
1555   return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1556 }
1557 
1558 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1559                                     EVT VT, bool isTarget) {
1560   assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1561 
1562   EVT EltVT = VT.getScalarType();
1563 
1564   // Do the map lookup using the actual bit pattern for the floating point
1565   // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1566   // we don't have issues with SNANs.
1567   unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1568   FoldingSetNodeID ID;
1569   AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1570   ID.AddPointer(&V);
1571   void *IP = nullptr;
1572   SDNode *N = nullptr;
1573   if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1574     if (!VT.isVector())
1575       return SDValue(N, 0);
1576 
1577   if (!N) {
1578     N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1579     CSEMap.InsertNode(N, IP);
1580     InsertNode(N);
1581   }
1582 
1583   SDValue Result(N, 0);
1584   if (VT.isScalableVector())
1585     Result = getSplatVector(VT, DL, Result);
1586   else if (VT.isVector())
1587     Result = getSplatBuildVector(VT, DL, Result);
1588   NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1589   return Result;
1590 }
1591 
1592 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1593                                     bool isTarget) {
1594   EVT EltVT = VT.getScalarType();
1595   if (EltVT == MVT::f32)
1596     return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1597   if (EltVT == MVT::f64)
1598     return getConstantFP(APFloat(Val), DL, VT, isTarget);
1599   if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1600       EltVT == MVT::f16 || EltVT == MVT::bf16) {
1601     bool Ignored;
1602     APFloat APF = APFloat(Val);
1603     APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1604                 &Ignored);
1605     return getConstantFP(APF, DL, VT, isTarget);
1606   }
1607   llvm_unreachable("Unsupported type in getConstantFP");
1608 }
1609 
1610 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1611                                        EVT VT, int64_t Offset, bool isTargetGA,
1612                                        unsigned TargetFlags) {
1613   assert((TargetFlags == 0 || isTargetGA) &&
1614          "Cannot set target flags on target-independent globals");
1615 
1616   // Truncate (with sign-extension) the offset value to the pointer size.
1617   unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1618   if (BitWidth < 64)
1619     Offset = SignExtend64(Offset, BitWidth);
1620 
1621   unsigned Opc;
1622   if (GV->isThreadLocal())
1623     Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1624   else
1625     Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1626 
1627   FoldingSetNodeID ID;
1628   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1629   ID.AddPointer(GV);
1630   ID.AddInteger(Offset);
1631   ID.AddInteger(TargetFlags);
1632   void *IP = nullptr;
1633   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1634     return SDValue(E, 0);
1635 
1636   auto *N = newSDNode<GlobalAddressSDNode>(
1637       Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1638   CSEMap.InsertNode(N, IP);
1639     InsertNode(N);
1640   return SDValue(N, 0);
1641 }
1642 
1643 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1644   unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1645   FoldingSetNodeID ID;
1646   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1647   ID.AddInteger(FI);
1648   void *IP = nullptr;
1649   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1650     return SDValue(E, 0);
1651 
1652   auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1653   CSEMap.InsertNode(N, IP);
1654   InsertNode(N);
1655   return SDValue(N, 0);
1656 }
1657 
1658 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1659                                    unsigned TargetFlags) {
1660   assert((TargetFlags == 0 || isTarget) &&
1661          "Cannot set target flags on target-independent jump tables");
1662   unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1663   FoldingSetNodeID ID;
1664   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1665   ID.AddInteger(JTI);
1666   ID.AddInteger(TargetFlags);
1667   void *IP = nullptr;
1668   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1669     return SDValue(E, 0);
1670 
1671   auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1672   CSEMap.InsertNode(N, IP);
1673   InsertNode(N);
1674   return SDValue(N, 0);
1675 }
1676 
1677 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1678                                       MaybeAlign Alignment, int Offset,
1679                                       bool isTarget, unsigned TargetFlags) {
1680   assert((TargetFlags == 0 || isTarget) &&
1681          "Cannot set target flags on target-independent globals");
1682   if (!Alignment)
1683     Alignment = shouldOptForSize()
1684                     ? getDataLayout().getABITypeAlign(C->getType())
1685                     : getDataLayout().getPrefTypeAlign(C->getType());
1686   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1687   FoldingSetNodeID ID;
1688   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1689   ID.AddInteger(Alignment->value());
1690   ID.AddInteger(Offset);
1691   ID.AddPointer(C);
1692   ID.AddInteger(TargetFlags);
1693   void *IP = nullptr;
1694   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1695     return SDValue(E, 0);
1696 
1697   auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1698                                           TargetFlags);
1699   CSEMap.InsertNode(N, IP);
1700   InsertNode(N);
1701   SDValue V = SDValue(N, 0);
1702   NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
1703   return V;
1704 }
1705 
1706 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1707                                       MaybeAlign Alignment, int Offset,
1708                                       bool isTarget, unsigned TargetFlags) {
1709   assert((TargetFlags == 0 || isTarget) &&
1710          "Cannot set target flags on target-independent globals");
1711   if (!Alignment)
1712     Alignment = getDataLayout().getPrefTypeAlign(C->getType());
1713   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1714   FoldingSetNodeID ID;
1715   AddNodeIDNode(ID, Opc, getVTList(VT), None);
1716   ID.AddInteger(Alignment->value());
1717   ID.AddInteger(Offset);
1718   C->addSelectionDAGCSEId(ID);
1719   ID.AddInteger(TargetFlags);
1720   void *IP = nullptr;
1721   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1722     return SDValue(E, 0);
1723 
1724   auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1725                                           TargetFlags);
1726   CSEMap.InsertNode(N, IP);
1727   InsertNode(N);
1728   return SDValue(N, 0);
1729 }
1730 
1731 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1732                                      unsigned TargetFlags) {
1733   FoldingSetNodeID ID;
1734   AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1735   ID.AddInteger(Index);
1736   ID.AddInteger(Offset);
1737   ID.AddInteger(TargetFlags);
1738   void *IP = nullptr;
1739   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1740     return SDValue(E, 0);
1741 
1742   auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1743   CSEMap.InsertNode(N, IP);
1744   InsertNode(N);
1745   return SDValue(N, 0);
1746 }
1747 
1748 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1749   FoldingSetNodeID ID;
1750   AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1751   ID.AddPointer(MBB);
1752   void *IP = nullptr;
1753   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1754     return SDValue(E, 0);
1755 
1756   auto *N = newSDNode<BasicBlockSDNode>(MBB);
1757   CSEMap.InsertNode(N, IP);
1758   InsertNode(N);
1759   return SDValue(N, 0);
1760 }
1761 
1762 SDValue SelectionDAG::getValueType(EVT VT) {
1763   if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1764       ValueTypeNodes.size())
1765     ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1766 
1767   SDNode *&N = VT.isExtended() ?
1768     ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1769 
1770   if (N) return SDValue(N, 0);
1771   N = newSDNode<VTSDNode>(VT);
1772   InsertNode(N);
1773   return SDValue(N, 0);
1774 }
1775 
1776 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1777   SDNode *&N = ExternalSymbols[Sym];
1778   if (N) return SDValue(N, 0);
1779   N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1780   InsertNode(N);
1781   return SDValue(N, 0);
1782 }
1783 
1784 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1785   SDNode *&N = MCSymbols[Sym];
1786   if (N)
1787     return SDValue(N, 0);
1788   N = newSDNode<MCSymbolSDNode>(Sym, VT);
1789   InsertNode(N);
1790   return SDValue(N, 0);
1791 }
1792 
1793 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1794                                               unsigned TargetFlags) {
1795   SDNode *&N =
1796       TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
1797   if (N) return SDValue(N, 0);
1798   N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1799   InsertNode(N);
1800   return SDValue(N, 0);
1801 }
1802 
1803 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1804   if ((unsigned)Cond >= CondCodeNodes.size())
1805     CondCodeNodes.resize(Cond+1);
1806 
1807   if (!CondCodeNodes[Cond]) {
1808     auto *N = newSDNode<CondCodeSDNode>(Cond);
1809     CondCodeNodes[Cond] = N;
1810     InsertNode(N);
1811   }
1812 
1813   return SDValue(CondCodeNodes[Cond], 0);
1814 }
1815 
1816 SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT) {
1817   APInt One(ResVT.getScalarSizeInBits(), 1);
1818   return getStepVector(DL, ResVT, One);
1819 }
1820 
1821 SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal) {
1822   assert(ResVT.getScalarSizeInBits() == StepVal.getBitWidth());
1823   if (ResVT.isScalableVector())
1824     return getNode(
1825         ISD::STEP_VECTOR, DL, ResVT,
1826         getTargetConstant(StepVal, DL, ResVT.getVectorElementType()));
1827 
1828   SmallVector<SDValue, 16> OpsStepConstants;
1829   for (uint64_t i = 0; i < ResVT.getVectorNumElements(); i++)
1830     OpsStepConstants.push_back(
1831         getConstant(StepVal * i, DL, ResVT.getVectorElementType()));
1832   return getBuildVector(ResVT, DL, OpsStepConstants);
1833 }
1834 
1835 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1836 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1837 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1838   std::swap(N1, N2);
1839   ShuffleVectorSDNode::commuteMask(M);
1840 }
1841 
1842 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1843                                        SDValue N2, ArrayRef<int> Mask) {
1844   assert(VT.getVectorNumElements() == Mask.size() &&
1845          "Must have the same number of vector elements as mask elements!");
1846   assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1847          "Invalid VECTOR_SHUFFLE");
1848 
1849   // Canonicalize shuffle undef, undef -> undef
1850   if (N1.isUndef() && N2.isUndef())
1851     return getUNDEF(VT);
1852 
1853   // Validate that all indices in Mask are within the range of the elements
1854   // input to the shuffle.
1855   int NElts = Mask.size();
1856   assert(llvm::all_of(Mask,
1857                       [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1858          "Index out of range");
1859 
1860   // Copy the mask so we can do any needed cleanup.
1861   SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1862 
1863   // Canonicalize shuffle v, v -> v, undef
1864   if (N1 == N2) {
1865     N2 = getUNDEF(VT);
1866     for (int i = 0; i != NElts; ++i)
1867       if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1868   }
1869 
1870   // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
1871   if (N1.isUndef())
1872     commuteShuffle(N1, N2, MaskVec);
1873 
1874   if (TLI->hasVectorBlend()) {
1875     // If shuffling a splat, try to blend the splat instead. We do this here so
1876     // that even when this arises during lowering we don't have to re-handle it.
1877     auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1878       BitVector UndefElements;
1879       SDValue Splat = BV->getSplatValue(&UndefElements);
1880       if (!Splat)
1881         return;
1882 
1883       for (int i = 0; i < NElts; ++i) {
1884         if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1885           continue;
1886 
1887         // If this input comes from undef, mark it as such.
1888         if (UndefElements[MaskVec[i] - Offset]) {
1889           MaskVec[i] = -1;
1890           continue;
1891         }
1892 
1893         // If we can blend a non-undef lane, use that instead.
1894         if (!UndefElements[i])
1895           MaskVec[i] = i + Offset;
1896       }
1897     };
1898     if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1899       BlendSplat(N1BV, 0);
1900     if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1901       BlendSplat(N2BV, NElts);
1902   }
1903 
1904   // Canonicalize all index into lhs, -> shuffle lhs, undef
1905   // Canonicalize all index into rhs, -> shuffle rhs, undef
1906   bool AllLHS = true, AllRHS = true;
1907   bool N2Undef = N2.isUndef();
1908   for (int i = 0; i != NElts; ++i) {
1909     if (MaskVec[i] >= NElts) {
1910       if (N2Undef)
1911         MaskVec[i] = -1;
1912       else
1913         AllLHS = false;
1914     } else if (MaskVec[i] >= 0) {
1915       AllRHS = false;
1916     }
1917   }
1918   if (AllLHS && AllRHS)
1919     return getUNDEF(VT);
1920   if (AllLHS && !N2Undef)
1921     N2 = getUNDEF(VT);
1922   if (AllRHS) {
1923     N1 = getUNDEF(VT);
1924     commuteShuffle(N1, N2, MaskVec);
1925   }
1926   // Reset our undef status after accounting for the mask.
1927   N2Undef = N2.isUndef();
1928   // Re-check whether both sides ended up undef.
1929   if (N1.isUndef() && N2Undef)
1930     return getUNDEF(VT);
1931 
1932   // If Identity shuffle return that node.
1933   bool Identity = true, AllSame = true;
1934   for (int i = 0; i != NElts; ++i) {
1935     if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1936     if (MaskVec[i] != MaskVec[0]) AllSame = false;
1937   }
1938   if (Identity && NElts)
1939     return N1;
1940 
1941   // Shuffling a constant splat doesn't change the result.
1942   if (N2Undef) {
1943     SDValue V = N1;
1944 
1945     // Look through any bitcasts. We check that these don't change the number
1946     // (and size) of elements and just changes their types.
1947     while (V.getOpcode() == ISD::BITCAST)
1948       V = V->getOperand(0);
1949 
1950     // A splat should always show up as a build vector node.
1951     if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1952       BitVector UndefElements;
1953       SDValue Splat = BV->getSplatValue(&UndefElements);
1954       // If this is a splat of an undef, shuffling it is also undef.
1955       if (Splat && Splat.isUndef())
1956         return getUNDEF(VT);
1957 
1958       bool SameNumElts =
1959           V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1960 
1961       // We only have a splat which can skip shuffles if there is a splatted
1962       // value and no undef lanes rearranged by the shuffle.
1963       if (Splat && UndefElements.none()) {
1964         // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1965         // number of elements match or the value splatted is a zero constant.
1966         if (SameNumElts)
1967           return N1;
1968         if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1969           if (C->isZero())
1970             return N1;
1971       }
1972 
1973       // If the shuffle itself creates a splat, build the vector directly.
1974       if (AllSame && SameNumElts) {
1975         EVT BuildVT = BV->getValueType(0);
1976         const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1977         SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1978 
1979         // We may have jumped through bitcasts, so the type of the
1980         // BUILD_VECTOR may not match the type of the shuffle.
1981         if (BuildVT != VT)
1982           NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1983         return NewBV;
1984       }
1985     }
1986   }
1987 
1988   FoldingSetNodeID ID;
1989   SDValue Ops[2] = { N1, N2 };
1990   AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1991   for (int i = 0; i != NElts; ++i)
1992     ID.AddInteger(MaskVec[i]);
1993 
1994   void* IP = nullptr;
1995   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1996     return SDValue(E, 0);
1997 
1998   // Allocate the mask array for the node out of the BumpPtrAllocator, since
1999   // SDNode doesn't have access to it.  This memory will be "leaked" when
2000   // the node is deallocated, but recovered when the NodeAllocator is released.
2001   int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
2002   llvm::copy(MaskVec, MaskAlloc);
2003 
2004   auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
2005                                            dl.getDebugLoc(), MaskAlloc);
2006   createOperands(N, Ops);
2007 
2008   CSEMap.InsertNode(N, IP);
2009   InsertNode(N);
2010   SDValue V = SDValue(N, 0);
2011   NewSDValueDbgMsg(V, "Creating new node: ", this);
2012   return V;
2013 }
2014 
2015 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
2016   EVT VT = SV.getValueType(0);
2017   SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
2018   ShuffleVectorSDNode::commuteMask(MaskVec);
2019 
2020   SDValue Op0 = SV.getOperand(0);
2021   SDValue Op1 = SV.getOperand(1);
2022   return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
2023 }
2024 
2025 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
2026   FoldingSetNodeID ID;
2027   AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
2028   ID.AddInteger(RegNo);
2029   void *IP = nullptr;
2030   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2031     return SDValue(E, 0);
2032 
2033   auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
2034   N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
2035   CSEMap.InsertNode(N, IP);
2036   InsertNode(N);
2037   return SDValue(N, 0);
2038 }
2039 
2040 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
2041   FoldingSetNodeID ID;
2042   AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
2043   ID.AddPointer(RegMask);
2044   void *IP = nullptr;
2045   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2046     return SDValue(E, 0);
2047 
2048   auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
2049   CSEMap.InsertNode(N, IP);
2050   InsertNode(N);
2051   return SDValue(N, 0);
2052 }
2053 
2054 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
2055                                  MCSymbol *Label) {
2056   return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
2057 }
2058 
2059 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
2060                                    SDValue Root, MCSymbol *Label) {
2061   FoldingSetNodeID ID;
2062   SDValue Ops[] = { Root };
2063   AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
2064   ID.AddPointer(Label);
2065   void *IP = nullptr;
2066   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2067     return SDValue(E, 0);
2068 
2069   auto *N =
2070       newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
2071   createOperands(N, Ops);
2072 
2073   CSEMap.InsertNode(N, IP);
2074   InsertNode(N);
2075   return SDValue(N, 0);
2076 }
2077 
2078 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
2079                                       int64_t Offset, bool isTarget,
2080                                       unsigned TargetFlags) {
2081   unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
2082 
2083   FoldingSetNodeID ID;
2084   AddNodeIDNode(ID, Opc, getVTList(VT), None);
2085   ID.AddPointer(BA);
2086   ID.AddInteger(Offset);
2087   ID.AddInteger(TargetFlags);
2088   void *IP = nullptr;
2089   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2090     return SDValue(E, 0);
2091 
2092   auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
2093   CSEMap.InsertNode(N, IP);
2094   InsertNode(N);
2095   return SDValue(N, 0);
2096 }
2097 
2098 SDValue SelectionDAG::getSrcValue(const Value *V) {
2099   FoldingSetNodeID ID;
2100   AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
2101   ID.AddPointer(V);
2102 
2103   void *IP = nullptr;
2104   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2105     return SDValue(E, 0);
2106 
2107   auto *N = newSDNode<SrcValueSDNode>(V);
2108   CSEMap.InsertNode(N, IP);
2109   InsertNode(N);
2110   return SDValue(N, 0);
2111 }
2112 
2113 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
2114   FoldingSetNodeID ID;
2115   AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
2116   ID.AddPointer(MD);
2117 
2118   void *IP = nullptr;
2119   if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2120     return SDValue(E, 0);
2121 
2122   auto *N = newSDNode<MDNodeSDNode>(MD);
2123   CSEMap.InsertNode(N, IP);
2124   InsertNode(N);
2125   return SDValue(N, 0);
2126 }
2127 
2128 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
2129   if (VT == V.getValueType())
2130     return V;
2131 
2132   return getNode(ISD::BITCAST, SDLoc(V), VT, V);
2133 }
2134 
2135 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
2136                                        unsigned SrcAS, unsigned DestAS) {
2137   SDValue Ops[] = {Ptr};
2138   FoldingSetNodeID ID;
2139   AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
2140   ID.AddInteger(SrcAS);
2141   ID.AddInteger(DestAS);
2142 
2143   void *IP = nullptr;
2144   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2145     return SDValue(E, 0);
2146 
2147   auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
2148                                            VT, SrcAS, DestAS);
2149   createOperands(N, Ops);
2150 
2151   CSEMap.InsertNode(N, IP);
2152   InsertNode(N);
2153   return SDValue(N, 0);
2154 }
2155 
2156 SDValue SelectionDAG::getFreeze(SDValue V) {
2157   return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
2158 }
2159 
2160 /// getShiftAmountOperand - Return the specified value casted to
2161 /// the target's desired shift amount type.
2162 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
2163   EVT OpTy = Op.getValueType();
2164   EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
2165   if (OpTy == ShTy || OpTy.isVector()) return Op;
2166 
2167   return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
2168 }
2169 
2170 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
2171   SDLoc dl(Node);
2172   const TargetLowering &TLI = getTargetLoweringInfo();
2173   const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2174   EVT VT = Node->getValueType(0);
2175   SDValue Tmp1 = Node->getOperand(0);
2176   SDValue Tmp2 = Node->getOperand(1);
2177   const MaybeAlign MA(Node->getConstantOperandVal(3));
2178 
2179   SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
2180                                Tmp2, MachinePointerInfo(V));
2181   SDValue VAList = VAListLoad;
2182 
2183   if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
2184     VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2185                      getConstant(MA->value() - 1, dl, VAList.getValueType()));
2186 
2187     VAList =
2188         getNode(ISD::AND, dl, VAList.getValueType(), VAList,
2189                 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
2190   }
2191 
2192   // Increment the pointer, VAList, to the next vaarg
2193   Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2194                  getConstant(getDataLayout().getTypeAllocSize(
2195                                                VT.getTypeForEVT(*getContext())),
2196                              dl, VAList.getValueType()));
2197   // Store the incremented VAList to the legalized pointer
2198   Tmp1 =
2199       getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
2200   // Load the actual argument out of the pointer VAList
2201   return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
2202 }
2203 
2204 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
2205   SDLoc dl(Node);
2206   const TargetLowering &TLI = getTargetLoweringInfo();
2207   // This defaults to loading a pointer from the input and storing it to the
2208   // output, returning the chain.
2209   const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2210   const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2211   SDValue Tmp1 =
2212       getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
2213               Node->getOperand(2), MachinePointerInfo(VS));
2214   return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2215                   MachinePointerInfo(VD));
2216 }
2217 
2218 Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
2219   const DataLayout &DL = getDataLayout();
2220   Type *Ty = VT.getTypeForEVT(*getContext());
2221   Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2222 
2223   if (TLI->isTypeLegal(VT) || !VT.isVector())
2224     return RedAlign;
2225 
2226   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2227   const Align StackAlign = TFI->getStackAlign();
2228 
2229   // See if we can choose a smaller ABI alignment in cases where it's an
2230   // illegal vector type that will get broken down.
2231   if (RedAlign > StackAlign) {
2232     EVT IntermediateVT;
2233     MVT RegisterVT;
2234     unsigned NumIntermediates;
2235     TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
2236                                 NumIntermediates, RegisterVT);
2237     Ty = IntermediateVT.getTypeForEVT(*getContext());
2238     Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2239     if (RedAlign2 < RedAlign)
2240       RedAlign = RedAlign2;
2241   }
2242 
2243   return RedAlign;
2244 }
2245 
2246 SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
2247   MachineFrameInfo &MFI = MF->getFrameInfo();
2248   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2249   int StackID = 0;
2250   if (Bytes.isScalable())
2251     StackID = TFI->getStackIDForScalableVectors();
2252   // The stack id gives an indication of whether the object is scalable or
2253   // not, so it's safe to pass in the minimum size here.
2254   int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
2255                                        false, nullptr, StackID);
2256   return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2257 }
2258 
2259 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
2260   Type *Ty = VT.getTypeForEVT(*getContext());
2261   Align StackAlign =
2262       std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
2263   return CreateStackTemporary(VT.getStoreSize(), StackAlign);
2264 }
2265 
2266 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
2267   TypeSize VT1Size = VT1.getStoreSize();
2268   TypeSize VT2Size = VT2.getStoreSize();
2269   assert(VT1Size.isScalable() == VT2Size.isScalable() &&
2270          "Don't know how to choose the maximum size when creating a stack "
2271          "temporary");
2272   TypeSize Bytes =
2273       VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
2274 
2275   Type *Ty1 = VT1.getTypeForEVT(*getContext());
2276   Type *Ty2 = VT2.getTypeForEVT(*getContext());
2277   const DataLayout &DL = getDataLayout();
2278   Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
2279   return CreateStackTemporary(Bytes, Align);
2280 }
2281 
2282 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
2283                                 ISD::CondCode Cond, const SDLoc &dl) {
2284   EVT OpVT = N1.getValueType();
2285 
2286   // These setcc operations always fold.
2287   switch (Cond) {
2288   default: break;
2289   case ISD::SETFALSE:
2290   case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2291   case ISD::SETTRUE:
2292   case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2293 
2294   case ISD::SETOEQ:
2295   case ISD::SETOGT:
2296   case ISD::SETOGE:
2297   case ISD::SETOLT:
2298   case ISD::SETOLE:
2299   case ISD::SETONE:
2300   case ISD::SETO:
2301   case ISD::SETUO:
2302   case ISD::SETUEQ:
2303   case ISD::SETUNE:
2304     assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2305     break;
2306   }
2307 
2308   if (OpVT.isInteger()) {
2309     // For EQ and NE, we can always pick a value for the undef to make the
2310     // predicate pass or fail, so we can return undef.
2311     // Matches behavior in llvm::ConstantFoldCompareInstruction.
2312     // icmp eq/ne X, undef -> undef.
2313     if ((N1.isUndef() || N2.isUndef()) &&
2314         (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2315       return getUNDEF(VT);
2316 
2317     // If both operands are undef, we can return undef for int comparison.
2318     // icmp undef, undef -> undef.
2319     if (N1.isUndef() && N2.isUndef())
2320       return getUNDEF(VT);
2321 
2322     // icmp X, X -> true/false
2323     // icmp X, undef -> true/false because undef could be X.
2324     if (N1 == N2)
2325       return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2326   }
2327 
2328   if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
2329     const APInt &C2 = N2C->getAPIntValue();
2330     if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
2331       const APInt &C1 = N1C->getAPIntValue();
2332 
2333       return getBoolConstant(ICmpInst::compare(C1, C2, getICmpCondCode(Cond)),
2334                              dl, VT, OpVT);
2335     }
2336   }
2337 
2338   auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2339   auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2340 
2341   if (N1CFP && N2CFP) {
2342     APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2343     switch (Cond) {
2344     default: break;
2345     case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
2346                         return getUNDEF(VT);
2347                       LLVM_FALLTHROUGH;
2348     case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2349                                              OpVT);
2350     case ISD::SETNE:  if (R==APFloat::cmpUnordered)
2351                         return getUNDEF(VT);
2352                       LLVM_FALLTHROUGH;
2353     case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2354                                              R==APFloat::cmpLessThan, dl, VT,
2355                                              OpVT);
2356     case ISD::SETLT:  if (R==APFloat::cmpUnordered)
2357                         return getUNDEF(VT);
2358                       LLVM_FALLTHROUGH;
2359     case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2360                                              OpVT);
2361     case ISD::SETGT:  if (R==APFloat::cmpUnordered)
2362                         return getUNDEF(VT);
2363                       LLVM_FALLTHROUGH;
2364     case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2365                                              VT, OpVT);
2366     case ISD::SETLE:  if (R==APFloat::cmpUnordered)
2367                         return getUNDEF(VT);
2368                       LLVM_FALLTHROUGH;
2369     case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2370                                              R==APFloat::cmpEqual, dl, VT,
2371                                              OpVT);
2372     case ISD::SETGE:  if (R==APFloat::cmpUnordered)
2373                         return getUNDEF(VT);
2374                       LLVM_FALLTHROUGH;
2375     case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2376                                          R==APFloat::cmpEqual, dl, VT, OpVT);
2377     case ISD::SETO:   return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2378                                              OpVT);
2379     case ISD::SETUO:  return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2380                                              OpVT);
2381     case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2382                                              R==APFloat::cmpEqual, dl, VT,
2383                                              OpVT);
2384     case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2385                                              OpVT);
2386     case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2387                                              R==APFloat::cmpLessThan, dl, VT,
2388                                              OpVT);
2389     case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2390                                              R==APFloat::cmpUnordered, dl, VT,
2391                                              OpVT);
2392     case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2393                                              VT, OpVT);
2394     case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2395                                              OpVT);
2396     }
2397   } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2398     // Ensure that the constant occurs on the RHS.
2399     ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2400     if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2401       return SDValue();
2402     return getSetCC(dl, VT, N2, N1, SwappedCond);
2403   } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2404              (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2405     // If an operand is known to be a nan (or undef that could be a nan), we can
2406     // fold it.
2407     // Choosing NaN for the undef will always make unordered comparison succeed
2408     // and ordered comparison fails.
2409     // Matches behavior in llvm::ConstantFoldCompareInstruction.
2410     switch (ISD::getUnorderedFlavor(Cond)) {
2411     default:
2412       llvm_unreachable("Unknown flavor!");
2413     case 0: // Known false.
2414       return getBoolConstant(false, dl, VT, OpVT);
2415     case 1: // Known true.
2416       return getBoolConstant(true, dl, VT, OpVT);
2417     case 2: // Undefined.
2418       return getUNDEF(VT);
2419     }
2420   }
2421 
2422   // Could not fold it.
2423   return SDValue();
2424 }
2425 
2426 /// See if the specified operand can be simplified with the knowledge that only
2427 /// the bits specified by DemandedBits are used.
2428 /// TODO: really we should be making this into the DAG equivalent of
2429 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2430 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2431   EVT VT = V.getValueType();
2432 
2433   if (VT.isScalableVector())
2434     return SDValue();
2435 
2436   APInt DemandedElts = VT.isVector()
2437                            ? APInt::getAllOnes(VT.getVectorNumElements())
2438                            : APInt(1, 1);
2439   return GetDemandedBits(V, DemandedBits, DemandedElts);
2440 }
2441 
2442 /// See if the specified operand can be simplified with the knowledge that only
2443 /// the bits specified by DemandedBits are used in the elements specified by
2444 /// DemandedElts.
2445 /// TODO: really we should be making this into the DAG equivalent of
2446 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2447 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2448                                       const APInt &DemandedElts) {
2449   switch (V.getOpcode()) {
2450   default:
2451     return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2452                                                 *this);
2453   case ISD::Constant: {
2454     const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
2455     APInt NewVal = CVal & DemandedBits;
2456     if (NewVal != CVal)
2457       return getConstant(NewVal, SDLoc(V), V.getValueType());
2458     break;
2459   }
2460   case ISD::SRL:
2461     // Only look at single-use SRLs.
2462     if (!V.getNode()->hasOneUse())
2463       break;
2464     if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2465       // See if we can recursively simplify the LHS.
2466       unsigned Amt = RHSC->getZExtValue();
2467 
2468       // Watch out for shift count overflow though.
2469       if (Amt >= DemandedBits.getBitWidth())
2470         break;
2471       APInt SrcDemandedBits = DemandedBits << Amt;
2472       if (SDValue SimplifyLHS =
2473               GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2474         return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2475                        V.getOperand(1));
2476     }
2477     break;
2478   }
2479   return SDValue();
2480 }
2481 
2482 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
2483 /// use this predicate to simplify operations downstream.
2484 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2485   unsigned BitWidth = Op.getScalarValueSizeInBits();
2486   return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2487 }
2488 
2489 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
2490 /// this predicate to simplify operations downstream.  Mask is known to be zero
2491 /// for bits that V cannot have.
2492 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2493                                      unsigned Depth) const {
2494   return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
2495 }
2496 
2497 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2498 /// DemandedElts.  We use this predicate to simplify operations downstream.
2499 /// Mask is known to be zero for bits that V cannot have.
2500 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2501                                      const APInt &DemandedElts,
2502                                      unsigned Depth) const {
2503   return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2504 }
2505 
2506 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
2507 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
2508                                         unsigned Depth) const {
2509   return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2510 }
2511 
2512 /// isSplatValue - Return true if the vector V has the same value
2513 /// across all DemandedElts. For scalable vectors it does not make
2514 /// sense to specify which elements are demanded or undefined, therefore
2515 /// they are simply ignored.
2516 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2517                                 APInt &UndefElts, unsigned Depth) const {
2518   unsigned Opcode = V.getOpcode();
2519   EVT VT = V.getValueType();
2520   assert(VT.isVector() && "Vector type expected");
2521 
2522   if (!VT.isScalableVector() && !DemandedElts)
2523     return false; // No demanded elts, better to assume we don't know anything.
2524 
2525   if (Depth >= MaxRecursionDepth)
2526     return false; // Limit search depth.
2527 
2528   // Deal with some common cases here that work for both fixed and scalable
2529   // vector types.
2530   switch (Opcode) {
2531   case ISD::SPLAT_VECTOR:
2532     UndefElts = V.getOperand(0).isUndef()
2533                     ? APInt::getAllOnes(DemandedElts.getBitWidth())
2534                     : APInt(DemandedElts.getBitWidth(), 0);
2535     return true;
2536   case ISD::ADD:
2537   case ISD::SUB:
2538   case ISD::AND:
2539   case ISD::XOR:
2540   case ISD::OR: {
2541     APInt UndefLHS, UndefRHS;
2542     SDValue LHS = V.getOperand(0);
2543     SDValue RHS = V.getOperand(1);
2544     if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
2545         isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
2546       UndefElts = UndefLHS | UndefRHS;
2547       return true;
2548     }
2549     return false;
2550   }
2551   case ISD::ABS:
2552   case ISD::TRUNCATE:
2553   case ISD::SIGN_EXTEND:
2554   case ISD::ZERO_EXTEND:
2555     return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
2556   default:
2557     if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
2558         Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
2559       return TLI->isSplatValueForTargetNode(V, DemandedElts, UndefElts, Depth);
2560     break;
2561 }
2562 
2563   // We don't support other cases than those above for scalable vectors at
2564   // the moment.
2565   if (VT.isScalableVector())
2566     return false;
2567 
2568   unsigned NumElts = VT.getVectorNumElements();
2569   assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2570   UndefElts = APInt::getZero(NumElts);
2571 
2572   switch (Opcode) {
2573   case ISD::BUILD_VECTOR: {
2574     SDValue Scl;
2575     for (unsigned i = 0; i != NumElts; ++i) {
2576       SDValue Op = V.getOperand(i);
2577       if (Op.isUndef()) {
2578         UndefElts.setBit(i);
2579         continue;
2580       }
2581       if (!DemandedElts[i])
2582         continue;
2583       if (Scl && Scl != Op)
2584         return false;
2585       Scl = Op;
2586     }
2587     return true;
2588   }
2589   case ISD::VECTOR_SHUFFLE: {
2590     // Check if this is a shuffle node doing a splat.
2591     // TODO: Do we need to handle shuffle(splat, undef, mask)?
2592     int SplatIndex = -1;
2593     ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2594     for (int i = 0; i != (int)NumElts; ++i) {
2595       int M = Mask[i];
2596       if (M < 0) {
2597         UndefElts.setBit(i);
2598         continue;
2599       }
2600       if (!DemandedElts[i])
2601         continue;
2602       if (0 <= SplatIndex && SplatIndex != M)
2603         return false;
2604       SplatIndex = M;
2605     }
2606     return true;
2607   }
2608   case ISD::EXTRACT_SUBVECTOR: {
2609     // Offset the demanded elts by the subvector index.
2610     SDValue Src = V.getOperand(0);
2611     // We don't support scalable vectors at the moment.
2612     if (Src.getValueType().isScalableVector())
2613       return false;
2614     uint64_t Idx = V.getConstantOperandVal(1);
2615     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2616     APInt UndefSrcElts;
2617     APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2618     if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
2619       UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2620       return true;
2621     }
2622     break;
2623   }
2624   case ISD::ANY_EXTEND_VECTOR_INREG:
2625   case ISD::SIGN_EXTEND_VECTOR_INREG:
2626   case ISD::ZERO_EXTEND_VECTOR_INREG: {
2627     // Widen the demanded elts by the src element count.
2628     SDValue Src = V.getOperand(0);
2629     // We don't support scalable vectors at the moment.
2630     if (Src.getValueType().isScalableVector())
2631       return false;
2632     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2633     APInt UndefSrcElts;
2634     APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
2635     if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
2636       UndefElts = UndefSrcElts.truncOrSelf(NumElts);
2637       return true;
2638     }
2639     break;
2640   }
2641   case ISD::BITCAST: {
2642     SDValue Src = V.getOperand(0);
2643     EVT SrcVT = Src.getValueType();
2644     unsigned SrcBitWidth = SrcVT.getScalarSizeInBits();
2645     unsigned BitWidth = VT.getScalarSizeInBits();
2646 
2647     // Ignore bitcasts from unsupported types.
2648     // TODO: Add fp support?
2649     if (!SrcVT.isVector() || !SrcVT.isInteger() || !VT.isInteger())
2650       break;
2651 
2652     // Bitcast 'small element' vector to 'large element' vector.
2653     if ((BitWidth % SrcBitWidth) == 0) {
2654       // See if each sub element is a splat.
2655       unsigned Scale = BitWidth / SrcBitWidth;
2656       unsigned NumSrcElts = SrcVT.getVectorNumElements();
2657       APInt ScaledDemandedElts =
2658           APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
2659       for (unsigned I = 0; I != Scale; ++I) {
2660         APInt SubUndefElts;
2661         APInt SubDemandedElt = APInt::getOneBitSet(Scale, I);
2662         APInt SubDemandedElts = APInt::getSplat(NumSrcElts, SubDemandedElt);
2663         SubDemandedElts &= ScaledDemandedElts;
2664         if (!isSplatValue(Src, SubDemandedElts, SubUndefElts, Depth + 1))
2665           return false;
2666         // TODO: Add support for merging sub undef elements.
2667         if (SubDemandedElts.isSubsetOf(SubUndefElts))
2668           return false;
2669       }
2670       return true;
2671     }
2672     break;
2673   }
2674   }
2675 
2676   return false;
2677 }
2678 
2679 /// Helper wrapper to main isSplatValue function.
2680 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) const {
2681   EVT VT = V.getValueType();
2682   assert(VT.isVector() && "Vector type expected");
2683 
2684   APInt UndefElts;
2685   APInt DemandedElts;
2686 
2687   // For now we don't support this with scalable vectors.
2688   if (!VT.isScalableVector())
2689     DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
2690   return isSplatValue(V, DemandedElts, UndefElts) &&
2691          (AllowUndefs || !UndefElts);
2692 }
2693 
2694 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2695   V = peekThroughExtractSubvectors(V);
2696 
2697   EVT VT = V.getValueType();
2698   unsigned Opcode = V.getOpcode();
2699   switch (Opcode) {
2700   default: {
2701     APInt UndefElts;
2702     APInt DemandedElts;
2703 
2704     if (!VT.isScalableVector())
2705       DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
2706 
2707     if (isSplatValue(V, DemandedElts, UndefElts)) {
2708       if (VT.isScalableVector()) {
2709         // DemandedElts and UndefElts are ignored for scalable vectors, since
2710         // the only supported cases are SPLAT_VECTOR nodes.
2711         SplatIdx = 0;
2712       } else {
2713         // Handle case where all demanded elements are UNDEF.
2714         if (DemandedElts.isSubsetOf(UndefElts)) {
2715           SplatIdx = 0;
2716           return getUNDEF(VT);
2717         }
2718         SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
2719       }
2720       return V;
2721     }
2722     break;
2723   }
2724   case ISD::SPLAT_VECTOR:
2725     SplatIdx = 0;
2726     return V;
2727   case ISD::VECTOR_SHUFFLE: {
2728     if (VT.isScalableVector())
2729       return SDValue();
2730 
2731     // Check if this is a shuffle node doing a splat.
2732     // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
2733     // getTargetVShiftNode currently struggles without the splat source.
2734     auto *SVN = cast<ShuffleVectorSDNode>(V);
2735     if (!SVN->isSplat())
2736       break;
2737     int Idx = SVN->getSplatIndex();
2738     int NumElts = V.getValueType().getVectorNumElements();
2739     SplatIdx = Idx % NumElts;
2740     return V.getOperand(Idx / NumElts);
2741   }
2742   }
2743 
2744   return SDValue();
2745 }
2746 
2747 SDValue SelectionDAG::getSplatValue(SDValue V, bool LegalTypes) {
2748   int SplatIdx;
2749   if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) {
2750     EVT SVT = SrcVector.getValueType().getScalarType();
2751     EVT LegalSVT = SVT;
2752     if (LegalTypes && !TLI->isTypeLegal(SVT)) {
2753       if (!SVT.isInteger())
2754         return SDValue();
2755       LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
2756       if (LegalSVT.bitsLT(SVT))
2757         return SDValue();
2758     }
2759     return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), LegalSVT, SrcVector,
2760                    getVectorIdxConstant(SplatIdx, SDLoc(V)));
2761   }
2762   return SDValue();
2763 }
2764 
2765 const APInt *
2766 SelectionDAG::getValidShiftAmountConstant(SDValue V,
2767                                           const APInt &DemandedElts) const {
2768   assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2769           V.getOpcode() == ISD::SRA) &&
2770          "Unknown shift node");
2771   unsigned BitWidth = V.getScalarValueSizeInBits();
2772   if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
2773     // Shifting more than the bitwidth is not valid.
2774     const APInt &ShAmt = SA->getAPIntValue();
2775     if (ShAmt.ult(BitWidth))
2776       return &ShAmt;
2777   }
2778   return nullptr;
2779 }
2780 
2781 const APInt *SelectionDAG::getValidMinimumShiftAmountConstant(
2782     SDValue V, const APInt &DemandedElts) const {
2783   assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2784           V.getOpcode() == ISD::SRA) &&
2785          "Unknown shift node");
2786   if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2787     return ValidAmt;
2788   unsigned BitWidth = V.getScalarValueSizeInBits();
2789   auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2790   if (!BV)
2791     return nullptr;
2792   const APInt *MinShAmt = nullptr;
2793   for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2794     if (!DemandedElts[i])
2795       continue;
2796     auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2797     if (!SA)
2798       return nullptr;
2799     // Shifting more than the bitwidth is not valid.
2800     const APInt &ShAmt = SA->getAPIntValue();
2801     if (ShAmt.uge(BitWidth))
2802       return nullptr;
2803     if (MinShAmt && MinShAmt->ule(ShAmt))
2804       continue;
2805     MinShAmt = &ShAmt;
2806   }
2807   return MinShAmt;
2808 }
2809 
2810 const APInt *SelectionDAG::getValidMaximumShiftAmountConstant(
2811     SDValue V, const APInt &DemandedElts) const {
2812   assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2813           V.getOpcode() == ISD::SRA) &&
2814          "Unknown shift node");
2815   if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2816     return ValidAmt;
2817   unsigned BitWidth = V.getScalarValueSizeInBits();
2818   auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2819   if (!BV)
2820     return nullptr;
2821   const APInt *MaxShAmt = nullptr;
2822   for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2823     if (!DemandedElts[i])
2824       continue;
2825     auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2826     if (!SA)
2827       return nullptr;
2828     // Shifting more than the bitwidth is not valid.
2829     const APInt &ShAmt = SA->getAPIntValue();
2830     if (ShAmt.uge(BitWidth))
2831       return nullptr;
2832     if (MaxShAmt && MaxShAmt->uge(ShAmt))
2833       continue;
2834     MaxShAmt = &ShAmt;
2835   }
2836   return MaxShAmt;
2837 }
2838 
2839 /// Determine which bits of Op are known to be either zero or one and return
2840 /// them in Known. For vectors, the known bits are those that are shared by
2841 /// every vector element.
2842 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2843   EVT VT = Op.getValueType();
2844 
2845   // TOOD: Until we have a plan for how to represent demanded elements for
2846   // scalable vectors, we can just bail out for now.
2847   if (Op.getValueType().isScalableVector()) {
2848     unsigned BitWidth = Op.getScalarValueSizeInBits();
2849     return KnownBits(BitWidth);
2850   }
2851 
2852   APInt DemandedElts = VT.isVector()
2853                            ? APInt::getAllOnes(VT.getVectorNumElements())
2854                            : APInt(1, 1);
2855   return computeKnownBits(Op, DemandedElts, Depth);
2856 }
2857 
2858 /// Determine which bits of Op are known to be either zero or one and return
2859 /// them in Known. The DemandedElts argument allows us to only collect the known
2860 /// bits that are shared by the requested vector elements.
2861 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2862                                          unsigned Depth) const {
2863   unsigned BitWidth = Op.getScalarValueSizeInBits();
2864 
2865   KnownBits Known(BitWidth);   // Don't know anything.
2866 
2867   // TOOD: Until we have a plan for how to represent demanded elements for
2868   // scalable vectors, we can just bail out for now.
2869   if (Op.getValueType().isScalableVector())
2870     return Known;
2871 
2872   if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2873     // We know all of the bits for a constant!
2874     return KnownBits::makeConstant(C->getAPIntValue());
2875   }
2876   if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2877     // We know all of the bits for a constant fp!
2878     return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
2879   }
2880 
2881   if (Depth >= MaxRecursionDepth)
2882     return Known;  // Limit search depth.
2883 
2884   KnownBits Known2;
2885   unsigned NumElts = DemandedElts.getBitWidth();
2886   assert((!Op.getValueType().isVector() ||
2887           NumElts == Op.getValueType().getVectorNumElements()) &&
2888          "Unexpected vector size");
2889 
2890   if (!DemandedElts)
2891     return Known;  // No demanded elts, better to assume we don't know anything.
2892 
2893   unsigned Opcode = Op.getOpcode();
2894   switch (Opcode) {
2895   case ISD::BUILD_VECTOR:
2896     // Collect the known bits that are shared by every demanded vector element.
2897     Known.Zero.setAllBits(); Known.One.setAllBits();
2898     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2899       if (!DemandedElts[i])
2900         continue;
2901 
2902       SDValue SrcOp = Op.getOperand(i);
2903       Known2 = computeKnownBits(SrcOp, Depth + 1);
2904 
2905       // BUILD_VECTOR can implicitly truncate sources, we must handle this.
2906       if (SrcOp.getValueSizeInBits() != BitWidth) {
2907         assert(SrcOp.getValueSizeInBits() > BitWidth &&
2908                "Expected BUILD_VECTOR implicit truncation");
2909         Known2 = Known2.trunc(BitWidth);
2910       }
2911 
2912       // Known bits are the values that are shared by every demanded element.
2913       Known = KnownBits::commonBits(Known, Known2);
2914 
2915       // If we don't know any bits, early out.
2916       if (Known.isUnknown())
2917         break;
2918     }
2919     break;
2920   case ISD::VECTOR_SHUFFLE: {
2921     // Collect the known bits that are shared by every vector element referenced
2922     // by the shuffle.
2923     APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2924     Known.Zero.setAllBits(); Known.One.setAllBits();
2925     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2926     assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2927     for (unsigned i = 0; i != NumElts; ++i) {
2928       if (!DemandedElts[i])
2929         continue;
2930 
2931       int M = SVN->getMaskElt(i);
2932       if (M < 0) {
2933         // For UNDEF elements, we don't know anything about the common state of
2934         // the shuffle result.
2935         Known.resetAll();
2936         DemandedLHS.clearAllBits();
2937         DemandedRHS.clearAllBits();
2938         break;
2939       }
2940 
2941       if ((unsigned)M < NumElts)
2942         DemandedLHS.setBit((unsigned)M % NumElts);
2943       else
2944         DemandedRHS.setBit((unsigned)M % NumElts);
2945     }
2946     // Known bits are the values that are shared by every demanded element.
2947     if (!!DemandedLHS) {
2948       SDValue LHS = Op.getOperand(0);
2949       Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2950       Known = KnownBits::commonBits(Known, Known2);
2951     }
2952     // If we don't know any bits, early out.
2953     if (Known.isUnknown())
2954       break;
2955     if (!!DemandedRHS) {
2956       SDValue RHS = Op.getOperand(1);
2957       Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2958       Known = KnownBits::commonBits(Known, Known2);
2959     }
2960     break;
2961   }
2962   case ISD::CONCAT_VECTORS: {
2963     // Split DemandedElts and test each of the demanded subvectors.
2964     Known.Zero.setAllBits(); Known.One.setAllBits();
2965     EVT SubVectorVT = Op.getOperand(0).getValueType();
2966     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2967     unsigned NumSubVectors = Op.getNumOperands();
2968     for (unsigned i = 0; i != NumSubVectors; ++i) {
2969       APInt DemandedSub =
2970           DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
2971       if (!!DemandedSub) {
2972         SDValue Sub = Op.getOperand(i);
2973         Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2974         Known = KnownBits::commonBits(Known, Known2);
2975       }
2976       // If we don't know any bits, early out.
2977       if (Known.isUnknown())
2978         break;
2979     }
2980     break;
2981   }
2982   case ISD::INSERT_SUBVECTOR: {
2983     // Demand any elements from the subvector and the remainder from the src its
2984     // inserted into.
2985     SDValue Src = Op.getOperand(0);
2986     SDValue Sub = Op.getOperand(1);
2987     uint64_t Idx = Op.getConstantOperandVal(2);
2988     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2989     APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2990     APInt DemandedSrcElts = DemandedElts;
2991     DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
2992 
2993     Known.One.setAllBits();
2994     Known.Zero.setAllBits();
2995     if (!!DemandedSubElts) {
2996       Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2997       if (Known.isUnknown())
2998         break; // early-out.
2999     }
3000     if (!!DemandedSrcElts) {
3001       Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
3002       Known = KnownBits::commonBits(Known, Known2);
3003     }
3004     break;
3005   }
3006   case ISD::EXTRACT_SUBVECTOR: {
3007     // Offset the demanded elts by the subvector index.
3008     SDValue Src = Op.getOperand(0);
3009     // Bail until we can represent demanded elements for scalable vectors.
3010     if (Src.getValueType().isScalableVector())
3011       break;
3012     uint64_t Idx = Op.getConstantOperandVal(1);
3013     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3014     APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3015     Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
3016     break;
3017   }
3018   case ISD::SCALAR_TO_VECTOR: {
3019     // We know about scalar_to_vector as much as we know about it source,
3020     // which becomes the first element of otherwise unknown vector.
3021     if (DemandedElts != 1)
3022       break;
3023 
3024     SDValue N0 = Op.getOperand(0);
3025     Known = computeKnownBits(N0, Depth + 1);
3026     if (N0.getValueSizeInBits() != BitWidth)
3027       Known = Known.trunc(BitWidth);
3028 
3029     break;
3030   }
3031   case ISD::BITCAST: {
3032     SDValue N0 = Op.getOperand(0);
3033     EVT SubVT = N0.getValueType();
3034     unsigned SubBitWidth = SubVT.getScalarSizeInBits();
3035 
3036     // Ignore bitcasts from unsupported types.
3037     if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
3038       break;
3039 
3040     // Fast handling of 'identity' bitcasts.
3041     if (BitWidth == SubBitWidth) {
3042       Known = computeKnownBits(N0, DemandedElts, Depth + 1);
3043       break;
3044     }
3045 
3046     bool IsLE = getDataLayout().isLittleEndian();
3047 
3048     // Bitcast 'small element' vector to 'large element' scalar/vector.
3049     if ((BitWidth % SubBitWidth) == 0) {
3050       assert(N0.getValueType().isVector() && "Expected bitcast from vector");
3051 
3052       // Collect known bits for the (larger) output by collecting the known
3053       // bits from each set of sub elements and shift these into place.
3054       // We need to separately call computeKnownBits for each set of
3055       // sub elements as the knownbits for each is likely to be different.
3056       unsigned SubScale = BitWidth / SubBitWidth;
3057       APInt SubDemandedElts(NumElts * SubScale, 0);
3058       for (unsigned i = 0; i != NumElts; ++i)
3059         if (DemandedElts[i])
3060           SubDemandedElts.setBit(i * SubScale);
3061 
3062       for (unsigned i = 0; i != SubScale; ++i) {
3063         Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
3064                          Depth + 1);
3065         unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3066         Known.insertBits(Known2, SubBitWidth * Shifts);
3067       }
3068     }
3069 
3070     // Bitcast 'large element' scalar/vector to 'small element' vector.
3071     if ((SubBitWidth % BitWidth) == 0) {
3072       assert(Op.getValueType().isVector() && "Expected bitcast to vector");
3073 
3074       // Collect known bits for the (smaller) output by collecting the known
3075       // bits from the overlapping larger input elements and extracting the
3076       // sub sections we actually care about.
3077       unsigned SubScale = SubBitWidth / BitWidth;
3078       APInt SubDemandedElts =
3079           APIntOps::ScaleBitMask(DemandedElts, NumElts / SubScale);
3080       Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
3081 
3082       Known.Zero.setAllBits(); Known.One.setAllBits();
3083       for (unsigned i = 0; i != NumElts; ++i)
3084         if (DemandedElts[i]) {
3085           unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3086           unsigned Offset = (Shifts % SubScale) * BitWidth;
3087           Known = KnownBits::commonBits(Known,
3088                                         Known2.extractBits(BitWidth, Offset));
3089           // If we don't know any bits, early out.
3090           if (Known.isUnknown())
3091             break;
3092         }
3093     }
3094     break;
3095   }
3096   case ISD::AND:
3097     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3098     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3099 
3100     Known &= Known2;
3101     break;
3102   case ISD::OR:
3103     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3104     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3105 
3106     Known |= Known2;
3107     break;
3108   case ISD::XOR:
3109     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3110     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3111 
3112     Known ^= Known2;
3113     break;
3114   case ISD::MUL: {
3115     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3116     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3117     bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3118     // TODO: SelfMultiply can be poison, but not undef.
3119     if (SelfMultiply)
3120       SelfMultiply &= isGuaranteedNotToBeUndefOrPoison(
3121           Op.getOperand(0), DemandedElts, false, Depth + 1);
3122     Known = KnownBits::mul(Known, Known2, SelfMultiply);
3123     break;
3124   }
3125   case ISD::MULHU: {
3126     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3127     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3128     Known = KnownBits::mulhu(Known, Known2);
3129     break;
3130   }
3131   case ISD::MULHS: {
3132     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3133     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3134     Known = KnownBits::mulhs(Known, Known2);
3135     break;
3136   }
3137   case ISD::UMUL_LOHI: {
3138     assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3139     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3140     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3141     bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3142     if (Op.getResNo() == 0)
3143       Known = KnownBits::mul(Known, Known2, SelfMultiply);
3144     else
3145       Known = KnownBits::mulhu(Known, Known2);
3146     break;
3147   }
3148   case ISD::SMUL_LOHI: {
3149     assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3150     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3151     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3152     bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3153     if (Op.getResNo() == 0)
3154       Known = KnownBits::mul(Known, Known2, SelfMultiply);
3155     else
3156       Known = KnownBits::mulhs(Known, Known2);
3157     break;
3158   }
3159   case ISD::UDIV: {
3160     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3161     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3162     Known = KnownBits::udiv(Known, Known2);
3163     break;
3164   }
3165   case ISD::AVGCEILU: {
3166     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3167     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3168     Known = Known.zext(BitWidth + 1);
3169     Known2 = Known2.zext(BitWidth + 1);
3170     KnownBits One = KnownBits::makeConstant(APInt(1, 1));
3171     Known = KnownBits::computeForAddCarry(Known, Known2, One);
3172     Known = Known.extractBits(BitWidth, 1);
3173     break;
3174   }
3175   case ISD::SELECT:
3176   case ISD::VSELECT:
3177     Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3178     // If we don't know any bits, early out.
3179     if (Known.isUnknown())
3180       break;
3181     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
3182 
3183     // Only known if known in both the LHS and RHS.
3184     Known = KnownBits::commonBits(Known, Known2);
3185     break;
3186   case ISD::SELECT_CC:
3187     Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
3188     // If we don't know any bits, early out.
3189     if (Known.isUnknown())
3190       break;
3191     Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3192 
3193     // Only known if known in both the LHS and RHS.
3194     Known = KnownBits::commonBits(Known, Known2);
3195     break;
3196   case ISD::SMULO:
3197   case ISD::UMULO:
3198     if (Op.getResNo() != 1)
3199       break;
3200     // The boolean result conforms to getBooleanContents.
3201     // If we know the result of a setcc has the top bits zero, use this info.
3202     // We know that we have an integer-based boolean since these operations
3203     // are only available for integer.
3204     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3205             TargetLowering::ZeroOrOneBooleanContent &&
3206         BitWidth > 1)
3207       Known.Zero.setBitsFrom(1);
3208     break;
3209   case ISD::SETCC:
3210   case ISD::STRICT_FSETCC:
3211   case ISD::STRICT_FSETCCS: {
3212     unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3213     // If we know the result of a setcc has the top bits zero, use this info.
3214     if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3215             TargetLowering::ZeroOrOneBooleanContent &&
3216         BitWidth > 1)
3217       Known.Zero.setBitsFrom(1);
3218     break;
3219   }
3220   case ISD::SHL:
3221     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3222     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3223     Known = KnownBits::shl(Known, Known2);
3224 
3225     // Minimum shift low bits are known zero.
3226     if (const APInt *ShMinAmt =
3227             getValidMinimumShiftAmountConstant(Op, DemandedElts))
3228       Known.Zero.setLowBits(ShMinAmt->getZExtValue());
3229     break;
3230   case ISD::SRL:
3231     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3232     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3233     Known = KnownBits::lshr(Known, Known2);
3234 
3235     // Minimum shift high bits are known zero.
3236     if (const APInt *ShMinAmt =
3237             getValidMinimumShiftAmountConstant(Op, DemandedElts))
3238       Known.Zero.setHighBits(ShMinAmt->getZExtValue());
3239     break;
3240   case ISD::SRA:
3241     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3242     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3243     Known = KnownBits::ashr(Known, Known2);
3244     // TODO: Add minimum shift high known sign bits.
3245     break;
3246   case ISD::FSHL:
3247   case ISD::FSHR:
3248     if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
3249       unsigned Amt = C->getAPIntValue().urem(BitWidth);
3250 
3251       // For fshl, 0-shift returns the 1st arg.
3252       // For fshr, 0-shift returns the 2nd arg.
3253       if (Amt == 0) {
3254         Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
3255                                  DemandedElts, Depth + 1);
3256         break;
3257       }
3258 
3259       // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3260       // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3261       Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3262       Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3263       if (Opcode == ISD::FSHL) {
3264         Known.One <<= Amt;
3265         Known.Zero <<= Amt;
3266         Known2.One.lshrInPlace(BitWidth - Amt);
3267         Known2.Zero.lshrInPlace(BitWidth - Amt);
3268       } else {
3269         Known.One <<= BitWidth - Amt;
3270         Known.Zero <<= BitWidth - Amt;
3271         Known2.One.lshrInPlace(Amt);
3272         Known2.Zero.lshrInPlace(Amt);
3273       }
3274       Known.One |= Known2.One;
3275       Known.Zero |= Known2.Zero;
3276     }
3277     break;
3278   case ISD::SIGN_EXTEND_INREG: {
3279     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3280     EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3281     Known = Known.sextInReg(EVT.getScalarSizeInBits());
3282     break;
3283   }
3284   case ISD::CTTZ:
3285   case ISD::CTTZ_ZERO_UNDEF: {
3286     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3287     // If we have a known 1, its position is our upper bound.
3288     unsigned PossibleTZ = Known2.countMaxTrailingZeros();
3289     unsigned LowBits = Log2_32(PossibleTZ) + 1;
3290     Known.Zero.setBitsFrom(LowBits);
3291     break;
3292   }
3293   case ISD::CTLZ:
3294   case ISD::CTLZ_ZERO_UNDEF: {
3295     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3296     // If we have a known 1, its position is our upper bound.
3297     unsigned PossibleLZ = Known2.countMaxLeadingZeros();
3298     unsigned LowBits = Log2_32(PossibleLZ) + 1;
3299     Known.Zero.setBitsFrom(LowBits);
3300     break;
3301   }
3302   case ISD::CTPOP: {
3303     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3304     // If we know some of the bits are zero, they can't be one.
3305     unsigned PossibleOnes = Known2.countMaxPopulation();
3306     Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
3307     break;
3308   }
3309   case ISD::PARITY: {
3310     // Parity returns 0 everywhere but the LSB.
3311     Known.Zero.setBitsFrom(1);
3312     break;
3313   }
3314   case ISD::LOAD: {
3315     LoadSDNode *LD = cast<LoadSDNode>(Op);
3316     const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3317     if (ISD::isNON_EXTLoad(LD) && Cst) {
3318       // Determine any common known bits from the loaded constant pool value.
3319       Type *CstTy = Cst->getType();
3320       if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
3321         // If its a vector splat, then we can (quickly) reuse the scalar path.
3322         // NOTE: We assume all elements match and none are UNDEF.
3323         if (CstTy->isVectorTy()) {
3324           if (const Constant *Splat = Cst->getSplatValue()) {
3325             Cst = Splat;
3326             CstTy = Cst->getType();
3327           }
3328         }
3329         // TODO - do we need to handle different bitwidths?
3330         if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3331           // Iterate across all vector elements finding common known bits.
3332           Known.One.setAllBits();
3333           Known.Zero.setAllBits();
3334           for (unsigned i = 0; i != NumElts; ++i) {
3335             if (!DemandedElts[i])
3336               continue;
3337             if (Constant *Elt = Cst->getAggregateElement(i)) {
3338               if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3339                 const APInt &Value = CInt->getValue();
3340                 Known.One &= Value;
3341                 Known.Zero &= ~Value;
3342                 continue;
3343               }
3344               if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3345                 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3346                 Known.One &= Value;
3347                 Known.Zero &= ~Value;
3348                 continue;
3349               }
3350             }
3351             Known.One.clearAllBits();
3352             Known.Zero.clearAllBits();
3353             break;
3354           }
3355         } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
3356           if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
3357             Known = KnownBits::makeConstant(CInt->getValue());
3358           } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3359             Known =
3360                 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
3361           }
3362         }
3363       }
3364     } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
3365       // If this is a ZEXTLoad and we are looking at the loaded value.
3366       EVT VT = LD->getMemoryVT();
3367       unsigned MemBits = VT.getScalarSizeInBits();
3368       Known.Zero.setBitsFrom(MemBits);
3369     } else if (const MDNode *Ranges = LD->getRanges()) {
3370       if (LD->getExtensionType() == ISD::NON_EXTLOAD)
3371         computeKnownBitsFromRangeMetadata(*Ranges, Known);
3372     }
3373     break;
3374   }
3375   case ISD::ZERO_EXTEND_VECTOR_INREG: {
3376     EVT InVT = Op.getOperand(0).getValueType();
3377     APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3378     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3379     Known = Known.zext(BitWidth);
3380     break;
3381   }
3382   case ISD::ZERO_EXTEND: {
3383     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3384     Known = Known.zext(BitWidth);
3385     break;
3386   }
3387   case ISD::SIGN_EXTEND_VECTOR_INREG: {
3388     EVT InVT = Op.getOperand(0).getValueType();
3389     APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3390     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3391     // If the sign bit is known to be zero or one, then sext will extend
3392     // it to the top bits, else it will just zext.
3393     Known = Known.sext(BitWidth);
3394     break;
3395   }
3396   case ISD::SIGN_EXTEND: {
3397     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3398     // If the sign bit is known to be zero or one, then sext will extend
3399     // it to the top bits, else it will just zext.
3400     Known = Known.sext(BitWidth);
3401     break;
3402   }
3403   case ISD::ANY_EXTEND_VECTOR_INREG: {
3404     EVT InVT = Op.getOperand(0).getValueType();
3405     APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3406     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3407     Known = Known.anyext(BitWidth);
3408     break;
3409   }
3410   case ISD::ANY_EXTEND: {
3411     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3412     Known = Known.anyext(BitWidth);
3413     break;
3414   }
3415   case ISD::TRUNCATE: {
3416     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3417     Known = Known.trunc(BitWidth);
3418     break;
3419   }
3420   case ISD::AssertZext: {
3421     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3422     APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3423     Known = computeKnownBits(Op.getOperand(0), Depth+1);
3424     Known.Zero |= (~InMask);
3425     Known.One  &= (~Known.Zero);
3426     break;
3427   }
3428   case ISD::AssertAlign: {
3429     unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
3430     assert(LogOfAlign != 0);
3431 
3432     // TODO: Should use maximum with source
3433     // If a node is guaranteed to be aligned, set low zero bits accordingly as
3434     // well as clearing one bits.
3435     Known.Zero.setLowBits(LogOfAlign);
3436     Known.One.clearLowBits(LogOfAlign);
3437     break;
3438   }
3439   case ISD::FGETSIGN:
3440     // All bits are zero except the low bit.
3441     Known.Zero.setBitsFrom(1);
3442     break;
3443   case ISD::USUBO:
3444   case ISD::SSUBO:
3445     if (Op.getResNo() == 1) {
3446       // If we know the result of a setcc has the top bits zero, use this info.
3447       if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3448               TargetLowering::ZeroOrOneBooleanContent &&
3449           BitWidth > 1)
3450         Known.Zero.setBitsFrom(1);
3451       break;
3452     }
3453     LLVM_FALLTHROUGH;
3454   case ISD::SUB:
3455   case ISD::SUBC: {
3456     assert(Op.getResNo() == 0 &&
3457            "We only compute knownbits for the difference here.");
3458 
3459     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3460     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3461     Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
3462                                         Known, Known2);
3463     break;
3464   }
3465   case ISD::UADDO:
3466   case ISD::SADDO:
3467   case ISD::ADDCARRY:
3468     if (Op.getResNo() == 1) {
3469       // If we know the result of a setcc has the top bits zero, use this info.
3470       if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3471               TargetLowering::ZeroOrOneBooleanContent &&
3472           BitWidth > 1)
3473         Known.Zero.setBitsFrom(1);
3474       break;
3475     }
3476     LLVM_FALLTHROUGH;
3477   case ISD::ADD:
3478   case ISD::ADDC:
3479   case ISD::ADDE: {
3480     assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3481 
3482     // With ADDE and ADDCARRY, a carry bit may be added in.
3483     KnownBits Carry(1);
3484     if (Opcode == ISD::ADDE)
3485       // Can't track carry from glue, set carry to unknown.
3486       Carry.resetAll();
3487     else if (Opcode == ISD::ADDCARRY)
3488       // TODO: Compute known bits for the carry operand. Not sure if it is worth
3489       // the trouble (how often will we find a known carry bit). And I haven't
3490       // tested this very much yet, but something like this might work:
3491       //   Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3492       //   Carry = Carry.zextOrTrunc(1, false);
3493       Carry.resetAll();
3494     else
3495       Carry.setAllZero();
3496 
3497     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3498     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3499     Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3500     break;
3501   }
3502   case ISD::SREM: {
3503     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3504     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3505     Known = KnownBits::srem(Known, Known2);
3506     break;
3507   }
3508   case ISD::UREM: {
3509     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3510     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3511     Known = KnownBits::urem(Known, Known2);
3512     break;
3513   }
3514   case ISD::EXTRACT_ELEMENT: {
3515     Known = computeKnownBits(Op.getOperand(0), Depth+1);
3516     const unsigned Index = Op.getConstantOperandVal(1);
3517     const unsigned EltBitWidth = Op.getValueSizeInBits();
3518 
3519     // Remove low part of known bits mask
3520     Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3521     Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3522 
3523     // Remove high part of known bit mask
3524     Known = Known.trunc(EltBitWidth);
3525     break;
3526   }
3527   case ISD::EXTRACT_VECTOR_ELT: {
3528     SDValue InVec = Op.getOperand(0);
3529     SDValue EltNo = Op.getOperand(1);
3530     EVT VecVT = InVec.getValueType();
3531     // computeKnownBits not yet implemented for scalable vectors.
3532     if (VecVT.isScalableVector())
3533       break;
3534     const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3535     const unsigned NumSrcElts = VecVT.getVectorNumElements();
3536 
3537     // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
3538     // anything about the extended bits.
3539     if (BitWidth > EltBitWidth)
3540       Known = Known.trunc(EltBitWidth);
3541 
3542     // If we know the element index, just demand that vector element, else for
3543     // an unknown element index, ignore DemandedElts and demand them all.
3544     APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
3545     auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3546     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3547       DemandedSrcElts =
3548           APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3549 
3550     Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
3551     if (BitWidth > EltBitWidth)
3552       Known = Known.anyext(BitWidth);
3553     break;
3554   }
3555   case ISD::INSERT_VECTOR_ELT: {
3556     // If we know the element index, split the demand between the
3557     // source vector and the inserted element, otherwise assume we need
3558     // the original demanded vector elements and the value.
3559     SDValue InVec = Op.getOperand(0);
3560     SDValue InVal = Op.getOperand(1);
3561     SDValue EltNo = Op.getOperand(2);
3562     bool DemandedVal = true;
3563     APInt DemandedVecElts = DemandedElts;
3564     auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3565     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3566       unsigned EltIdx = CEltNo->getZExtValue();
3567       DemandedVal = !!DemandedElts[EltIdx];
3568       DemandedVecElts.clearBit(EltIdx);
3569     }
3570     Known.One.setAllBits();
3571     Known.Zero.setAllBits();
3572     if (DemandedVal) {
3573       Known2 = computeKnownBits(InVal, Depth + 1);
3574       Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth));
3575     }
3576     if (!!DemandedVecElts) {
3577       Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
3578       Known = KnownBits::commonBits(Known, Known2);
3579     }
3580     break;
3581   }
3582   case ISD::BITREVERSE: {
3583     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3584     Known = Known2.reverseBits();
3585     break;
3586   }
3587   case ISD::BSWAP: {
3588     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3589     Known = Known2.byteSwap();
3590     break;
3591   }
3592   case ISD::ABS: {
3593     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3594     Known = Known2.abs();
3595     break;
3596   }
3597   case ISD::USUBSAT: {
3598     // The result of usubsat will never be larger than the LHS.
3599     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3600     Known.Zero.setHighBits(Known2.countMinLeadingZeros());
3601     break;
3602   }
3603   case ISD::UMIN: {
3604     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3605     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3606     Known = KnownBits::umin(Known, Known2);
3607     break;
3608   }
3609   case ISD::UMAX: {
3610     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3611     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3612     Known = KnownBits::umax(Known, Known2);
3613     break;
3614   }
3615   case ISD::SMIN:
3616   case ISD::SMAX: {
3617     // If we have a clamp pattern, we know that the number of sign bits will be
3618     // the minimum of the clamp min/max range.
3619     bool IsMax = (Opcode == ISD::SMAX);
3620     ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3621     if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3622       if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3623         CstHigh =
3624             isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3625     if (CstLow && CstHigh) {
3626       if (!IsMax)
3627         std::swap(CstLow, CstHigh);
3628 
3629       const APInt &ValueLow = CstLow->getAPIntValue();
3630       const APInt &ValueHigh = CstHigh->getAPIntValue();
3631       if (ValueLow.sle(ValueHigh)) {
3632         unsigned LowSignBits = ValueLow.getNumSignBits();
3633         unsigned HighSignBits = ValueHigh.getNumSignBits();
3634         unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3635         if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3636           Known.One.setHighBits(MinSignBits);
3637           break;
3638         }
3639         if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3640           Known.Zero.setHighBits(MinSignBits);
3641           break;
3642         }
3643       }
3644     }
3645 
3646     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3647     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3648     if (IsMax)
3649       Known = KnownBits::smax(Known, Known2);
3650     else
3651       Known = KnownBits::smin(Known, Known2);
3652     break;
3653   }
3654   case ISD::FP_TO_UINT_SAT: {
3655     // FP_TO_UINT_SAT produces an unsigned value that fits in the saturating VT.
3656     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3657     Known.Zero |= APInt::getBitsSetFrom(BitWidth, VT.getScalarSizeInBits());
3658     break;
3659   }
3660   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
3661     if (Op.getResNo() == 1) {
3662       // The boolean result conforms to getBooleanContents.
3663       // If we know the result of a setcc has the top bits zero, use this info.
3664       // We know that we have an integer-based boolean since these operations
3665       // are only available for integer.
3666       if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3667               TargetLowering::ZeroOrOneBooleanContent &&
3668           BitWidth > 1)
3669         Known.Zero.setBitsFrom(1);
3670       break;
3671     }
3672     LLVM_FALLTHROUGH;
3673   case ISD::ATOMIC_CMP_SWAP:
3674   case ISD::ATOMIC_SWAP:
3675   case ISD::ATOMIC_LOAD_ADD:
3676   case ISD::ATOMIC_LOAD_SUB:
3677   case ISD::ATOMIC_LOAD_AND:
3678   case ISD::ATOMIC_LOAD_CLR:
3679   case ISD::ATOMIC_LOAD_OR:
3680   case ISD::ATOMIC_LOAD_XOR:
3681   case ISD::ATOMIC_LOAD_NAND:
3682   case ISD::ATOMIC_LOAD_MIN:
3683   case ISD::ATOMIC_LOAD_MAX:
3684   case ISD::ATOMIC_LOAD_UMIN:
3685   case ISD::ATOMIC_LOAD_UMAX:
3686   case ISD::ATOMIC_LOAD: {
3687     unsigned MemBits =
3688         cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
3689     // If we are looking at the loaded value.
3690     if (Op.getResNo() == 0) {
3691       if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
3692         Known.Zero.setBitsFrom(MemBits);
3693     }
3694     break;
3695   }
3696   case ISD::FrameIndex:
3697   case ISD::TargetFrameIndex:
3698     TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
3699                                        Known, getMachineFunction());
3700     break;
3701 
3702   default:
3703     if (Opcode < ISD::BUILTIN_OP_END)
3704       break;
3705     LLVM_FALLTHROUGH;
3706   case ISD::INTRINSIC_WO_CHAIN:
3707   case ISD::INTRINSIC_W_CHAIN:
3708   case ISD::INTRINSIC_VOID:
3709     // Allow the target to implement this method for its nodes.
3710     TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3711     break;
3712   }
3713 
3714   assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3715   return Known;
3716 }
3717 
3718 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3719                                                              SDValue N1) const {
3720   // X + 0 never overflow
3721   if (isNullConstant(N1))
3722     return OFK_Never;
3723 
3724   KnownBits N1Known = computeKnownBits(N1);
3725   if (N1Known.Zero.getBoolValue()) {
3726     KnownBits N0Known = computeKnownBits(N0);
3727 
3728     bool overflow;
3729     (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3730     if (!overflow)
3731       return OFK_Never;
3732   }
3733 
3734   // mulhi + 1 never overflow
3735   if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3736       (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3737     return OFK_Never;
3738 
3739   if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3740     KnownBits N0Known = computeKnownBits(N0);
3741 
3742     if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3743       return OFK_Never;
3744   }
3745 
3746   return OFK_Sometime;
3747 }
3748 
3749 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3750   EVT OpVT = Val.getValueType();
3751   unsigned BitWidth = OpVT.getScalarSizeInBits();
3752 
3753   // Is the constant a known power of 2?
3754   if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3755     return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3756 
3757   // A left-shift of a constant one will have exactly one bit set because
3758   // shifting the bit off the end is undefined.
3759   if (Val.getOpcode() == ISD::SHL) {
3760     auto *C = isConstOrConstSplat(Val.getOperand(0));
3761     if (C && C->getAPIntValue() == 1)
3762       return true;
3763   }
3764 
3765   // Similarly, a logical right-shift of a constant sign-bit will have exactly
3766   // one bit set.
3767   if (Val.getOpcode() == ISD::SRL) {
3768     auto *C = isConstOrConstSplat(Val.getOperand(0));
3769     if (C && C->getAPIntValue().isSignMask())
3770       return true;
3771   }
3772 
3773   // Are all operands of a build vector constant powers of two?
3774   if (Val.getOpcode() == ISD::BUILD_VECTOR)
3775     if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3776           if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3777             return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3778           return false;
3779         }))
3780       return true;
3781 
3782   // Is the operand of a splat vector a constant power of two?
3783   if (Val.getOpcode() == ISD::SPLAT_VECTOR)
3784     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val->getOperand(0)))
3785       if (C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2())
3786         return true;
3787 
3788   // More could be done here, though the above checks are enough
3789   // to handle some common cases.
3790 
3791   // Fall back to computeKnownBits to catch other known cases.
3792   KnownBits Known = computeKnownBits(Val);
3793   return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3794 }
3795 
3796 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3797   EVT VT = Op.getValueType();
3798 
3799   // TODO: Assume we don't know anything for now.
3800   if (VT.isScalableVector())
3801     return 1;
3802 
3803   APInt DemandedElts = VT.isVector()
3804                            ? APInt::getAllOnes(VT.getVectorNumElements())
3805                            : APInt(1, 1);
3806   return ComputeNumSignBits(Op, DemandedElts, Depth);
3807 }
3808 
3809 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3810                                           unsigned Depth) const {
3811   EVT VT = Op.getValueType();
3812   assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3813   unsigned VTBits = VT.getScalarSizeInBits();
3814   unsigned NumElts = DemandedElts.getBitWidth();
3815   unsigned Tmp, Tmp2;
3816   unsigned FirstAnswer = 1;
3817 
3818   if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3819     const APInt &Val = C->getAPIntValue();
3820     return Val.getNumSignBits();
3821   }
3822 
3823   if (Depth >= MaxRecursionDepth)
3824     return 1;  // Limit search depth.
3825 
3826   if (!DemandedElts || VT.isScalableVector())
3827     return 1;  // No demanded elts, better to assume we don't know anything.
3828 
3829   unsigned Opcode = Op.getOpcode();
3830   switch (Opcode) {
3831   default: break;
3832   case ISD::AssertSext:
3833     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3834     return VTBits-Tmp+1;
3835   case ISD::AssertZext:
3836     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3837     return VTBits-Tmp;
3838 
3839   case ISD::BUILD_VECTOR:
3840     Tmp = VTBits;
3841     for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3842       if (!DemandedElts[i])
3843         continue;
3844 
3845       SDValue SrcOp = Op.getOperand(i);
3846       Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1);
3847 
3848       // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3849       if (SrcOp.getValueSizeInBits() != VTBits) {
3850         assert(SrcOp.getValueSizeInBits() > VTBits &&
3851                "Expected BUILD_VECTOR implicit truncation");
3852         unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3853         Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3854       }
3855       Tmp = std::min(Tmp, Tmp2);
3856     }
3857     return Tmp;
3858 
3859   case ISD::VECTOR_SHUFFLE: {
3860     // Collect the minimum number of sign bits that are shared by every vector
3861     // element referenced by the shuffle.
3862     APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3863     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3864     assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3865     for (unsigned i = 0; i != NumElts; ++i) {
3866       int M = SVN->getMaskElt(i);
3867       if (!DemandedElts[i])
3868         continue;
3869       // For UNDEF elements, we don't know anything about the common state of
3870       // the shuffle result.
3871       if (M < 0)
3872         return 1;
3873       if ((unsigned)M < NumElts)
3874         DemandedLHS.setBit((unsigned)M % NumElts);
3875       else
3876         DemandedRHS.setBit((unsigned)M % NumElts);
3877     }
3878     Tmp = std::numeric_limits<unsigned>::max();
3879     if (!!DemandedLHS)
3880       Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3881     if (!!DemandedRHS) {
3882       Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3883       Tmp = std::min(Tmp, Tmp2);
3884     }
3885     // If we don't know anything, early out and try computeKnownBits fall-back.
3886     if (Tmp == 1)
3887       break;
3888     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3889     return Tmp;
3890   }
3891 
3892   case ISD::BITCAST: {
3893     SDValue N0 = Op.getOperand(0);
3894     EVT SrcVT = N0.getValueType();
3895     unsigned SrcBits = SrcVT.getScalarSizeInBits();
3896 
3897     // Ignore bitcasts from unsupported types..
3898     if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3899       break;
3900 
3901     // Fast handling of 'identity' bitcasts.
3902     if (VTBits == SrcBits)
3903       return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3904 
3905     bool IsLE = getDataLayout().isLittleEndian();
3906 
3907     // Bitcast 'large element' scalar/vector to 'small element' vector.
3908     if ((SrcBits % VTBits) == 0) {
3909       assert(VT.isVector() && "Expected bitcast to vector");
3910 
3911       unsigned Scale = SrcBits / VTBits;
3912       APInt SrcDemandedElts =
3913           APIntOps::ScaleBitMask(DemandedElts, NumElts / Scale);
3914 
3915       // Fast case - sign splat can be simply split across the small elements.
3916       Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3917       if (Tmp == SrcBits)
3918         return VTBits;
3919 
3920       // Slow case - determine how far the sign extends into each sub-element.
3921       Tmp2 = VTBits;
3922       for (unsigned i = 0; i != NumElts; ++i)
3923         if (DemandedElts[i]) {
3924           unsigned SubOffset = i % Scale;
3925           SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3926           SubOffset = SubOffset * VTBits;
3927           if (Tmp <= SubOffset)
3928             return 1;
3929           Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3930         }
3931       return Tmp2;
3932     }
3933     break;
3934   }
3935 
3936   case ISD::FP_TO_SINT_SAT:
3937     // FP_TO_SINT_SAT produces a signed value that fits in the saturating VT.
3938     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3939     return VTBits - Tmp + 1;
3940   case ISD::SIGN_EXTEND:
3941     Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3942     return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3943   case ISD::SIGN_EXTEND_INREG:
3944     // Max of the input and what this extends.
3945     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3946     Tmp = VTBits-Tmp+1;
3947     Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3948     return std::max(Tmp, Tmp2);
3949   case ISD::SIGN_EXTEND_VECTOR_INREG: {
3950     SDValue Src = Op.getOperand(0);
3951     EVT SrcVT = Src.getValueType();
3952     APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3953     Tmp = VTBits - SrcVT.getScalarSizeInBits();
3954     return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3955   }
3956   case ISD::SRA:
3957     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3958     // SRA X, C -> adds C sign bits.
3959     if (const APInt *ShAmt =
3960             getValidMinimumShiftAmountConstant(Op, DemandedElts))
3961       Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3962     return Tmp;
3963   case ISD::SHL:
3964     if (const APInt *ShAmt =
3965             getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
3966       // shl destroys sign bits, ensure it doesn't shift out all sign bits.
3967       Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3968       if (ShAmt->ult(Tmp))
3969         return Tmp - ShAmt->getZExtValue();
3970     }
3971     break;
3972   case ISD::AND:
3973   case ISD::OR:
3974   case ISD::XOR:    // NOT is handled here.
3975     // Logical binary ops preserve the number of sign bits at the worst.
3976     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3977     if (Tmp != 1) {
3978       Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3979       FirstAnswer = std::min(Tmp, Tmp2);
3980       // We computed what we know about the sign bits as our first
3981       // answer. Now proceed to the generic code that uses
3982       // computeKnownBits, and pick whichever answer is better.
3983     }
3984     break;
3985 
3986   case ISD::SELECT:
3987   case ISD::VSELECT:
3988     Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3989     if (Tmp == 1) return 1;  // Early out.
3990     Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3991     return std::min(Tmp, Tmp2);
3992   case ISD::SELECT_CC:
3993     Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3994     if (Tmp == 1) return 1;  // Early out.
3995     Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3996     return std::min(Tmp, Tmp2);
3997 
3998   case ISD::SMIN:
3999   case ISD::SMAX: {
4000     // If we have a clamp pattern, we know that the number of sign bits will be
4001     // the minimum of the clamp min/max range.
4002     bool IsMax = (Opcode == ISD::SMAX);
4003     ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
4004     if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
4005       if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
4006         CstHigh =
4007             isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
4008     if (CstLow && CstHigh) {
4009       if (!IsMax)
4010         std::swap(CstLow, CstHigh);
4011       if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
4012         Tmp = CstLow->getAPIntValue().getNumSignBits();
4013         Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
4014         return std::min(Tmp, Tmp2);
4015       }
4016     }
4017 
4018     // Fallback - just get the minimum number of sign bits of the operands.
4019     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4020     if (Tmp == 1)
4021       return 1;  // Early out.
4022     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
4023     return std::min(Tmp, Tmp2);
4024   }
4025   case ISD::UMIN:
4026   case ISD::UMAX:
4027     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4028     if (Tmp == 1)
4029       return 1;  // Early out.
4030     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
4031     return std::min(Tmp, Tmp2);
4032   case ISD::SADDO:
4033   case ISD::UADDO:
4034   case ISD::SSUBO:
4035   case ISD::USUBO:
4036   case ISD::SMULO:
4037   case ISD::UMULO:
4038     if (Op.getResNo() != 1)
4039       break;
4040     // The boolean result conforms to getBooleanContents.  Fall through.
4041     // If setcc returns 0/-1, all bits are sign bits.
4042     // We know that we have an integer-based boolean since these operations
4043     // are only available for integer.
4044     if (TLI->getBooleanContents(VT.isVector(), false) ==
4045         TargetLowering::ZeroOrNegativeOneBooleanContent)
4046       return VTBits;
4047     break;
4048   case ISD::SETCC:
4049   case ISD::STRICT_FSETCC:
4050   case ISD::STRICT_FSETCCS: {
4051     unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
4052     // If setcc returns 0/-1, all bits are sign bits.
4053     if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
4054         TargetLowering::ZeroOrNegativeOneBooleanContent)
4055       return VTBits;
4056     break;
4057   }
4058   case ISD::ROTL:
4059   case ISD::ROTR:
4060     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4061 
4062     // If we're rotating an 0/-1 value, then it stays an 0/-1 value.
4063     if (Tmp == VTBits)
4064       return VTBits;
4065 
4066     if (ConstantSDNode *C =
4067             isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
4068       unsigned RotAmt = C->getAPIntValue().urem(VTBits);
4069 
4070       // Handle rotate right by N like a rotate left by 32-N.
4071       if (Opcode == ISD::ROTR)
4072         RotAmt = (VTBits - RotAmt) % VTBits;
4073 
4074       // If we aren't rotating out all of the known-in sign bits, return the
4075       // number that are left.  This handles rotl(sext(x), 1) for example.
4076       if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
4077     }
4078     break;
4079   case ISD::ADD:
4080   case ISD::ADDC:
4081     // Add can have at most one carry bit.  Thus we know that the output
4082     // is, at worst, one more bit than the inputs.
4083     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4084     if (Tmp == 1) return 1; // Early out.
4085 
4086     // Special case decrementing a value (ADD X, -1):
4087     if (ConstantSDNode *CRHS =
4088             isConstOrConstSplat(Op.getOperand(1), DemandedElts))
4089       if (CRHS->isAllOnes()) {
4090         KnownBits Known =
4091             computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4092 
4093         // If the input is known to be 0 or 1, the output is 0/-1, which is all
4094         // sign bits set.
4095         if ((Known.Zero | 1).isAllOnes())
4096           return VTBits;
4097 
4098         // If we are subtracting one from a positive number, there is no carry
4099         // out of the result.
4100         if (Known.isNonNegative())
4101           return Tmp;
4102       }
4103 
4104     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
4105     if (Tmp2 == 1) return 1; // Early out.
4106     return std::min(Tmp, Tmp2) - 1;
4107   case ISD::SUB:
4108     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
4109     if (Tmp2 == 1) return 1; // Early out.
4110 
4111     // Handle NEG.
4112     if (ConstantSDNode *CLHS =
4113             isConstOrConstSplat(Op.getOperand(0), DemandedElts))
4114       if (CLHS->isZero()) {
4115         KnownBits Known =
4116             computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4117         // If the input is known to be 0 or 1, the output is 0/-1, which is all
4118         // sign bits set.
4119         if ((Known.Zero | 1).isAllOnes())
4120           return VTBits;
4121 
4122         // If the input is known to be positive (the sign bit is known clear),
4123         // the output of the NEG has the same number of sign bits as the input.
4124         if (Known.isNonNegative())
4125           return Tmp2;
4126 
4127         // Otherwise, we treat this like a SUB.
4128       }
4129 
4130     // Sub can have at most one carry bit.  Thus we know that the output
4131     // is, at worst, one more bit than the inputs.
4132     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4133     if (Tmp == 1) return 1; // Early out.
4134     return std::min(Tmp, Tmp2) - 1;
4135   case ISD::MUL: {
4136     // The output of the Mul can be at most twice the valid bits in the inputs.
4137     unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4138     if (SignBitsOp0 == 1)
4139       break;
4140     unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
4141     if (SignBitsOp1 == 1)
4142       break;
4143     unsigned OutValidBits =
4144         (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
4145     return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
4146   }
4147   case ISD::SREM:
4148     // The sign bit is the LHS's sign bit, except when the result of the
4149     // remainder is zero. The magnitude of the result should be less than or
4150     // equal to the magnitude of the LHS. Therefore, the result should have
4151     // at least as many sign bits as the left hand side.
4152     return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4153   case ISD::TRUNCATE: {
4154     // Check if the sign bits of source go down as far as the truncated value.
4155     unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
4156     unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4157     if (NumSrcSignBits > (NumSrcBits - VTBits))
4158       return NumSrcSignBits - (NumSrcBits - VTBits);
4159     break;
4160   }
4161   case ISD::EXTRACT_ELEMENT: {
4162     const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
4163     const int BitWidth = Op.getValueSizeInBits();
4164     const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
4165 
4166     // Get reverse index (starting from 1), Op1 value indexes elements from
4167     // little end. Sign starts at big end.
4168     const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
4169 
4170     // If the sign portion ends in our element the subtraction gives correct
4171     // result. Otherwise it gives either negative or > bitwidth result
4172     return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
4173   }
4174   case ISD::INSERT_VECTOR_ELT: {
4175     // If we know the element index, split the demand between the
4176     // source vector and the inserted element, otherwise assume we need
4177     // the original demanded vector elements and the value.
4178     SDValue InVec = Op.getOperand(0);
4179     SDValue InVal = Op.getOperand(1);
4180     SDValue EltNo = Op.getOperand(2);
4181     bool DemandedVal = true;
4182     APInt DemandedVecElts = DemandedElts;
4183     auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
4184     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4185       unsigned EltIdx = CEltNo->getZExtValue();
4186       DemandedVal = !!DemandedElts[EltIdx];
4187       DemandedVecElts.clearBit(EltIdx);
4188     }
4189     Tmp = std::numeric_limits<unsigned>::max();
4190     if (DemandedVal) {
4191       // TODO - handle implicit truncation of inserted elements.
4192       if (InVal.getScalarValueSizeInBits() != VTBits)
4193         break;
4194       Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
4195       Tmp = std::min(Tmp, Tmp2);
4196     }
4197     if (!!DemandedVecElts) {
4198       Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
4199       Tmp = std::min(Tmp, Tmp2);
4200     }
4201     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4202     return Tmp;
4203   }
4204   case ISD::EXTRACT_VECTOR_ELT: {
4205     SDValue InVec = Op.getOperand(0);
4206     SDValue EltNo = Op.getOperand(1);
4207     EVT VecVT = InVec.getValueType();
4208     // ComputeNumSignBits not yet implemented for scalable vectors.
4209     if (VecVT.isScalableVector())
4210       break;
4211     const unsigned BitWidth = Op.getValueSizeInBits();
4212     const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
4213     const unsigned NumSrcElts = VecVT.getVectorNumElements();
4214 
4215     // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
4216     // anything about sign bits. But if the sizes match we can derive knowledge
4217     // about sign bits from the vector operand.
4218     if (BitWidth != EltBitWidth)
4219       break;
4220 
4221     // If we know the element index, just demand that vector element, else for
4222     // an unknown element index, ignore DemandedElts and demand them all.
4223     APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
4224     auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4225     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4226       DemandedSrcElts =
4227           APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
4228 
4229     return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
4230   }
4231   case ISD::EXTRACT_SUBVECTOR: {
4232     // Offset the demanded elts by the subvector index.
4233     SDValue Src = Op.getOperand(0);
4234     // Bail until we can represent demanded elements for scalable vectors.
4235     if (Src.getValueType().isScalableVector())
4236       break;
4237     uint64_t Idx = Op.getConstantOperandVal(1);
4238     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
4239     APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
4240     return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4241   }
4242   case ISD::CONCAT_VECTORS: {
4243     // Determine the minimum number of sign bits across all demanded
4244     // elts of the input vectors. Early out if the result is already 1.
4245     Tmp = std::numeric_limits<unsigned>::max();
4246     EVT SubVectorVT = Op.getOperand(0).getValueType();
4247     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
4248     unsigned NumSubVectors = Op.getNumOperands();
4249     for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
4250       APInt DemandedSub =
4251           DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
4252       if (!DemandedSub)
4253         continue;
4254       Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
4255       Tmp = std::min(Tmp, Tmp2);
4256     }
4257     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4258     return Tmp;
4259   }
4260   case ISD::INSERT_SUBVECTOR: {
4261     // Demand any elements from the subvector and the remainder from the src its
4262     // inserted into.
4263     SDValue Src = Op.getOperand(0);
4264     SDValue Sub = Op.getOperand(1);
4265     uint64_t Idx = Op.getConstantOperandVal(2);
4266     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
4267     APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
4268     APInt DemandedSrcElts = DemandedElts;
4269     DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
4270 
4271     Tmp = std::numeric_limits<unsigned>::max();
4272     if (!!DemandedSubElts) {
4273       Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
4274       if (Tmp == 1)
4275         return 1; // early-out
4276     }
4277     if (!!DemandedSrcElts) {
4278       Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4279       Tmp = std::min(Tmp, Tmp2);
4280     }
4281     assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4282     return Tmp;
4283   }
4284   case ISD::ATOMIC_CMP_SWAP:
4285   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
4286   case ISD::ATOMIC_SWAP:
4287   case ISD::ATOMIC_LOAD_ADD:
4288   case ISD::ATOMIC_LOAD_SUB:
4289   case ISD::ATOMIC_LOAD_AND:
4290   case ISD::ATOMIC_LOAD_CLR:
4291   case ISD::ATOMIC_LOAD_OR:
4292   case ISD::ATOMIC_LOAD_XOR:
4293   case ISD::ATOMIC_LOAD_NAND:
4294   case ISD::ATOMIC_LOAD_MIN:
4295   case ISD::ATOMIC_LOAD_MAX:
4296   case ISD::ATOMIC_LOAD_UMIN:
4297   case ISD::ATOMIC_LOAD_UMAX:
4298   case ISD::ATOMIC_LOAD: {
4299     Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
4300     // If we are looking at the loaded value.
4301     if (Op.getResNo() == 0) {
4302       if (Tmp == VTBits)
4303         return 1; // early-out
4304       if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND)
4305         return VTBits - Tmp + 1;
4306       if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
4307         return VTBits - Tmp;
4308     }
4309     break;
4310   }
4311   }
4312 
4313   // If we are looking at the loaded value of the SDNode.
4314   if (Op.getResNo() == 0) {
4315     // Handle LOADX separately here. EXTLOAD case will fallthrough.
4316     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
4317       unsigned ExtType = LD->getExtensionType();
4318       switch (ExtType) {
4319       default: break;
4320       case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
4321         Tmp = LD->getMemoryVT().getScalarSizeInBits();
4322         return VTBits - Tmp + 1;
4323       case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
4324         Tmp = LD->getMemoryVT().getScalarSizeInBits();
4325         return VTBits - Tmp;
4326       case ISD::NON_EXTLOAD:
4327         if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
4328           // We only need to handle vectors - computeKnownBits should handle
4329           // scalar cases.
4330           Type *CstTy = Cst->getType();
4331           if (CstTy->isVectorTy() &&
4332               (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits() &&
4333               VTBits == CstTy->getScalarSizeInBits()) {
4334             Tmp = VTBits;
4335             for (unsigned i = 0; i != NumElts; ++i) {
4336               if (!DemandedElts[i])
4337                 continue;
4338               if (Constant *Elt = Cst->getAggregateElement(i)) {
4339                 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4340                   const APInt &Value = CInt->getValue();
4341                   Tmp = std::min(Tmp, Value.getNumSignBits());
4342                   continue;
4343                 }
4344                 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4345                   APInt Value = CFP->getValueAPF().bitcastToAPInt();
4346                   Tmp = std::min(Tmp, Value.getNumSignBits());
4347                   continue;
4348                 }
4349               }
4350               // Unknown type. Conservatively assume no bits match sign bit.
4351               return 1;
4352             }
4353             return Tmp;
4354           }
4355         }
4356         break;
4357       }
4358     }
4359   }
4360 
4361   // Allow the target to implement this method for its nodes.
4362   if (Opcode >= ISD::BUILTIN_OP_END ||
4363       Opcode == ISD::INTRINSIC_WO_CHAIN ||
4364       Opcode == ISD::INTRINSIC_W_CHAIN ||
4365       Opcode == ISD::INTRINSIC_VOID) {
4366     unsigned NumBits =
4367         TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
4368     if (NumBits > 1)
4369       FirstAnswer = std::max(FirstAnswer, NumBits);
4370   }
4371 
4372   // Finally, if we can prove that the top bits of the result are 0's or 1's,
4373   // use this information.
4374   KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
4375   return std::max(FirstAnswer, Known.countMinSignBits());
4376 }
4377 
4378 unsigned SelectionDAG::ComputeMaxSignificantBits(SDValue Op,
4379                                                  unsigned Depth) const {
4380   unsigned SignBits = ComputeNumSignBits(Op, Depth);
4381   return Op.getScalarValueSizeInBits() - SignBits + 1;
4382 }
4383 
4384 unsigned SelectionDAG::ComputeMaxSignificantBits(SDValue Op,
4385                                                  const APInt &DemandedElts,
4386                                                  unsigned Depth) const {
4387   unsigned SignBits = ComputeNumSignBits(Op, DemandedElts, Depth);
4388   return Op.getScalarValueSizeInBits() - SignBits + 1;
4389 }
4390 
4391 bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly,
4392                                                     unsigned Depth) const {
4393   // Early out for FREEZE.
4394   if (Op.getOpcode() == ISD::FREEZE)
4395     return true;
4396 
4397   // TODO: Assume we don't know anything for now.
4398   EVT VT = Op.getValueType();
4399   if (VT.isScalableVector())
4400     return false;
4401 
4402   APInt DemandedElts = VT.isVector()
4403                            ? APInt::getAllOnes(VT.getVectorNumElements())
4404                            : APInt(1, 1);
4405   return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts, PoisonOnly, Depth);
4406 }
4407 
4408 bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
4409                                                     const APInt &DemandedElts,
4410                                                     bool PoisonOnly,
4411                                                     unsigned Depth) const {
4412   unsigned Opcode = Op.getOpcode();
4413 
4414   // Early out for FREEZE.
4415   if (Opcode == ISD::FREEZE)
4416     return true;
4417 
4418   if (Depth >= MaxRecursionDepth)
4419     return false; // Limit search depth.
4420 
4421   if (isIntOrFPConstant(Op))
4422     return true;
4423 
4424   switch (Opcode) {
4425   case ISD::UNDEF:
4426     return PoisonOnly;
4427 
4428   case ISD::BUILD_VECTOR:
4429     // NOTE: BUILD_VECTOR has implicit truncation of wider scalar elements -
4430     // this shouldn't affect the result.
4431     for (unsigned i = 0, e = Op.getNumOperands(); i < e; ++i) {
4432       if (!DemandedElts[i])
4433         continue;
4434       if (!isGuaranteedNotToBeUndefOrPoison(Op.getOperand(i), PoisonOnly,
4435                                             Depth + 1))
4436         return false;
4437     }
4438     return true;
4439 
4440   // TODO: Search for noundef attributes from library functions.
4441 
4442   // TODO: Pointers dereferenced by ISD::LOAD/STORE ops are noundef.
4443 
4444   default:
4445     // Allow the target to implement this method for its nodes.
4446     if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
4447         Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
4448       return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode(
4449           Op, DemandedElts, *this, PoisonOnly, Depth);
4450     break;
4451   }
4452 
4453   return false;
4454 }
4455 
4456 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
4457   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
4458       !isa<ConstantSDNode>(Op.getOperand(1)))
4459     return false;
4460 
4461   if (Op.getOpcode() == ISD::OR &&
4462       !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
4463     return false;
4464 
4465   return true;
4466 }
4467 
4468 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
4469   // If we're told that NaNs won't happen, assume they won't.
4470   if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
4471     return true;
4472 
4473   if (Depth >= MaxRecursionDepth)
4474     return false; // Limit search depth.
4475 
4476   // TODO: Handle vectors.
4477   // If the value is a constant, we can obviously see if it is a NaN or not.
4478   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4479     return !C->getValueAPF().isNaN() ||
4480            (SNaN && !C->getValueAPF().isSignaling());
4481   }
4482 
4483   unsigned Opcode = Op.getOpcode();
4484   switch (Opcode) {
4485   case ISD::FADD:
4486   case ISD::FSUB:
4487   case ISD::FMUL:
4488   case ISD::FDIV:
4489   case ISD::FREM:
4490   case ISD::FSIN:
4491   case ISD::FCOS: {
4492     if (SNaN)
4493       return true;
4494     // TODO: Need isKnownNeverInfinity
4495     return false;
4496   }
4497   case ISD::FCANONICALIZE:
4498   case ISD::FEXP:
4499   case ISD::FEXP2:
4500   case ISD::FTRUNC:
4501   case ISD::FFLOOR:
4502   case ISD::FCEIL:
4503   case ISD::FROUND:
4504   case ISD::FROUNDEVEN:
4505   case ISD::FRINT:
4506   case ISD::FNEARBYINT: {
4507     if (SNaN)
4508       return true;
4509     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4510   }
4511   case ISD::FABS:
4512   case ISD::FNEG:
4513   case ISD::FCOPYSIGN: {
4514     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4515   }
4516   case ISD::SELECT:
4517     return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4518            isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4519   case ISD::FP_EXTEND:
4520   case ISD::FP_ROUND: {
4521     if (SNaN)
4522       return true;
4523     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4524   }
4525   case ISD::SINT_TO_FP:
4526   case ISD::UINT_TO_FP:
4527     return true;
4528   case ISD::FMA:
4529   case ISD::FMAD: {
4530     if (SNaN)
4531       return true;
4532     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4533            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4534            isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4535   }
4536   case ISD::FSQRT: // Need is known positive
4537   case ISD::FLOG:
4538   case ISD::FLOG2:
4539   case ISD::FLOG10:
4540   case ISD::FPOWI:
4541   case ISD::FPOW: {
4542     if (SNaN)
4543       return true;
4544     // TODO: Refine on operand
4545     return false;
4546   }
4547   case ISD::FMINNUM:
4548   case ISD::FMAXNUM: {
4549     // Only one needs to be known not-nan, since it will be returned if the
4550     // other ends up being one.
4551     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4552            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4553   }
4554   case ISD::FMINNUM_IEEE:
4555   case ISD::FMAXNUM_IEEE: {
4556     if (SNaN)
4557       return true;
4558     // This can return a NaN if either operand is an sNaN, or if both operands
4559     // are NaN.
4560     return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4561             isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4562            (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4563             isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4564   }
4565   case ISD::FMINIMUM:
4566   case ISD::FMAXIMUM: {
4567     // TODO: Does this quiet or return the origina NaN as-is?
4568     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4569            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4570   }
4571   case ISD::EXTRACT_VECTOR_ELT: {
4572     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4573   }
4574   default:
4575     if (Opcode >= ISD::BUILTIN_OP_END ||
4576         Opcode == ISD::INTRINSIC_WO_CHAIN ||
4577         Opcode == ISD::INTRINSIC_W_CHAIN ||
4578         Opcode == ISD::INTRINSIC_VOID) {
4579       return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4580     }
4581 
4582     return false;
4583   }
4584 }
4585 
4586 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4587   assert(Op.getValueType().isFloatingPoint() &&
4588          "Floating point type expected");
4589 
4590   // If the value is a constant, we can obviously see if it is a zero or not.
4591   // TODO: Add BuildVector support.
4592   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4593     return !C->isZero();
4594   return false;
4595 }
4596 
4597 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4598   assert(!Op.getValueType().isFloatingPoint() &&
4599          "Floating point types unsupported - use isKnownNeverZeroFloat");
4600 
4601   // If the value is a constant, we can obviously see if it is a zero or not.
4602   if (ISD::matchUnaryPredicate(Op,
4603                                [](ConstantSDNode *C) { return !C->isZero(); }))
4604     return true;
4605 
4606   // TODO: Recognize more cases here.
4607   switch (Op.getOpcode()) {
4608   default: break;
4609   case ISD::OR:
4610     if (isKnownNeverZero(Op.getOperand(1)) ||
4611         isKnownNeverZero(Op.getOperand(0)))
4612       return true;
4613     break;
4614   }
4615 
4616   return false;
4617 }
4618 
4619 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4620   // Check the obvious case.
4621   if (A == B) return true;
4622 
4623   // For for negative and positive zero.
4624   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4625     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4626       if (CA->isZero() && CB->isZero()) return true;
4627 
4628   // Otherwise they may not be equal.
4629   return false;
4630 }
4631 
4632 // FIXME: unify with llvm::haveNoCommonBitsSet.
4633 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4634   assert(A.getValueType() == B.getValueType() &&
4635          "Values must have the same type");
4636   // Match masked merge pattern (X & ~M) op (Y & M)
4637   if (A->getOpcode() == ISD::AND && B->getOpcode() == ISD::AND) {
4638     auto MatchNoCommonBitsPattern = [&](SDValue NotM, SDValue And) {
4639       if (isBitwiseNot(NotM, true)) {
4640         SDValue NotOperand = NotM->getOperand(0);
4641         return NotOperand == And->getOperand(0) ||
4642                NotOperand == And->getOperand(1);
4643       }
4644       return false;
4645     };
4646     if (MatchNoCommonBitsPattern(A->getOperand(0), B) ||
4647         MatchNoCommonBitsPattern(A->getOperand(1), B) ||
4648         MatchNoCommonBitsPattern(B->getOperand(0), A) ||
4649         MatchNoCommonBitsPattern(B->getOperand(1), A))
4650       return true;
4651   }
4652   return KnownBits::haveNoCommonBitsSet(computeKnownBits(A),
4653                                         computeKnownBits(B));
4654 }
4655 
4656 static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step,
4657                                SelectionDAG &DAG) {
4658   if (cast<ConstantSDNode>(Step)->isZero())
4659     return DAG.getConstant(0, DL, VT);
4660 
4661   return SDValue();
4662 }
4663 
4664 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4665                                 ArrayRef<SDValue> Ops,
4666                                 SelectionDAG &DAG) {
4667   int NumOps = Ops.size();
4668   assert(NumOps != 0 && "Can't build an empty vector!");
4669   assert(!VT.isScalableVector() &&
4670          "BUILD_VECTOR cannot be used with scalable types");
4671   assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4672          "Incorrect element count in BUILD_VECTOR!");
4673 
4674   // BUILD_VECTOR of UNDEFs is UNDEF.
4675   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4676     return DAG.getUNDEF(VT);
4677 
4678   // BUILD_VECTOR of seq extract/insert from the same vector + type is Identity.
4679   SDValue IdentitySrc;
4680   bool IsIdentity = true;
4681   for (int i = 0; i != NumOps; ++i) {
4682     if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4683         Ops[i].getOperand(0).getValueType() != VT ||
4684         (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4685         !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4686         cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4687       IsIdentity = false;
4688       break;
4689     }
4690     IdentitySrc = Ops[i].getOperand(0);
4691   }
4692   if (IsIdentity)
4693     return IdentitySrc;
4694 
4695   return SDValue();
4696 }
4697 
4698 /// Try to simplify vector concatenation to an input value, undef, or build
4699 /// vector.
4700 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4701                                   ArrayRef<SDValue> Ops,
4702                                   SelectionDAG &DAG) {
4703   assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4704   assert(llvm::all_of(Ops,
4705                       [Ops](SDValue Op) {
4706                         return Ops[0].getValueType() == Op.getValueType();
4707                       }) &&
4708          "Concatenation of vectors with inconsistent value types!");
4709   assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
4710              VT.getVectorElementCount() &&
4711          "Incorrect element count in vector concatenation!");
4712 
4713   if (Ops.size() == 1)
4714     return Ops[0];
4715 
4716   // Concat of UNDEFs is UNDEF.
4717   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4718     return DAG.getUNDEF(VT);
4719 
4720   // Scan the operands and look for extract operations from a single source
4721   // that correspond to insertion at the same location via this concatenation:
4722   // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4723   SDValue IdentitySrc;
4724   bool IsIdentity = true;
4725   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4726     SDValue Op = Ops[i];
4727     unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
4728     if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4729         Op.getOperand(0).getValueType() != VT ||
4730         (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4731         Op.getConstantOperandVal(1) != IdentityIndex) {
4732       IsIdentity = false;
4733       break;
4734     }
4735     assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4736            "Unexpected identity source vector for concat of extracts");
4737     IdentitySrc = Op.getOperand(0);
4738   }
4739   if (IsIdentity) {
4740     assert(IdentitySrc && "Failed to set source vector of extracts");
4741     return IdentitySrc;
4742   }
4743 
4744   // The code below this point is only designed to work for fixed width
4745   // vectors, so we bail out for now.
4746   if (VT.isScalableVector())
4747     return SDValue();
4748 
4749   // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
4750   // simplified to one big BUILD_VECTOR.
4751   // FIXME: Add support for SCALAR_TO_VECTOR as well.
4752   EVT SVT = VT.getScalarType();
4753   SmallVector<SDValue, 16> Elts;
4754   for (SDValue Op : Ops) {
4755     EVT OpVT = Op.getValueType();
4756     if (Op.isUndef())
4757       Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4758     else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4759       Elts.append(Op->op_begin(), Op->op_end());
4760     else
4761       return SDValue();
4762   }
4763 
4764   // BUILD_VECTOR requires all inputs to be of the same type, find the
4765   // maximum type and extend them all.
4766   for (SDValue Op : Elts)
4767     SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4768 
4769   if (SVT.bitsGT(VT.getScalarType())) {
4770     for (SDValue &Op : Elts) {
4771       if (Op.isUndef())
4772         Op = DAG.getUNDEF(SVT);
4773       else
4774         Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4775                  ? DAG.getZExtOrTrunc(Op, DL, SVT)
4776                  : DAG.getSExtOrTrunc(Op, DL, SVT);
4777     }
4778   }
4779 
4780   SDValue V = DAG.getBuildVector(VT, DL, Elts);
4781   NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4782   return V;
4783 }
4784 
4785 /// Gets or creates the specified node.
4786 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4787   FoldingSetNodeID ID;
4788   AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4789   void *IP = nullptr;
4790   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4791     return SDValue(E, 0);
4792 
4793   auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4794                               getVTList(VT));
4795   CSEMap.InsertNode(N, IP);
4796 
4797   InsertNode(N);
4798   SDValue V = SDValue(N, 0);
4799   NewSDValueDbgMsg(V, "Creating new node: ", this);
4800   return V;
4801 }
4802 
4803 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4804                               SDValue Operand) {
4805   SDNodeFlags Flags;
4806   if (Inserter)
4807     Flags = Inserter->getFlags();
4808   return getNode(Opcode, DL, VT, Operand, Flags);
4809 }
4810 
4811 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4812                               SDValue Operand, const SDNodeFlags Flags) {
4813   assert(Operand.getOpcode() != ISD::DELETED_NODE &&
4814          "Operand is DELETED_NODE!");
4815   // Constant fold unary operations with an integer constant operand. Even
4816   // opaque constant will be folded, because the folding of unary operations
4817   // doesn't create new constants with different values. Nevertheless, the
4818   // opaque flag is preserved during folding to prevent future folding with
4819   // other constants.
4820   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4821     const APInt &Val = C->getAPIntValue();
4822     switch (Opcode) {
4823     default: break;
4824     case ISD::SIGN_EXTEND:
4825       return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4826                          C->isTargetOpcode(), C->isOpaque());
4827     case ISD::TRUNCATE:
4828       if (C->isOpaque())
4829         break;
4830       LLVM_FALLTHROUGH;
4831     case ISD::ZERO_EXTEND:
4832       return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4833                          C->isTargetOpcode(), C->isOpaque());
4834     case ISD::ANY_EXTEND:
4835       // Some targets like RISCV prefer to sign extend some types.
4836       if (TLI->isSExtCheaperThanZExt(Operand.getValueType(), VT))
4837         return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4838                            C->isTargetOpcode(), C->isOpaque());
4839       return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4840                          C->isTargetOpcode(), C->isOpaque());
4841     case ISD::UINT_TO_FP:
4842     case ISD::SINT_TO_FP: {
4843       APFloat apf(EVTToAPFloatSemantics(VT),
4844                   APInt::getZero(VT.getSizeInBits()));
4845       (void)apf.convertFromAPInt(Val,
4846                                  Opcode==ISD::SINT_TO_FP,
4847                                  APFloat::rmNearestTiesToEven);
4848       return getConstantFP(apf, DL, VT);
4849     }
4850     case ISD::BITCAST:
4851       if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4852         return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4853       if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4854         return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4855       if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4856         return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4857       if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4858         return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4859       break;
4860     case ISD::ABS:
4861       return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4862                          C->isOpaque());
4863     case ISD::BITREVERSE:
4864       return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4865                          C->isOpaque());
4866     case ISD::BSWAP:
4867       return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4868                          C->isOpaque());
4869     case ISD::CTPOP:
4870       return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4871                          C->isOpaque());
4872     case ISD::CTLZ:
4873     case ISD::CTLZ_ZERO_UNDEF:
4874       return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4875                          C->isOpaque());
4876     case ISD::CTTZ:
4877     case ISD::CTTZ_ZERO_UNDEF:
4878       return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4879                          C->isOpaque());
4880     case ISD::FP16_TO_FP: {
4881       bool Ignored;
4882       APFloat FPV(APFloat::IEEEhalf(),
4883                   (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4884 
4885       // This can return overflow, underflow, or inexact; we don't care.
4886       // FIXME need to be more flexible about rounding mode.
4887       (void)FPV.convert(EVTToAPFloatSemantics(VT),
4888                         APFloat::rmNearestTiesToEven, &Ignored);
4889       return getConstantFP(FPV, DL, VT);
4890     }
4891     case ISD::STEP_VECTOR: {
4892       if (SDValue V = FoldSTEP_VECTOR(DL, VT, Operand, *this))
4893         return V;
4894       break;
4895     }
4896     }
4897   }
4898 
4899   // Constant fold unary operations with a floating point constant operand.
4900   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4901     APFloat V = C->getValueAPF();    // make copy
4902     switch (Opcode) {
4903     case ISD::FNEG:
4904       V.changeSign();
4905       return getConstantFP(V, DL, VT);
4906     case ISD::FABS:
4907       V.clearSign();
4908       return getConstantFP(V, DL, VT);
4909     case ISD::FCEIL: {
4910       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4911       if (fs == APFloat::opOK || fs == APFloat::opInexact)
4912         return getConstantFP(V, DL, VT);
4913       break;
4914     }
4915     case ISD::FTRUNC: {
4916       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4917       if (fs == APFloat::opOK || fs == APFloat::opInexact)
4918         return getConstantFP(V, DL, VT);
4919       break;
4920     }
4921     case ISD::FFLOOR: {
4922       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4923       if (fs == APFloat::opOK || fs == APFloat::opInexact)
4924         return getConstantFP(V, DL, VT);
4925       break;
4926     }
4927     case ISD::FP_EXTEND: {
4928       bool ignored;
4929       // This can return overflow, underflow, or inexact; we don't care.
4930       // FIXME need to be more flexible about rounding mode.
4931       (void)V.convert(EVTToAPFloatSemantics(VT),
4932                       APFloat::rmNearestTiesToEven, &ignored);
4933       return getConstantFP(V, DL, VT);
4934     }
4935     case ISD::FP_TO_SINT:
4936     case ISD::FP_TO_UINT: {
4937       bool ignored;
4938       APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4939       // FIXME need to be more flexible about rounding mode.
4940       APFloat::opStatus s =
4941           V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4942       if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4943         break;
4944       return getConstant(IntVal, DL, VT);
4945     }
4946     case ISD::BITCAST:
4947       if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4948         return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4949       if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16)
4950         return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4951       if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4952         return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4953       if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4954         return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4955       break;
4956     case ISD::FP_TO_FP16: {
4957       bool Ignored;
4958       // This can return overflow, underflow, or inexact; we don't care.
4959       // FIXME need to be more flexible about rounding mode.
4960       (void)V.convert(APFloat::IEEEhalf(),
4961                       APFloat::rmNearestTiesToEven, &Ignored);
4962       return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4963     }
4964     }
4965   }
4966 
4967   // Constant fold unary operations with a vector integer or float operand.
4968   switch (Opcode) {
4969   default:
4970     // FIXME: Entirely reasonable to perform folding of other unary
4971     // operations here as the need arises.
4972     break;
4973   case ISD::FNEG:
4974   case ISD::FABS:
4975   case ISD::FCEIL:
4976   case ISD::FTRUNC:
4977   case ISD::FFLOOR:
4978   case ISD::FP_EXTEND:
4979   case ISD::FP_TO_SINT:
4980   case ISD::FP_TO_UINT:
4981   case ISD::TRUNCATE:
4982   case ISD::ANY_EXTEND:
4983   case ISD::ZERO_EXTEND:
4984   case ISD::SIGN_EXTEND:
4985   case ISD::UINT_TO_FP:
4986   case ISD::SINT_TO_FP:
4987   case ISD::ABS:
4988   case ISD::BITREVERSE:
4989   case ISD::BSWAP:
4990   case ISD::CTLZ:
4991   case ISD::CTLZ_ZERO_UNDEF:
4992   case ISD::CTTZ:
4993   case ISD::CTTZ_ZERO_UNDEF:
4994   case ISD::CTPOP: {
4995     SDValue Ops = {Operand};
4996     if (SDValue Fold = FoldConstantArithmetic(Opcode, DL, VT, Ops))
4997       return Fold;
4998   }
4999   }
5000 
5001   unsigned OpOpcode = Operand.getNode()->getOpcode();
5002   switch (Opcode) {
5003   case ISD::STEP_VECTOR:
5004     assert(VT.isScalableVector() &&
5005            "STEP_VECTOR can only be used with scalable types");
5006     assert(OpOpcode == ISD::TargetConstant &&
5007            VT.getVectorElementType() == Operand.getValueType() &&
5008            "Unexpected step operand");
5009     break;
5010   case ISD::FREEZE:
5011     assert(VT == Operand.getValueType() && "Unexpected VT!");
5012     break;
5013   case ISD::TokenFactor:
5014   case ISD::MERGE_VALUES:
5015   case ISD::CONCAT_VECTORS:
5016     return Operand;         // Factor, merge or concat of one node?  No need.
5017   case ISD::BUILD_VECTOR: {
5018     // Attempt to simplify BUILD_VECTOR.
5019     SDValue Ops[] = {Operand};
5020     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5021       return V;
5022     break;
5023   }
5024   case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
5025   case ISD::FP_EXTEND:
5026     assert(VT.isFloatingPoint() &&
5027            Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
5028     if (Operand.getValueType() == VT) return Operand;  // noop conversion.
5029     assert((!VT.isVector() ||
5030             VT.getVectorElementCount() ==
5031             Operand.getValueType().getVectorElementCount()) &&
5032            "Vector element count mismatch!");
5033     assert(Operand.getValueType().bitsLT(VT) &&
5034            "Invalid fpext node, dst < src!");
5035     if (Operand.isUndef())
5036       return getUNDEF(VT);
5037     break;
5038   case ISD::FP_TO_SINT:
5039   case ISD::FP_TO_UINT:
5040     if (Operand.isUndef())
5041       return getUNDEF(VT);
5042     break;
5043   case ISD::SINT_TO_FP:
5044   case ISD::UINT_TO_FP:
5045     // [us]itofp(undef) = 0, because the result value is bounded.
5046     if (Operand.isUndef())
5047       return getConstantFP(0.0, DL, VT);
5048     break;
5049   case ISD::SIGN_EXTEND:
5050     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
5051            "Invalid SIGN_EXTEND!");
5052     assert(VT.isVector() == Operand.getValueType().isVector() &&
5053            "SIGN_EXTEND result type type should be vector iff the operand "
5054            "type is vector!");
5055     if (Operand.getValueType() == VT) return Operand;   // noop extension
5056     assert((!VT.isVector() ||
5057             VT.getVectorElementCount() ==
5058                 Operand.getValueType().getVectorElementCount()) &&
5059            "Vector element count mismatch!");
5060     assert(Operand.getValueType().bitsLT(VT) &&
5061            "Invalid sext node, dst < src!");
5062     if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
5063       return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
5064     if (OpOpcode == ISD::UNDEF)
5065       // sext(undef) = 0, because the top bits will all be the same.
5066       return getConstant(0, DL, VT);
5067     break;
5068   case ISD::ZERO_EXTEND:
5069     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
5070            "Invalid ZERO_EXTEND!");
5071     assert(VT.isVector() == Operand.getValueType().isVector() &&
5072            "ZERO_EXTEND result type type should be vector iff the operand "
5073            "type is vector!");
5074     if (Operand.getValueType() == VT) return Operand;   // noop extension
5075     assert((!VT.isVector() ||
5076             VT.getVectorElementCount() ==
5077                 Operand.getValueType().getVectorElementCount()) &&
5078            "Vector element count mismatch!");
5079     assert(Operand.getValueType().bitsLT(VT) &&
5080            "Invalid zext node, dst < src!");
5081     if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
5082       return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
5083     if (OpOpcode == ISD::UNDEF)
5084       // zext(undef) = 0, because the top bits will be zero.
5085       return getConstant(0, DL, VT);
5086     break;
5087   case ISD::ANY_EXTEND:
5088     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
5089            "Invalid ANY_EXTEND!");
5090     assert(VT.isVector() == Operand.getValueType().isVector() &&
5091            "ANY_EXTEND result type type should be vector iff the operand "
5092            "type is vector!");
5093     if (Operand.getValueType() == VT) return Operand;   // noop extension
5094     assert((!VT.isVector() ||
5095             VT.getVectorElementCount() ==
5096                 Operand.getValueType().getVectorElementCount()) &&
5097            "Vector element count mismatch!");
5098     assert(Operand.getValueType().bitsLT(VT) &&
5099            "Invalid anyext node, dst < src!");
5100 
5101     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
5102         OpOpcode == ISD::ANY_EXTEND)
5103       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
5104       return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
5105     if (OpOpcode == ISD::UNDEF)
5106       return getUNDEF(VT);
5107 
5108     // (ext (trunc x)) -> x
5109     if (OpOpcode == ISD::TRUNCATE) {
5110       SDValue OpOp = Operand.getOperand(0);
5111       if (OpOp.getValueType() == VT) {
5112         transferDbgValues(Operand, OpOp);
5113         return OpOp;
5114       }
5115     }
5116     break;
5117   case ISD::TRUNCATE:
5118     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
5119            "Invalid TRUNCATE!");
5120     assert(VT.isVector() == Operand.getValueType().isVector() &&
5121            "TRUNCATE result type type should be vector iff the operand "
5122            "type is vector!");
5123     if (Operand.getValueType() == VT) return Operand;   // noop truncate
5124     assert((!VT.isVector() ||
5125             VT.getVectorElementCount() ==
5126                 Operand.getValueType().getVectorElementCount()) &&
5127            "Vector element count mismatch!");
5128     assert(Operand.getValueType().bitsGT(VT) &&
5129            "Invalid truncate node, src < dst!");
5130     if (OpOpcode == ISD::TRUNCATE)
5131       return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
5132     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
5133         OpOpcode == ISD::ANY_EXTEND) {
5134       // If the source is smaller than the dest, we still need an extend.
5135       if (Operand.getOperand(0).getValueType().getScalarType()
5136             .bitsLT(VT.getScalarType()))
5137         return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
5138       if (Operand.getOperand(0).getValueType().bitsGT(VT))
5139         return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
5140       return Operand.getOperand(0);
5141     }
5142     if (OpOpcode == ISD::UNDEF)
5143       return getUNDEF(VT);
5144     if (OpOpcode == ISD::VSCALE && !NewNodesMustHaveLegalTypes)
5145       return getVScale(DL, VT, Operand.getConstantOperandAPInt(0));
5146     break;
5147   case ISD::ANY_EXTEND_VECTOR_INREG:
5148   case ISD::ZERO_EXTEND_VECTOR_INREG:
5149   case ISD::SIGN_EXTEND_VECTOR_INREG:
5150     assert(VT.isVector() && "This DAG node is restricted to vector types.");
5151     assert(Operand.getValueType().bitsLE(VT) &&
5152            "The input must be the same size or smaller than the result.");
5153     assert(VT.getVectorMinNumElements() <
5154                Operand.getValueType().getVectorMinNumElements() &&
5155            "The destination vector type must have fewer lanes than the input.");
5156     break;
5157   case ISD::ABS:
5158     assert(VT.isInteger() && VT == Operand.getValueType() &&
5159            "Invalid ABS!");
5160     if (OpOpcode == ISD::UNDEF)
5161       return getUNDEF(VT);
5162     break;
5163   case ISD::BSWAP:
5164     assert(VT.isInteger() && VT == Operand.getValueType() &&
5165            "Invalid BSWAP!");
5166     assert((VT.getScalarSizeInBits() % 16 == 0) &&
5167            "BSWAP types must be a multiple of 16 bits!");
5168     if (OpOpcode == ISD::UNDEF)
5169       return getUNDEF(VT);
5170     // bswap(bswap(X)) -> X.
5171     if (OpOpcode == ISD::BSWAP)
5172       return Operand.getOperand(0);
5173     break;
5174   case ISD::BITREVERSE:
5175     assert(VT.isInteger() && VT == Operand.getValueType() &&
5176            "Invalid BITREVERSE!");
5177     if (OpOpcode == ISD::UNDEF)
5178       return getUNDEF(VT);
5179     break;
5180   case ISD::BITCAST:
5181     assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
5182            "Cannot BITCAST between types of different sizes!");
5183     if (VT == Operand.getValueType()) return Operand;  // noop conversion.
5184     if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
5185       return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
5186     if (OpOpcode == ISD::UNDEF)
5187       return getUNDEF(VT);
5188     break;
5189   case ISD::SCALAR_TO_VECTOR:
5190     assert(VT.isVector() && !Operand.getValueType().isVector() &&
5191            (VT.getVectorElementType() == Operand.getValueType() ||
5192             (VT.getVectorElementType().isInteger() &&
5193              Operand.getValueType().isInteger() &&
5194              VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
5195            "Illegal SCALAR_TO_VECTOR node!");
5196     if (OpOpcode == ISD::UNDEF)
5197       return getUNDEF(VT);
5198     // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
5199     if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
5200         isa<ConstantSDNode>(Operand.getOperand(1)) &&
5201         Operand.getConstantOperandVal(1) == 0 &&
5202         Operand.getOperand(0).getValueType() == VT)
5203       return Operand.getOperand(0);
5204     break;
5205   case ISD::FNEG:
5206     // Negation of an unknown bag of bits is still completely undefined.
5207     if (OpOpcode == ISD::UNDEF)
5208       return getUNDEF(VT);
5209 
5210     if (OpOpcode == ISD::FNEG)  // --X -> X
5211       return Operand.getOperand(0);
5212     break;
5213   case ISD::FABS:
5214     if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
5215       return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
5216     break;
5217   case ISD::VSCALE:
5218     assert(VT == Operand.getValueType() && "Unexpected VT!");
5219     break;
5220   case ISD::CTPOP:
5221     if (Operand.getValueType().getScalarType() == MVT::i1)
5222       return Operand;
5223     break;
5224   case ISD::CTLZ:
5225   case ISD::CTTZ:
5226     if (Operand.getValueType().getScalarType() == MVT::i1)
5227       return getNOT(DL, Operand, Operand.getValueType());
5228     break;
5229   case ISD::VECREDUCE_SMIN:
5230   case ISD::VECREDUCE_UMAX:
5231     if (Operand.getValueType().getScalarType() == MVT::i1)
5232       return getNode(ISD::VECREDUCE_OR, DL, VT, Operand);
5233     break;
5234   case ISD::VECREDUCE_SMAX:
5235   case ISD::VECREDUCE_UMIN:
5236     if (Operand.getValueType().getScalarType() == MVT::i1)
5237       return getNode(ISD::VECREDUCE_AND, DL, VT, Operand);
5238     break;
5239   }
5240 
5241   SDNode *N;
5242   SDVTList VTs = getVTList(VT);
5243   SDValue Ops[] = {Operand};
5244   if (VT != MVT::Glue) { // Don't CSE flag producing nodes
5245     FoldingSetNodeID ID;
5246     AddNodeIDNode(ID, Opcode, VTs, Ops);
5247     void *IP = nullptr;
5248     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5249       E->intersectFlagsWith(Flags);
5250       return SDValue(E, 0);
5251     }
5252 
5253     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5254     N->setFlags(Flags);
5255     createOperands(N, Ops);
5256     CSEMap.InsertNode(N, IP);
5257   } else {
5258     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5259     createOperands(N, Ops);
5260   }
5261 
5262   InsertNode(N);
5263   SDValue V = SDValue(N, 0);
5264   NewSDValueDbgMsg(V, "Creating new node: ", this);
5265   return V;
5266 }
5267 
5268 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
5269                                        const APInt &C2) {
5270   switch (Opcode) {
5271   case ISD::ADD:  return C1 + C2;
5272   case ISD::SUB:  return C1 - C2;
5273   case ISD::MUL:  return C1 * C2;
5274   case ISD::AND:  return C1 & C2;
5275   case ISD::OR:   return C1 | C2;
5276   case ISD::XOR:  return C1 ^ C2;
5277   case ISD::SHL:  return C1 << C2;
5278   case ISD::SRL:  return C1.lshr(C2);
5279   case ISD::SRA:  return C1.ashr(C2);
5280   case ISD::ROTL: return C1.rotl(C2);
5281   case ISD::ROTR: return C1.rotr(C2);
5282   case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
5283   case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
5284   case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
5285   case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
5286   case ISD::SADDSAT: return C1.sadd_sat(C2);
5287   case ISD::UADDSAT: return C1.uadd_sat(C2);
5288   case ISD::SSUBSAT: return C1.ssub_sat(C2);
5289   case ISD::USUBSAT: return C1.usub_sat(C2);
5290   case ISD::SSHLSAT: return C1.sshl_sat(C2);
5291   case ISD::USHLSAT: return C1.ushl_sat(C2);
5292   case ISD::UDIV:
5293     if (!C2.getBoolValue())
5294       break;
5295     return C1.udiv(C2);
5296   case ISD::UREM:
5297     if (!C2.getBoolValue())
5298       break;
5299     return C1.urem(C2);
5300   case ISD::SDIV:
5301     if (!C2.getBoolValue())
5302       break;
5303     return C1.sdiv(C2);
5304   case ISD::SREM:
5305     if (!C2.getBoolValue())
5306       break;
5307     return C1.srem(C2);
5308   case ISD::MULHS: {
5309     unsigned FullWidth = C1.getBitWidth() * 2;
5310     APInt C1Ext = C1.sext(FullWidth);
5311     APInt C2Ext = C2.sext(FullWidth);
5312     return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth());
5313   }
5314   case ISD::MULHU: {
5315     unsigned FullWidth = C1.getBitWidth() * 2;
5316     APInt C1Ext = C1.zext(FullWidth);
5317     APInt C2Ext = C2.zext(FullWidth);
5318     return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth());
5319   }
5320   case ISD::AVGFLOORS: {
5321     unsigned FullWidth = C1.getBitWidth() + 1;
5322     APInt C1Ext = C1.sext(FullWidth);
5323     APInt C2Ext = C2.sext(FullWidth);
5324     return (C1Ext + C2Ext).extractBits(C1.getBitWidth(), 1);
5325   }
5326   case ISD::AVGFLOORU: {
5327     unsigned FullWidth = C1.getBitWidth() + 1;
5328     APInt C1Ext = C1.zext(FullWidth);
5329     APInt C2Ext = C2.zext(FullWidth);
5330     return (C1Ext + C2Ext).extractBits(C1.getBitWidth(), 1);
5331   }
5332   case ISD::AVGCEILS: {
5333     unsigned FullWidth = C1.getBitWidth() + 1;
5334     APInt C1Ext = C1.sext(FullWidth);
5335     APInt C2Ext = C2.sext(FullWidth);
5336     return (C1Ext + C2Ext + 1).extractBits(C1.getBitWidth(), 1);
5337   }
5338   case ISD::AVGCEILU: {
5339     unsigned FullWidth = C1.getBitWidth() + 1;
5340     APInt C1Ext = C1.zext(FullWidth);
5341     APInt C2Ext = C2.zext(FullWidth);
5342     return (C1Ext + C2Ext + 1).extractBits(C1.getBitWidth(), 1);
5343   }
5344   }
5345   return llvm::None;
5346 }
5347 
5348 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
5349                                        const GlobalAddressSDNode *GA,
5350                                        const SDNode *N2) {
5351   if (GA->getOpcode() != ISD::GlobalAddress)
5352     return SDValue();
5353   if (!TLI->isOffsetFoldingLegal(GA))
5354     return SDValue();
5355   auto *C2 = dyn_cast<ConstantSDNode>(N2);
5356   if (!C2)
5357     return SDValue();
5358   int64_t Offset = C2->getSExtValue();
5359   switch (Opcode) {
5360   case ISD::ADD: break;
5361   case ISD::SUB: Offset = -uint64_t(Offset); break;
5362   default: return SDValue();
5363   }
5364   return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
5365                           GA->getOffset() + uint64_t(Offset));
5366 }
5367 
5368 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
5369   switch (Opcode) {
5370   case ISD::SDIV:
5371   case ISD::UDIV:
5372   case ISD::SREM:
5373   case ISD::UREM: {
5374     // If a divisor is zero/undef or any element of a divisor vector is
5375     // zero/undef, the whole op is undef.
5376     assert(Ops.size() == 2 && "Div/rem should have 2 operands");
5377     SDValue Divisor = Ops[1];
5378     if (Divisor.isUndef() || isNullConstant(Divisor))
5379       return true;
5380 
5381     return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
5382            llvm::any_of(Divisor->op_values(),
5383                         [](SDValue V) { return V.isUndef() ||
5384                                         isNullConstant(V); });
5385     // TODO: Handle signed overflow.
5386   }
5387   // TODO: Handle oversized shifts.
5388   default:
5389     return false;
5390   }
5391 }
5392 
5393 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
5394                                              EVT VT, ArrayRef<SDValue> Ops) {
5395   // If the opcode is a target-specific ISD node, there's nothing we can
5396   // do here and the operand rules may not line up with the below, so
5397   // bail early.
5398   // We can't create a scalar CONCAT_VECTORS so skip it. It will break
5399   // for concats involving SPLAT_VECTOR. Concats of BUILD_VECTORS are handled by
5400   // foldCONCAT_VECTORS in getNode before this is called.
5401   if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::CONCAT_VECTORS)
5402     return SDValue();
5403 
5404   unsigned NumOps = Ops.size();
5405   if (NumOps == 0)
5406     return SDValue();
5407 
5408   if (isUndef(Opcode, Ops))
5409     return getUNDEF(VT);
5410 
5411   // Handle binops special cases.
5412   if (NumOps == 2) {
5413     if (SDValue CFP = foldConstantFPMath(Opcode, DL, VT, Ops[0], Ops[1]))
5414       return CFP;
5415 
5416     if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
5417       if (auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) {
5418         if (C1->isOpaque() || C2->isOpaque())
5419           return SDValue();
5420 
5421         Optional<APInt> FoldAttempt =
5422             FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
5423         if (!FoldAttempt)
5424           return SDValue();
5425 
5426         SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT);
5427         assert((!Folded || !VT.isVector()) &&
5428                "Can't fold vectors ops with scalar operands");
5429         return Folded;
5430       }
5431     }
5432 
5433     // fold (add Sym, c) -> Sym+c
5434     if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ops[0]))
5435       return FoldSymbolOffset(Opcode, VT, GA, Ops[1].getNode());
5436     if (TLI->isCommutativeBinOp(Opcode))
5437       if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ops[1]))
5438         return FoldSymbolOffset(Opcode, VT, GA, Ops[0].getNode());
5439   }
5440 
5441   // This is for vector folding only from here on.
5442   if (!VT.isVector())
5443     return SDValue();
5444 
5445   ElementCount NumElts = VT.getVectorElementCount();
5446 
5447   // See if we can fold through bitcasted integer ops.
5448   // TODO: Can we handle undef elements?
5449   if (NumOps == 2 && VT.isFixedLengthVector() && VT.isInteger() &&
5450       Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&
5451       Ops[0].getOpcode() == ISD::BITCAST &&
5452       Ops[1].getOpcode() == ISD::BITCAST) {
5453     SDValue N1 = peekThroughBitcasts(Ops[0]);
5454     SDValue N2 = peekThroughBitcasts(Ops[1]);
5455     auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
5456     auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
5457     EVT BVVT = N1.getValueType();
5458     if (BV1 && BV2 && BVVT.isInteger() && BVVT == N2.getValueType()) {
5459       bool IsLE = getDataLayout().isLittleEndian();
5460       unsigned EltBits = VT.getScalarSizeInBits();
5461       SmallVector<APInt> RawBits1, RawBits2;
5462       BitVector UndefElts1, UndefElts2;
5463       if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
5464           BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2) &&
5465           UndefElts1.none() && UndefElts2.none()) {
5466         SmallVector<APInt> RawBits;
5467         for (unsigned I = 0, E = NumElts.getFixedValue(); I != E; ++I) {
5468           Optional<APInt> Fold = FoldValue(Opcode, RawBits1[I], RawBits2[I]);
5469           if (!Fold)
5470             break;
5471           RawBits.push_back(Fold.getValue());
5472         }
5473         if (RawBits.size() == NumElts.getFixedValue()) {
5474           // We have constant folded, but we need to cast this again back to
5475           // the original (possibly legalized) type.
5476           SmallVector<APInt> DstBits;
5477           BitVector DstUndefs;
5478           BuildVectorSDNode::recastRawBits(IsLE, BVVT.getScalarSizeInBits(),
5479                                            DstBits, RawBits, DstUndefs,
5480                                            BitVector(RawBits.size(), false));
5481           EVT BVEltVT = BV1->getOperand(0).getValueType();
5482           unsigned BVEltBits = BVEltVT.getSizeInBits();
5483           SmallVector<SDValue> Ops(DstBits.size(), getUNDEF(BVEltVT));
5484           for (unsigned I = 0, E = DstBits.size(); I != E; ++I) {
5485             if (DstUndefs[I])
5486               continue;
5487             Ops[I] = getConstant(DstBits[I].sextOrSelf(BVEltBits), DL, BVEltVT);
5488           }
5489           return getBitcast(VT, getBuildVector(BVVT, DL, Ops));
5490         }
5491       }
5492     }
5493   }
5494 
5495   // Fold (mul step_vector(C0), C1) to (step_vector(C0 * C1)).
5496   //      (shl step_vector(C0), C1) -> (step_vector(C0 << C1))
5497   if ((Opcode == ISD::MUL || Opcode == ISD::SHL) &&
5498       Ops[0].getOpcode() == ISD::STEP_VECTOR) {
5499     APInt RHSVal;
5500     if (ISD::isConstantSplatVector(Ops[1].getNode(), RHSVal)) {
5501       APInt NewStep = Opcode == ISD::MUL
5502                           ? Ops[0].getConstantOperandAPInt(0) * RHSVal
5503                           : Ops[0].getConstantOperandAPInt(0) << RHSVal;
5504       return getStepVector(DL, VT, NewStep);
5505     }
5506   }
5507 
5508   auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) {
5509     return !Op.getValueType().isVector() ||
5510            Op.getValueType().getVectorElementCount() == NumElts;
5511   };
5512 
5513   auto IsBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {
5514     return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE ||
5515            Op.getOpcode() == ISD::BUILD_VECTOR ||
5516            Op.getOpcode() == ISD::SPLAT_VECTOR;
5517   };
5518 
5519   // All operands must be vector types with the same number of elements as
5520   // the result type and must be either UNDEF or a build/splat vector
5521   // or UNDEF scalars.
5522   if (!llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) ||
5523       !llvm::all_of(Ops, IsScalarOrSameVectorSize))
5524     return SDValue();
5525 
5526   // If we are comparing vectors, then the result needs to be a i1 boolean
5527   // that is then sign-extended back to the legal result type.
5528   EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
5529 
5530   // Find legal integer scalar type for constant promotion and
5531   // ensure that its scalar size is at least as large as source.
5532   EVT LegalSVT = VT.getScalarType();
5533   if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5534     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5535     if (LegalSVT.bitsLT(VT.getScalarType()))
5536       return SDValue();
5537   }
5538 
5539   // For scalable vector types we know we're dealing with SPLAT_VECTORs. We
5540   // only have one operand to check. For fixed-length vector types we may have
5541   // a combination of BUILD_VECTOR and SPLAT_VECTOR.
5542   unsigned NumVectorElts = NumElts.isScalable() ? 1 : NumElts.getFixedValue();
5543 
5544   // Constant fold each scalar lane separately.
5545   SmallVector<SDValue, 4> ScalarResults;
5546   for (unsigned I = 0; I != NumVectorElts; I++) {
5547     SmallVector<SDValue, 4> ScalarOps;
5548     for (SDValue Op : Ops) {
5549       EVT InSVT = Op.getValueType().getScalarType();
5550       if (Op.getOpcode() != ISD::BUILD_VECTOR &&
5551           Op.getOpcode() != ISD::SPLAT_VECTOR) {
5552         if (Op.isUndef())
5553           ScalarOps.push_back(getUNDEF(InSVT));
5554         else
5555           ScalarOps.push_back(Op);
5556         continue;
5557       }
5558 
5559       SDValue ScalarOp =
5560           Op.getOperand(Op.getOpcode() == ISD::SPLAT_VECTOR ? 0 : I);
5561       EVT ScalarVT = ScalarOp.getValueType();
5562 
5563       // Build vector (integer) scalar operands may need implicit
5564       // truncation - do this before constant folding.
5565       if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
5566         ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
5567 
5568       ScalarOps.push_back(ScalarOp);
5569     }
5570 
5571     // Constant fold the scalar operands.
5572     SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps);
5573 
5574     // Legalize the (integer) scalar constant if necessary.
5575     if (LegalSVT != SVT)
5576       ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5577 
5578     // Scalar folding only succeeded if the result is a constant or UNDEF.
5579     if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5580         ScalarResult.getOpcode() != ISD::ConstantFP)
5581       return SDValue();
5582     ScalarResults.push_back(ScalarResult);
5583   }
5584 
5585   SDValue V = NumElts.isScalable() ? getSplatVector(VT, DL, ScalarResults[0])
5586                                    : getBuildVector(VT, DL, ScalarResults);
5587   NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
5588   return V;
5589 }
5590 
5591 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
5592                                          EVT VT, SDValue N1, SDValue N2) {
5593   // TODO: We don't do any constant folding for strict FP opcodes here, but we
5594   //       should. That will require dealing with a potentially non-default
5595   //       rounding mode, checking the "opStatus" return value from the APFloat
5596   //       math calculations, and possibly other variations.
5597   ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, /*AllowUndefs*/ false);
5598   ConstantFPSDNode *N2CFP = isConstOrConstSplatFP(N2, /*AllowUndefs*/ false);
5599   if (N1CFP && N2CFP) {
5600     APFloat C1 = N1CFP->getValueAPF(); // make copy
5601     const APFloat &C2 = N2CFP->getValueAPF();
5602     switch (Opcode) {
5603     case ISD::FADD:
5604       C1.add(C2, APFloat::rmNearestTiesToEven);
5605       return getConstantFP(C1, DL, VT);
5606     case ISD::FSUB:
5607       C1.subtract(C2, APFloat::rmNearestTiesToEven);
5608       return getConstantFP(C1, DL, VT);
5609     case ISD::FMUL:
5610       C1.multiply(C2, APFloat::rmNearestTiesToEven);
5611       return getConstantFP(C1, DL, VT);
5612     case ISD::FDIV:
5613       C1.divide(C2, APFloat::rmNearestTiesToEven);
5614       return getConstantFP(C1, DL, VT);
5615     case ISD::FREM:
5616       C1.mod(C2);
5617       return getConstantFP(C1, DL, VT);
5618     case ISD::FCOPYSIGN:
5619       C1.copySign(C2);
5620       return getConstantFP(C1, DL, VT);
5621     case ISD::FMINNUM:
5622       return getConstantFP(minnum(C1, C2), DL, VT);
5623     case ISD::FMAXNUM:
5624       return getConstantFP(maxnum(C1, C2), DL, VT);
5625     case ISD::FMINIMUM:
5626       return getConstantFP(minimum(C1, C2), DL, VT);
5627     case ISD::FMAXIMUM:
5628       return getConstantFP(maximum(C1, C2), DL, VT);
5629     default: break;
5630     }
5631   }
5632   if (N1CFP && Opcode == ISD::FP_ROUND) {
5633     APFloat C1 = N1CFP->getValueAPF();    // make copy
5634     bool Unused;
5635     // This can return overflow, underflow, or inexact; we don't care.
5636     // FIXME need to be more flexible about rounding mode.
5637     (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
5638                       &Unused);
5639     return getConstantFP(C1, DL, VT);
5640   }
5641 
5642   switch (Opcode) {
5643   case ISD::FSUB:
5644     // -0.0 - undef --> undef (consistent with "fneg undef")
5645     if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, /*AllowUndefs*/ true))
5646       if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())
5647         return getUNDEF(VT);
5648     LLVM_FALLTHROUGH;
5649 
5650   case ISD::FADD:
5651   case ISD::FMUL:
5652   case ISD::FDIV:
5653   case ISD::FREM:
5654     // If both operands are undef, the result is undef. If 1 operand is undef,
5655     // the result is NaN. This should match the behavior of the IR optimizer.
5656     if (N1.isUndef() && N2.isUndef())
5657       return getUNDEF(VT);
5658     if (N1.isUndef() || N2.isUndef())
5659       return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5660   }
5661   return SDValue();
5662 }
5663 
5664 SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) {
5665   assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");
5666 
5667   // There's no need to assert on a byte-aligned pointer. All pointers are at
5668   // least byte aligned.
5669   if (A == Align(1))
5670     return Val;
5671 
5672   FoldingSetNodeID ID;
5673   AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val});
5674   ID.AddInteger(A.value());
5675 
5676   void *IP = nullptr;
5677   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5678     return SDValue(E, 0);
5679 
5680   auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(),
5681                                          Val.getValueType(), A);
5682   createOperands(N, {Val});
5683 
5684   CSEMap.InsertNode(N, IP);
5685   InsertNode(N);
5686 
5687   SDValue V(N, 0);
5688   NewSDValueDbgMsg(V, "Creating new node: ", this);
5689   return V;
5690 }
5691 
5692 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5693                               SDValue N1, SDValue N2) {
5694   SDNodeFlags Flags;
5695   if (Inserter)
5696     Flags = Inserter->getFlags();
5697   return getNode(Opcode, DL, VT, N1, N2, Flags);
5698 }
5699 
5700 void SelectionDAG::canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1,
5701                                                 SDValue &N2) const {
5702   if (!TLI->isCommutativeBinOp(Opcode))
5703     return;
5704 
5705   // Canonicalize:
5706   //   binop(const, nonconst) -> binop(nonconst, const)
5707   bool IsN1C = isConstantIntBuildVectorOrConstantInt(N1);
5708   bool IsN2C = isConstantIntBuildVectorOrConstantInt(N2);
5709   bool IsN1CFP = isConstantFPBuildVectorOrConstantFP(N1);
5710   bool IsN2CFP = isConstantFPBuildVectorOrConstantFP(N2);
5711   if ((IsN1C && !IsN2C) || (IsN1CFP && !IsN2CFP))
5712     std::swap(N1, N2);
5713 
5714   // Canonicalize:
5715   //  binop(splat(x), step_vector) -> binop(step_vector, splat(x))
5716   else if (N1.getOpcode() == ISD::SPLAT_VECTOR &&
5717            N2.getOpcode() == ISD::STEP_VECTOR)
5718     std::swap(N1, N2);
5719 }
5720 
5721 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5722                               SDValue N1, SDValue N2, const SDNodeFlags Flags) {
5723   assert(N1.getOpcode() != ISD::DELETED_NODE &&
5724          N2.getOpcode() != ISD::DELETED_NODE &&
5725          "Operand is DELETED_NODE!");
5726 
5727   canonicalizeCommutativeBinop(Opcode, N1, N2);
5728 
5729   auto *N1C = dyn_cast<ConstantSDNode>(N1);
5730   auto *N2C = dyn_cast<ConstantSDNode>(N2);
5731 
5732   // Don't allow undefs in vector splats - we might be returning N2 when folding
5733   // to zero etc.
5734   ConstantSDNode *N2CV =
5735       isConstOrConstSplat(N2, /*AllowUndefs*/ false, /*AllowTruncation*/ true);
5736 
5737   switch (Opcode) {
5738   default: break;
5739   case ISD::TokenFactor:
5740     assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
5741            N2.getValueType() == MVT::Other && "Invalid token factor!");
5742     // Fold trivial token factors.
5743     if (N1.getOpcode() == ISD::EntryToken) return N2;
5744     if (N2.getOpcode() == ISD::EntryToken) return N1;
5745     if (N1 == N2) return N1;
5746     break;
5747   case ISD::BUILD_VECTOR: {
5748     // Attempt to simplify BUILD_VECTOR.
5749     SDValue Ops[] = {N1, N2};
5750     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5751       return V;
5752     break;
5753   }
5754   case ISD::CONCAT_VECTORS: {
5755     SDValue Ops[] = {N1, N2};
5756     if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5757       return V;
5758     break;
5759   }
5760   case ISD::AND:
5761     assert(VT.isInteger() && "This operator does not apply to FP types!");
5762     assert(N1.getValueType() == N2.getValueType() &&
5763            N1.getValueType() == VT && "Binary operator types must match!");
5764     // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
5765     // worth handling here.
5766     if (N2CV && N2CV->isZero())
5767       return N2;
5768     if (N2CV && N2CV->isAllOnes()) // X & -1 -> X
5769       return N1;
5770     break;
5771   case ISD::OR:
5772   case ISD::XOR:
5773   case ISD::ADD:
5774   case ISD::SUB:
5775     assert(VT.isInteger() && "This operator does not apply to FP types!");
5776     assert(N1.getValueType() == N2.getValueType() &&
5777            N1.getValueType() == VT && "Binary operator types must match!");
5778     // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
5779     // it's worth handling here.
5780     if (N2CV && N2CV->isZero())
5781       return N1;
5782     if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && VT.isVector() &&
5783         VT.getVectorElementType() == MVT::i1)
5784       return getNode(ISD::XOR, DL, VT, N1, N2);
5785     break;
5786   case ISD::MUL:
5787     assert(VT.isInteger() && "This operator does not apply to FP types!");
5788     assert(N1.getValueType() == N2.getValueType() &&
5789            N1.getValueType() == VT && "Binary operator types must match!");
5790     if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5791       return getNode(ISD::AND, DL, VT, N1, N2);
5792     if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5793       const APInt &MulImm = N1->getConstantOperandAPInt(0);
5794       const APInt &N2CImm = N2C->getAPIntValue();
5795       return getVScale(DL, VT, MulImm * N2CImm);
5796     }
5797     break;
5798   case ISD::UDIV:
5799   case ISD::UREM:
5800   case ISD::MULHU:
5801   case ISD::MULHS:
5802   case ISD::SDIV:
5803   case ISD::SREM:
5804   case ISD::SADDSAT:
5805   case ISD::SSUBSAT:
5806   case ISD::UADDSAT:
5807   case ISD::USUBSAT:
5808     assert(VT.isInteger() && "This operator does not apply to FP types!");
5809     assert(N1.getValueType() == N2.getValueType() &&
5810            N1.getValueType() == VT && "Binary operator types must match!");
5811     if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
5812       // fold (add_sat x, y) -> (or x, y) for bool types.
5813       if (Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT)
5814         return getNode(ISD::OR, DL, VT, N1, N2);
5815       // fold (sub_sat x, y) -> (and x, ~y) for bool types.
5816       if (Opcode == ISD::SSUBSAT || Opcode == ISD::USUBSAT)
5817         return getNode(ISD::AND, DL, VT, N1, getNOT(DL, N2, VT));
5818     }
5819     break;
5820   case ISD::SMIN:
5821   case ISD::UMAX:
5822     assert(VT.isInteger() && "This operator does not apply to FP types!");
5823     assert(N1.getValueType() == N2.getValueType() &&
5824            N1.getValueType() == VT && "Binary operator types must match!");
5825     if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5826       return getNode(ISD::OR, DL, VT, N1, N2);
5827     break;
5828   case ISD::SMAX:
5829   case ISD::UMIN:
5830     assert(VT.isInteger() && "This operator does not apply to FP types!");
5831     assert(N1.getValueType() == N2.getValueType() &&
5832            N1.getValueType() == VT && "Binary operator types must match!");
5833     if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5834       return getNode(ISD::AND, DL, VT, N1, N2);
5835     break;
5836   case ISD::FADD:
5837   case ISD::FSUB:
5838   case ISD::FMUL:
5839   case ISD::FDIV:
5840   case ISD::FREM:
5841     assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5842     assert(N1.getValueType() == N2.getValueType() &&
5843            N1.getValueType() == VT && "Binary operator types must match!");
5844     if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
5845       return V;
5846     break;
5847   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
5848     assert(N1.getValueType() == VT &&
5849            N1.getValueType().isFloatingPoint() &&
5850            N2.getValueType().isFloatingPoint() &&
5851            "Invalid FCOPYSIGN!");
5852     break;
5853   case ISD::SHL:
5854     if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5855       const APInt &MulImm = N1->getConstantOperandAPInt(0);
5856       const APInt &ShiftImm = N2C->getAPIntValue();
5857       return getVScale(DL, VT, MulImm << ShiftImm);
5858     }
5859     LLVM_FALLTHROUGH;
5860   case ISD::SRA:
5861   case ISD::SRL:
5862     if (SDValue V = simplifyShift(N1, N2))
5863       return V;
5864     LLVM_FALLTHROUGH;
5865   case ISD::ROTL:
5866   case ISD::ROTR:
5867     assert(VT == N1.getValueType() &&
5868            "Shift operators return type must be the same as their first arg");
5869     assert(VT.isInteger() && N2.getValueType().isInteger() &&
5870            "Shifts only work on integers");
5871     assert((!VT.isVector() || VT == N2.getValueType()) &&
5872            "Vector shift amounts must be in the same as their first arg");
5873     // Verify that the shift amount VT is big enough to hold valid shift
5874     // amounts.  This catches things like trying to shift an i1024 value by an
5875     // i8, which is easy to fall into in generic code that uses
5876     // TLI.getShiftAmount().
5877     assert(N2.getValueType().getScalarSizeInBits() >=
5878                Log2_32_Ceil(VT.getScalarSizeInBits()) &&
5879            "Invalid use of small shift amount with oversized value!");
5880 
5881     // Always fold shifts of i1 values so the code generator doesn't need to
5882     // handle them.  Since we know the size of the shift has to be less than the
5883     // size of the value, the shift/rotate count is guaranteed to be zero.
5884     if (VT == MVT::i1)
5885       return N1;
5886     if (N2CV && N2CV->isZero())
5887       return N1;
5888     break;
5889   case ISD::FP_ROUND:
5890     assert(VT.isFloatingPoint() &&
5891            N1.getValueType().isFloatingPoint() &&
5892            VT.bitsLE(N1.getValueType()) &&
5893            N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5894            "Invalid FP_ROUND!");
5895     if (N1.getValueType() == VT) return N1;  // noop conversion.
5896     break;
5897   case ISD::AssertSext:
5898   case ISD::AssertZext: {
5899     EVT EVT = cast<VTSDNode>(N2)->getVT();
5900     assert(VT == N1.getValueType() && "Not an inreg extend!");
5901     assert(VT.isInteger() && EVT.isInteger() &&
5902            "Cannot *_EXTEND_INREG FP types");
5903     assert(!EVT.isVector() &&
5904            "AssertSExt/AssertZExt type should be the vector element type "
5905            "rather than the vector type!");
5906     assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
5907     if (VT.getScalarType() == EVT) return N1; // noop assertion.
5908     break;
5909   }
5910   case ISD::SIGN_EXTEND_INREG: {
5911     EVT EVT = cast<VTSDNode>(N2)->getVT();
5912     assert(VT == N1.getValueType() && "Not an inreg extend!");
5913     assert(VT.isInteger() && EVT.isInteger() &&
5914            "Cannot *_EXTEND_INREG FP types");
5915     assert(EVT.isVector() == VT.isVector() &&
5916            "SIGN_EXTEND_INREG type should be vector iff the operand "
5917            "type is vector!");
5918     assert((!EVT.isVector() ||
5919             EVT.getVectorElementCount() == VT.getVectorElementCount()) &&
5920            "Vector element counts must match in SIGN_EXTEND_INREG");
5921     assert(EVT.bitsLE(VT) && "Not extending!");
5922     if (EVT == VT) return N1;  // Not actually extending
5923 
5924     auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
5925       unsigned FromBits = EVT.getScalarSizeInBits();
5926       Val <<= Val.getBitWidth() - FromBits;
5927       Val.ashrInPlace(Val.getBitWidth() - FromBits);
5928       return getConstant(Val, DL, ConstantVT);
5929     };
5930 
5931     if (N1C) {
5932       const APInt &Val = N1C->getAPIntValue();
5933       return SignExtendInReg(Val, VT);
5934     }
5935 
5936     if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
5937       SmallVector<SDValue, 8> Ops;
5938       llvm::EVT OpVT = N1.getOperand(0).getValueType();
5939       for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
5940         SDValue Op = N1.getOperand(i);
5941         if (Op.isUndef()) {
5942           Ops.push_back(getUNDEF(OpVT));
5943           continue;
5944         }
5945         ConstantSDNode *C = cast<ConstantSDNode>(Op);
5946         APInt Val = C->getAPIntValue();
5947         Ops.push_back(SignExtendInReg(Val, OpVT));
5948       }
5949       return getBuildVector(VT, DL, Ops);
5950     }
5951     break;
5952   }
5953   case ISD::FP_TO_SINT_SAT:
5954   case ISD::FP_TO_UINT_SAT: {
5955     assert(VT.isInteger() && cast<VTSDNode>(N2)->getVT().isInteger() &&
5956            N1.getValueType().isFloatingPoint() && "Invalid FP_TO_*INT_SAT");
5957     assert(N1.getValueType().isVector() == VT.isVector() &&
5958            "FP_TO_*INT_SAT type should be vector iff the operand type is "
5959            "vector!");
5960     assert((!VT.isVector() || VT.getVectorNumElements() ==
5961                                   N1.getValueType().getVectorNumElements()) &&
5962            "Vector element counts must match in FP_TO_*INT_SAT");
5963     assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
5964            "Type to saturate to must be a scalar.");
5965     assert(cast<VTSDNode>(N2)->getVT().bitsLE(VT.getScalarType()) &&
5966            "Not extending!");
5967     break;
5968   }
5969   case ISD::EXTRACT_VECTOR_ELT:
5970     assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
5971            "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
5972              element type of the vector.");
5973 
5974     // Extract from an undefined value or using an undefined index is undefined.
5975     if (N1.isUndef() || N2.isUndef())
5976       return getUNDEF(VT);
5977 
5978     // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
5979     // vectors. For scalable vectors we will provide appropriate support for
5980     // dealing with arbitrary indices.
5981     if (N2C && N1.getValueType().isFixedLengthVector() &&
5982         N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
5983       return getUNDEF(VT);
5984 
5985     // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
5986     // expanding copies of large vectors from registers. This only works for
5987     // fixed length vectors, since we need to know the exact number of
5988     // elements.
5989     if (N2C && N1.getOperand(0).getValueType().isFixedLengthVector() &&
5990         N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0) {
5991       unsigned Factor =
5992         N1.getOperand(0).getValueType().getVectorNumElements();
5993       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
5994                      N1.getOperand(N2C->getZExtValue() / Factor),
5995                      getVectorIdxConstant(N2C->getZExtValue() % Factor, DL));
5996     }
5997 
5998     // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
5999     // lowering is expanding large vector constants.
6000     if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
6001                 N1.getOpcode() == ISD::SPLAT_VECTOR)) {
6002       assert((N1.getOpcode() != ISD::BUILD_VECTOR ||
6003               N1.getValueType().isFixedLengthVector()) &&
6004              "BUILD_VECTOR used for scalable vectors");
6005       unsigned Index =
6006           N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
6007       SDValue Elt = N1.getOperand(Index);
6008 
6009       if (VT != Elt.getValueType())
6010         // If the vector element type is not legal, the BUILD_VECTOR operands
6011         // are promoted and implicitly truncated, and the result implicitly
6012         // extended. Make that explicit here.
6013         Elt = getAnyExtOrTrunc(Elt, DL, VT);
6014 
6015       return Elt;
6016     }
6017 
6018     // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
6019     // operations are lowered to scalars.
6020     if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
6021       // If the indices are the same, return the inserted element else
6022       // if the indices are known different, extract the element from
6023       // the original vector.
6024       SDValue N1Op2 = N1.getOperand(2);
6025       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
6026 
6027       if (N1Op2C && N2C) {
6028         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
6029           if (VT == N1.getOperand(1).getValueType())
6030             return N1.getOperand(1);
6031           return getSExtOrTrunc(N1.getOperand(1), DL, VT);
6032         }
6033         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
6034       }
6035     }
6036 
6037     // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
6038     // when vector types are scalarized and v1iX is legal.
6039     // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx).
6040     // Here we are completely ignoring the extract element index (N2),
6041     // which is fine for fixed width vectors, since any index other than 0
6042     // is undefined anyway. However, this cannot be ignored for scalable
6043     // vectors - in theory we could support this, but we don't want to do this
6044     // without a profitability check.
6045     if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6046         N1.getValueType().isFixedLengthVector() &&
6047         N1.getValueType().getVectorNumElements() == 1) {
6048       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
6049                      N1.getOperand(1));
6050     }
6051     break;
6052   case ISD::EXTRACT_ELEMENT:
6053     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
6054     assert(!N1.getValueType().isVector() && !VT.isVector() &&
6055            (N1.getValueType().isInteger() == VT.isInteger()) &&
6056            N1.getValueType() != VT &&
6057            "Wrong types for EXTRACT_ELEMENT!");
6058 
6059     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
6060     // 64-bit integers into 32-bit parts.  Instead of building the extract of
6061     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
6062     if (N1.getOpcode() == ISD::BUILD_PAIR)
6063       return N1.getOperand(N2C->getZExtValue());
6064 
6065     // EXTRACT_ELEMENT of a constant int is also very common.
6066     if (N1C) {
6067       unsigned ElementSize = VT.getSizeInBits();
6068       unsigned Shift = ElementSize * N2C->getZExtValue();
6069       const APInt &Val = N1C->getAPIntValue();
6070       return getConstant(Val.extractBits(ElementSize, Shift), DL, VT);
6071     }
6072     break;
6073   case ISD::EXTRACT_SUBVECTOR: {
6074     EVT N1VT = N1.getValueType();
6075     assert(VT.isVector() && N1VT.isVector() &&
6076            "Extract subvector VTs must be vectors!");
6077     assert(VT.getVectorElementType() == N1VT.getVectorElementType() &&
6078            "Extract subvector VTs must have the same element type!");
6079     assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
6080            "Cannot extract a scalable vector from a fixed length vector!");
6081     assert((VT.isScalableVector() != N1VT.isScalableVector() ||
6082             VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) &&
6083            "Extract subvector must be from larger vector to smaller vector!");
6084     assert(N2C && "Extract subvector index must be a constant");
6085     assert((VT.isScalableVector() != N1VT.isScalableVector() ||
6086             (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
6087                 N1VT.getVectorMinNumElements()) &&
6088            "Extract subvector overflow!");
6089     assert(N2C->getAPIntValue().getBitWidth() ==
6090                TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() &&
6091            "Constant index for EXTRACT_SUBVECTOR has an invalid size");
6092 
6093     // Trivial extraction.
6094     if (VT == N1VT)
6095       return N1;
6096 
6097     // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
6098     if (N1.isUndef())
6099       return getUNDEF(VT);
6100 
6101     // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
6102     // the concat have the same type as the extract.
6103     if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
6104         VT == N1.getOperand(0).getValueType()) {
6105       unsigned Factor = VT.getVectorMinNumElements();
6106       return N1.getOperand(N2C->getZExtValue() / Factor);
6107     }
6108 
6109     // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
6110     // during shuffle legalization.
6111     if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
6112         VT == N1.getOperand(1).getValueType())
6113       return N1.getOperand(1);
6114     break;
6115   }
6116   }
6117 
6118   // Perform trivial constant folding.
6119   if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}))
6120     return SV;
6121 
6122   // Canonicalize an UNDEF to the RHS, even over a constant.
6123   if (N1.isUndef()) {
6124     if (TLI->isCommutativeBinOp(Opcode)) {
6125       std::swap(N1, N2);
6126     } else {
6127       switch (Opcode) {
6128       case ISD::SIGN_EXTEND_INREG:
6129       case ISD::SUB:
6130         return getUNDEF(VT);     // fold op(undef, arg2) -> undef
6131       case ISD::UDIV:
6132       case ISD::SDIV:
6133       case ISD::UREM:
6134       case ISD::SREM:
6135       case ISD::SSUBSAT:
6136       case ISD::USUBSAT:
6137         return getConstant(0, DL, VT);    // fold op(undef, arg2) -> 0
6138       }
6139     }
6140   }
6141 
6142   // Fold a bunch of operators when the RHS is undef.
6143   if (N2.isUndef()) {
6144     switch (Opcode) {
6145     case ISD::XOR:
6146       if (N1.isUndef())
6147         // Handle undef ^ undef -> 0 special case. This is a common
6148         // idiom (misuse).
6149         return getConstant(0, DL, VT);
6150       LLVM_FALLTHROUGH;
6151     case ISD::ADD:
6152     case ISD::SUB:
6153     case ISD::UDIV:
6154     case ISD::SDIV:
6155     case ISD::UREM:
6156     case ISD::SREM:
6157       return getUNDEF(VT);       // fold op(arg1, undef) -> undef
6158     case ISD::MUL:
6159     case ISD::AND:
6160     case ISD::SSUBSAT:
6161     case ISD::USUBSAT:
6162       return getConstant(0, DL, VT);  // fold op(arg1, undef) -> 0
6163     case ISD::OR:
6164     case ISD::SADDSAT:
6165     case ISD::UADDSAT:
6166       return getAllOnesConstant(DL, VT);
6167     }
6168   }
6169 
6170   // Memoize this node if possible.
6171   SDNode *N;
6172   SDVTList VTs = getVTList(VT);
6173   SDValue Ops[] = {N1, N2};
6174   if (VT != MVT::Glue) {
6175     FoldingSetNodeID ID;
6176     AddNodeIDNode(ID, Opcode, VTs, Ops);
6177     void *IP = nullptr;
6178     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
6179       E->intersectFlagsWith(Flags);
6180       return SDValue(E, 0);
6181     }
6182 
6183     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6184     N->setFlags(Flags);
6185     createOperands(N, Ops);
6186     CSEMap.InsertNode(N, IP);
6187   } else {
6188     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6189     createOperands(N, Ops);
6190   }
6191 
6192   InsertNode(N);
6193   SDValue V = SDValue(N, 0);
6194   NewSDValueDbgMsg(V, "Creating new node: ", this);
6195   return V;
6196 }
6197 
6198 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6199                               SDValue N1, SDValue N2, SDValue N3) {
6200   SDNodeFlags Flags;
6201   if (Inserter)
6202     Flags = Inserter->getFlags();
6203   return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
6204 }
6205 
6206 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6207                               SDValue N1, SDValue N2, SDValue N3,
6208                               const SDNodeFlags Flags) {
6209   assert(N1.getOpcode() != ISD::DELETED_NODE &&
6210          N2.getOpcode() != ISD::DELETED_NODE &&
6211          N3.getOpcode() != ISD::DELETED_NODE &&
6212          "Operand is DELETED_NODE!");
6213   // Perform various simplifications.
6214   switch (Opcode) {
6215   case ISD::FMA: {
6216     assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
6217     assert(N1.getValueType() == VT && N2.getValueType() == VT &&
6218            N3.getValueType() == VT && "FMA types must match!");
6219     ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
6220     ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
6221     ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
6222     if (N1CFP && N2CFP && N3CFP) {
6223       APFloat  V1 = N1CFP->getValueAPF();
6224       const APFloat &V2 = N2CFP->getValueAPF();
6225       const APFloat &V3 = N3CFP->getValueAPF();
6226       V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
6227       return getConstantFP(V1, DL, VT);
6228     }
6229     break;
6230   }
6231   case ISD::BUILD_VECTOR: {
6232     // Attempt to simplify BUILD_VECTOR.
6233     SDValue Ops[] = {N1, N2, N3};
6234     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
6235       return V;
6236     break;
6237   }
6238   case ISD::CONCAT_VECTORS: {
6239     SDValue Ops[] = {N1, N2, N3};
6240     if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
6241       return V;
6242     break;
6243   }
6244   case ISD::SETCC: {
6245     assert(VT.isInteger() && "SETCC result type must be an integer!");
6246     assert(N1.getValueType() == N2.getValueType() &&
6247            "SETCC operands must have the same type!");
6248     assert(VT.isVector() == N1.getValueType().isVector() &&
6249            "SETCC type should be vector iff the operand type is vector!");
6250     assert((!VT.isVector() || VT.getVectorElementCount() ==
6251                                   N1.getValueType().getVectorElementCount()) &&
6252            "SETCC vector element counts must match!");
6253     // Use FoldSetCC to simplify SETCC's.
6254     if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
6255       return V;
6256     // Vector constant folding.
6257     SDValue Ops[] = {N1, N2, N3};
6258     if (SDValue V = FoldConstantArithmetic(Opcode, DL, VT, Ops)) {
6259       NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
6260       return V;
6261     }
6262     break;
6263   }
6264   case ISD::SELECT:
6265   case ISD::VSELECT:
6266     if (SDValue V = simplifySelect(N1, N2, N3))
6267       return V;
6268     break;
6269   case ISD::VECTOR_SHUFFLE:
6270     llvm_unreachable("should use getVectorShuffle constructor!");
6271   case ISD::VECTOR_SPLICE: {
6272     if (cast<ConstantSDNode>(N3)->isNullValue())
6273       return N1;
6274     break;
6275   }
6276   case ISD::INSERT_VECTOR_ELT: {
6277     ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
6278     // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
6279     // for scalable vectors where we will generate appropriate code to
6280     // deal with out-of-bounds cases correctly.
6281     if (N3C && N1.getValueType().isFixedLengthVector() &&
6282         N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
6283       return getUNDEF(VT);
6284 
6285     // Undefined index can be assumed out-of-bounds, so that's UNDEF too.
6286     if (N3.isUndef())
6287       return getUNDEF(VT);
6288 
6289     // If the inserted element is an UNDEF, just use the input vector.
6290     if (N2.isUndef())
6291       return N1;
6292 
6293     break;
6294   }
6295   case ISD::INSERT_SUBVECTOR: {
6296     // Inserting undef into undef is still undef.
6297     if (N1.isUndef() && N2.isUndef())
6298       return getUNDEF(VT);
6299 
6300     EVT N2VT = N2.getValueType();
6301     assert(VT == N1.getValueType() &&
6302            "Dest and insert subvector source types must match!");
6303     assert(VT.isVector() && N2VT.isVector() &&
6304            "Insert subvector VTs must be vectors!");
6305     assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) &&
6306            "Cannot insert a scalable vector into a fixed length vector!");
6307     assert((VT.isScalableVector() != N2VT.isScalableVector() ||
6308             VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) &&
6309            "Insert subvector must be from smaller vector to larger vector!");
6310     assert(isa<ConstantSDNode>(N3) &&
6311            "Insert subvector index must be constant");
6312     assert((VT.isScalableVector() != N2VT.isScalableVector() ||
6313             (N2VT.getVectorMinNumElements() +
6314              cast<ConstantSDNode>(N3)->getZExtValue()) <=
6315                 VT.getVectorMinNumElements()) &&
6316            "Insert subvector overflow!");
6317     assert(cast<ConstantSDNode>(N3)->getAPIntValue().getBitWidth() ==
6318                TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() &&
6319            "Constant index for INSERT_SUBVECTOR has an invalid size");
6320 
6321     // Trivial insertion.
6322     if (VT == N2VT)
6323       return N2;
6324 
6325     // If this is an insert of an extracted vector into an undef vector, we
6326     // can just use the input to the extract.
6327     if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6328         N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
6329       return N2.getOperand(0);
6330     break;
6331   }
6332   case ISD::BITCAST:
6333     // Fold bit_convert nodes from a type to themselves.
6334     if (N1.getValueType() == VT)
6335       return N1;
6336     break;
6337   }
6338 
6339   // Memoize node if it doesn't produce a flag.
6340   SDNode *N;
6341   SDVTList VTs = getVTList(VT);
6342   SDValue Ops[] = {N1, N2, N3};
6343   if (VT != MVT::Glue) {
6344     FoldingSetNodeID ID;
6345     AddNodeIDNode(ID, Opcode, VTs, Ops);
6346     void *IP = nullptr;
6347     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
6348       E->intersectFlagsWith(Flags);
6349       return SDValue(E, 0);
6350     }
6351 
6352     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6353     N->setFlags(Flags);
6354     createOperands(N, Ops);
6355     CSEMap.InsertNode(N, IP);
6356   } else {
6357     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6358     createOperands(N, Ops);
6359   }
6360 
6361   InsertNode(N);
6362   SDValue V = SDValue(N, 0);
6363   NewSDValueDbgMsg(V, "Creating new node: ", this);
6364   return V;
6365 }
6366 
6367 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6368                               SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
6369   SDValue Ops[] = { N1, N2, N3, N4 };
6370   return getNode(Opcode, DL, VT, Ops);
6371 }
6372 
6373 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6374                               SDValue N1, SDValue N2, SDValue N3, SDValue N4,
6375                               SDValue N5) {
6376   SDValue Ops[] = { N1, N2, N3, N4, N5 };
6377   return getNode(Opcode, DL, VT, Ops);
6378 }
6379 
6380 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
6381 /// the incoming stack arguments to be loaded from the stack.
6382 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
6383   SmallVector<SDValue, 8> ArgChains;
6384 
6385   // Include the original chain at the beginning of the list. When this is
6386   // used by target LowerCall hooks, this helps legalize find the
6387   // CALLSEQ_BEGIN node.
6388   ArgChains.push_back(Chain);
6389 
6390   // Add a chain value for each stack argument.
6391   for (SDNode *U : getEntryNode().getNode()->uses())
6392     if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
6393       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
6394         if (FI->getIndex() < 0)
6395           ArgChains.push_back(SDValue(L, 1));
6396 
6397   // Build a tokenfactor for all the chains.
6398   return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
6399 }
6400 
6401 /// getMemsetValue - Vectorized representation of the memset value
6402 /// operand.
6403 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
6404                               const SDLoc &dl) {
6405   assert(!Value.isUndef());
6406 
6407   unsigned NumBits = VT.getScalarSizeInBits();
6408   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
6409     assert(C->getAPIntValue().getBitWidth() == 8);
6410     APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
6411     if (VT.isInteger()) {
6412       bool IsOpaque = VT.getSizeInBits() > 64 ||
6413           !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
6414       return DAG.getConstant(Val, dl, VT, false, IsOpaque);
6415     }
6416     return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
6417                              VT);
6418   }
6419 
6420   assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
6421   EVT IntVT = VT.getScalarType();
6422   if (!IntVT.isInteger())
6423     IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
6424 
6425   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
6426   if (NumBits > 8) {
6427     // Use a multiplication with 0x010101... to extend the input to the
6428     // required length.
6429     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
6430     Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
6431                         DAG.getConstant(Magic, dl, IntVT));
6432   }
6433 
6434   if (VT != Value.getValueType() && !VT.isInteger())
6435     Value = DAG.getBitcast(VT.getScalarType(), Value);
6436   if (VT != Value.getValueType())
6437     Value = DAG.getSplatBuildVector(VT, dl, Value);
6438 
6439   return Value;
6440 }
6441 
6442 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
6443 /// used when a memcpy is turned into a memset when the source is a constant
6444 /// string ptr.
6445 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
6446                                   const TargetLowering &TLI,
6447                                   const ConstantDataArraySlice &Slice) {
6448   // Handle vector with all elements zero.
6449   if (Slice.Array == nullptr) {
6450     if (VT.isInteger())
6451       return DAG.getConstant(0, dl, VT);
6452     if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
6453       return DAG.getConstantFP(0.0, dl, VT);
6454     if (VT.isVector()) {
6455       unsigned NumElts = VT.getVectorNumElements();
6456       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
6457       return DAG.getNode(ISD::BITCAST, dl, VT,
6458                          DAG.getConstant(0, dl,
6459                                          EVT::getVectorVT(*DAG.getContext(),
6460                                                           EltVT, NumElts)));
6461     }
6462     llvm_unreachable("Expected type!");
6463   }
6464 
6465   assert(!VT.isVector() && "Can't handle vector type here!");
6466   unsigned NumVTBits = VT.getSizeInBits();
6467   unsigned NumVTBytes = NumVTBits / 8;
6468   unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
6469 
6470   APInt Val(NumVTBits, 0);
6471   if (DAG.getDataLayout().isLittleEndian()) {
6472     for (unsigned i = 0; i != NumBytes; ++i)
6473       Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
6474   } else {
6475     for (unsigned i = 0; i != NumBytes; ++i)
6476       Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
6477   }
6478 
6479   // If the "cost" of materializing the integer immediate is less than the cost
6480   // of a load, then it is cost effective to turn the load into the immediate.
6481   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
6482   if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
6483     return DAG.getConstant(Val, dl, VT);
6484   return SDValue();
6485 }
6486 
6487 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
6488                                            const SDLoc &DL,
6489                                            const SDNodeFlags Flags) {
6490   EVT VT = Base.getValueType();
6491   SDValue Index;
6492 
6493   if (Offset.isScalable())
6494     Index = getVScale(DL, Base.getValueType(),
6495                       APInt(Base.getValueSizeInBits().getFixedSize(),
6496                             Offset.getKnownMinSize()));
6497   else
6498     Index = getConstant(Offset.getFixedSize(), DL, VT);
6499 
6500   return getMemBasePlusOffset(Base, Index, DL, Flags);
6501 }
6502 
6503 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
6504                                            const SDLoc &DL,
6505                                            const SDNodeFlags Flags) {
6506   assert(Offset.getValueType().isInteger());
6507   EVT BasePtrVT = Ptr.getValueType();
6508   return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags);
6509 }
6510 
6511 /// Returns true if memcpy source is constant data.
6512 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
6513   uint64_t SrcDelta = 0;
6514   GlobalAddressSDNode *G = nullptr;
6515   if (Src.getOpcode() == ISD::GlobalAddress)
6516     G = cast<GlobalAddressSDNode>(Src);
6517   else if (Src.getOpcode() == ISD::ADD &&
6518            Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
6519            Src.getOperand(1).getOpcode() == ISD::Constant) {
6520     G = cast<GlobalAddressSDNode>(Src.getOperand(0));
6521     SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
6522   }
6523   if (!G)
6524     return false;
6525 
6526   return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
6527                                   SrcDelta + G->getOffset());
6528 }
6529 
6530 static bool shouldLowerMemFuncForSize(const MachineFunction &MF,
6531                                       SelectionDAG &DAG) {
6532   // On Darwin, -Os means optimize for size without hurting performance, so
6533   // only really optimize for size when -Oz (MinSize) is used.
6534   if (MF.getTarget().getTargetTriple().isOSDarwin())
6535     return MF.getFunction().hasMinSize();
6536   return DAG.shouldOptForSize();
6537 }
6538 
6539 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
6540                           SmallVector<SDValue, 32> &OutChains, unsigned From,
6541                           unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
6542                           SmallVector<SDValue, 16> &OutStoreChains) {
6543   assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
6544   assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
6545   SmallVector<SDValue, 16> GluedLoadChains;
6546   for (unsigned i = From; i < To; ++i) {
6547     OutChains.push_back(OutLoadChains[i]);
6548     GluedLoadChains.push_back(OutLoadChains[i]);
6549   }
6550 
6551   // Chain for all loads.
6552   SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
6553                                   GluedLoadChains);
6554 
6555   for (unsigned i = From; i < To; ++i) {
6556     StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
6557     SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
6558                                   ST->getBasePtr(), ST->getMemoryVT(),
6559                                   ST->getMemOperand());
6560     OutChains.push_back(NewStore);
6561   }
6562 }
6563 
6564 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6565                                        SDValue Chain, SDValue Dst, SDValue Src,
6566                                        uint64_t Size, Align Alignment,
6567                                        bool isVol, bool AlwaysInline,
6568                                        MachinePointerInfo DstPtrInfo,
6569                                        MachinePointerInfo SrcPtrInfo,
6570                                        const AAMDNodes &AAInfo) {
6571   // Turn a memcpy of undef to nop.
6572   // FIXME: We need to honor volatile even is Src is undef.
6573   if (Src.isUndef())
6574     return Chain;
6575 
6576   // Expand memcpy to a series of load and store ops if the size operand falls
6577   // below a certain threshold.
6578   // TODO: In the AlwaysInline case, if the size is big then generate a loop
6579   // rather than maybe a humongous number of loads and stores.
6580   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6581   const DataLayout &DL = DAG.getDataLayout();
6582   LLVMContext &C = *DAG.getContext();
6583   std::vector<EVT> MemOps;
6584   bool DstAlignCanChange = false;
6585   MachineFunction &MF = DAG.getMachineFunction();
6586   MachineFrameInfo &MFI = MF.getFrameInfo();
6587   bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6588   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6589   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6590     DstAlignCanChange = true;
6591   MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6592   if (!SrcAlign || Alignment > *SrcAlign)
6593     SrcAlign = Alignment;
6594   assert(SrcAlign && "SrcAlign must be set");
6595   ConstantDataArraySlice Slice;
6596   // If marked as volatile, perform a copy even when marked as constant.
6597   bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
6598   bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
6599   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
6600   const MemOp Op = isZeroConstant
6601                        ? MemOp::Set(Size, DstAlignCanChange, Alignment,
6602                                     /*IsZeroMemset*/ true, isVol)
6603                        : MemOp::Copy(Size, DstAlignCanChange, Alignment,
6604                                      *SrcAlign, isVol, CopyFromConstant);
6605   if (!TLI.findOptimalMemOpLowering(
6606           MemOps, Limit, Op, DstPtrInfo.getAddrSpace(),
6607           SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
6608     return SDValue();
6609 
6610   if (DstAlignCanChange) {
6611     Type *Ty = MemOps[0].getTypeForEVT(C);
6612     Align NewAlign = DL.getABITypeAlign(Ty);
6613 
6614     // Don't promote to an alignment that would require dynamic stack
6615     // realignment.
6616     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
6617     if (!TRI->hasStackRealignment(MF))
6618       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
6619         NewAlign = NewAlign / 2;
6620 
6621     if (NewAlign > Alignment) {
6622       // Give the stack frame object a larger alignment if needed.
6623       if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6624         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6625       Alignment = NewAlign;
6626     }
6627   }
6628 
6629   // Prepare AAInfo for loads/stores after lowering this memcpy.
6630   AAMDNodes NewAAInfo = AAInfo;
6631   NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
6632 
6633   MachineMemOperand::Flags MMOFlags =
6634       isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6635   SmallVector<SDValue, 16> OutLoadChains;
6636   SmallVector<SDValue, 16> OutStoreChains;
6637   SmallVector<SDValue, 32> OutChains;
6638   unsigned NumMemOps = MemOps.size();
6639   uint64_t SrcOff = 0, DstOff = 0;
6640   for (unsigned i = 0; i != NumMemOps; ++i) {
6641     EVT VT = MemOps[i];
6642     unsigned VTSize = VT.getSizeInBits() / 8;
6643     SDValue Value, Store;
6644 
6645     if (VTSize > Size) {
6646       // Issuing an unaligned load / store pair  that overlaps with the previous
6647       // pair. Adjust the offset accordingly.
6648       assert(i == NumMemOps-1 && i != 0);
6649       SrcOff -= VTSize - Size;
6650       DstOff -= VTSize - Size;
6651     }
6652 
6653     if (CopyFromConstant &&
6654         (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
6655       // It's unlikely a store of a vector immediate can be done in a single
6656       // instruction. It would require a load from a constantpool first.
6657       // We only handle zero vectors here.
6658       // FIXME: Handle other cases where store of vector immediate is done in
6659       // a single instruction.
6660       ConstantDataArraySlice SubSlice;
6661       if (SrcOff < Slice.Length) {
6662         SubSlice = Slice;
6663         SubSlice.move(SrcOff);
6664       } else {
6665         // This is an out-of-bounds access and hence UB. Pretend we read zero.
6666         SubSlice.Array = nullptr;
6667         SubSlice.Offset = 0;
6668         SubSlice.Length = VTSize;
6669       }
6670       Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
6671       if (Value.getNode()) {
6672         Store = DAG.getStore(
6673             Chain, dl, Value,
6674             DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6675             DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
6676         OutChains.push_back(Store);
6677       }
6678     }
6679 
6680     if (!Store.getNode()) {
6681       // The type might not be legal for the target.  This should only happen
6682       // if the type is smaller than a legal type, as on PPC, so the right
6683       // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify
6684       // to Load/Store if NVT==VT.
6685       // FIXME does the case above also need this?
6686       EVT NVT = TLI.getTypeToTransformTo(C, VT);
6687       assert(NVT.bitsGE(VT));
6688 
6689       bool isDereferenceable =
6690         SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6691       MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6692       if (isDereferenceable)
6693         SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6694 
6695       Value = DAG.getExtLoad(
6696           ISD::EXTLOAD, dl, NVT, Chain,
6697           DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6698           SrcPtrInfo.getWithOffset(SrcOff), VT,
6699           commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags, NewAAInfo);
6700       OutLoadChains.push_back(Value.getValue(1));
6701 
6702       Store = DAG.getTruncStore(
6703           Chain, dl, Value,
6704           DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6705           DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
6706       OutStoreChains.push_back(Store);
6707     }
6708     SrcOff += VTSize;
6709     DstOff += VTSize;
6710     Size -= VTSize;
6711   }
6712 
6713   unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
6714                                 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
6715   unsigned NumLdStInMemcpy = OutStoreChains.size();
6716 
6717   if (NumLdStInMemcpy) {
6718     // It may be that memcpy might be converted to memset if it's memcpy
6719     // of constants. In such a case, we won't have loads and stores, but
6720     // just stores. In the absence of loads, there is nothing to gang up.
6721     if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
6722       // If target does not care, just leave as it.
6723       for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
6724         OutChains.push_back(OutLoadChains[i]);
6725         OutChains.push_back(OutStoreChains[i]);
6726       }
6727     } else {
6728       // Ld/St less than/equal limit set by target.
6729       if (NumLdStInMemcpy <= GluedLdStLimit) {
6730           chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6731                                         NumLdStInMemcpy, OutLoadChains,
6732                                         OutStoreChains);
6733       } else {
6734         unsigned NumberLdChain =  NumLdStInMemcpy / GluedLdStLimit;
6735         unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
6736         unsigned GlueIter = 0;
6737 
6738         for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
6739           unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
6740           unsigned IndexTo   = NumLdStInMemcpy - GlueIter;
6741 
6742           chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
6743                                        OutLoadChains, OutStoreChains);
6744           GlueIter += GluedLdStLimit;
6745         }
6746 
6747         // Residual ld/st.
6748         if (RemainingLdStInMemcpy) {
6749           chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6750                                         RemainingLdStInMemcpy, OutLoadChains,
6751                                         OutStoreChains);
6752         }
6753       }
6754     }
6755   }
6756   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6757 }
6758 
6759 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6760                                         SDValue Chain, SDValue Dst, SDValue Src,
6761                                         uint64_t Size, Align Alignment,
6762                                         bool isVol, bool AlwaysInline,
6763                                         MachinePointerInfo DstPtrInfo,
6764                                         MachinePointerInfo SrcPtrInfo,
6765                                         const AAMDNodes &AAInfo) {
6766   // Turn a memmove of undef to nop.
6767   // FIXME: We need to honor volatile even is Src is undef.
6768   if (Src.isUndef())
6769     return Chain;
6770 
6771   // Expand memmove to a series of load and store ops if the size operand falls
6772   // below a certain threshold.
6773   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6774   const DataLayout &DL = DAG.getDataLayout();
6775   LLVMContext &C = *DAG.getContext();
6776   std::vector<EVT> MemOps;
6777   bool DstAlignCanChange = false;
6778   MachineFunction &MF = DAG.getMachineFunction();
6779   MachineFrameInfo &MFI = MF.getFrameInfo();
6780   bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6781   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6782   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6783     DstAlignCanChange = true;
6784   MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6785   if (!SrcAlign || Alignment > *SrcAlign)
6786     SrcAlign = Alignment;
6787   assert(SrcAlign && "SrcAlign must be set");
6788   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
6789   if (!TLI.findOptimalMemOpLowering(
6790           MemOps, Limit,
6791           MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign,
6792                       /*IsVolatile*/ true),
6793           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
6794           MF.getFunction().getAttributes()))
6795     return SDValue();
6796 
6797   if (DstAlignCanChange) {
6798     Type *Ty = MemOps[0].getTypeForEVT(C);
6799     Align NewAlign = DL.getABITypeAlign(Ty);
6800     if (NewAlign > Alignment) {
6801       // Give the stack frame object a larger alignment if needed.
6802       if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6803         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6804       Alignment = NewAlign;
6805     }
6806   }
6807 
6808   // Prepare AAInfo for loads/stores after lowering this memmove.
6809   AAMDNodes NewAAInfo = AAInfo;
6810   NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
6811 
6812   MachineMemOperand::Flags MMOFlags =
6813       isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6814   uint64_t SrcOff = 0, DstOff = 0;
6815   SmallVector<SDValue, 8> LoadValues;
6816   SmallVector<SDValue, 8> LoadChains;
6817   SmallVector<SDValue, 8> OutChains;
6818   unsigned NumMemOps = MemOps.size();
6819   for (unsigned i = 0; i < NumMemOps; i++) {
6820     EVT VT = MemOps[i];
6821     unsigned VTSize = VT.getSizeInBits() / 8;
6822     SDValue Value;
6823 
6824     bool isDereferenceable =
6825       SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6826     MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6827     if (isDereferenceable)
6828       SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6829 
6830     Value = DAG.getLoad(
6831         VT, dl, Chain,
6832         DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6833         SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo);
6834     LoadValues.push_back(Value);
6835     LoadChains.push_back(Value.getValue(1));
6836     SrcOff += VTSize;
6837   }
6838   Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
6839   OutChains.clear();
6840   for (unsigned i = 0; i < NumMemOps; i++) {
6841     EVT VT = MemOps[i];
6842     unsigned VTSize = VT.getSizeInBits() / 8;
6843     SDValue Store;
6844 
6845     Store = DAG.getStore(
6846         Chain, dl, LoadValues[i],
6847         DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6848         DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
6849     OutChains.push_back(Store);
6850     DstOff += VTSize;
6851   }
6852 
6853   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6854 }
6855 
6856 /// Lower the call to 'memset' intrinsic function into a series of store
6857 /// operations.
6858 ///
6859 /// \param DAG Selection DAG where lowered code is placed.
6860 /// \param dl Link to corresponding IR location.
6861 /// \param Chain Control flow dependency.
6862 /// \param Dst Pointer to destination memory location.
6863 /// \param Src Value of byte to write into the memory.
6864 /// \param Size Number of bytes to write.
6865 /// \param Alignment Alignment of the destination in bytes.
6866 /// \param isVol True if destination is volatile.
6867 /// \param DstPtrInfo IR information on the memory pointer.
6868 /// \returns New head in the control flow, if lowering was successful, empty
6869 /// SDValue otherwise.
6870 ///
6871 /// The function tries to replace 'llvm.memset' intrinsic with several store
6872 /// operations and value calculation code. This is usually profitable for small
6873 /// memory size.
6874 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
6875                                SDValue Chain, SDValue Dst, SDValue Src,
6876                                uint64_t Size, Align Alignment, bool isVol,
6877                                MachinePointerInfo DstPtrInfo,
6878                                const AAMDNodes &AAInfo) {
6879   // Turn a memset of undef to nop.
6880   // FIXME: We need to honor volatile even is Src is undef.
6881   if (Src.isUndef())
6882     return Chain;
6883 
6884   // Expand memset to a series of load/store ops if the size operand
6885   // falls below a certain threshold.
6886   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6887   std::vector<EVT> MemOps;
6888   bool DstAlignCanChange = false;
6889   MachineFunction &MF = DAG.getMachineFunction();
6890   MachineFrameInfo &MFI = MF.getFrameInfo();
6891   bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6892   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6893   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6894     DstAlignCanChange = true;
6895   bool IsZeroVal =
6896       isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isZero();
6897   if (!TLI.findOptimalMemOpLowering(
6898           MemOps, TLI.getMaxStoresPerMemset(OptSize),
6899           MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
6900           DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes()))
6901     return SDValue();
6902 
6903   if (DstAlignCanChange) {
6904     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
6905     Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty);
6906     if (NewAlign > Alignment) {
6907       // Give the stack frame object a larger alignment if needed.
6908       if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6909         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6910       Alignment = NewAlign;
6911     }
6912   }
6913 
6914   SmallVector<SDValue, 8> OutChains;
6915   uint64_t DstOff = 0;
6916   unsigned NumMemOps = MemOps.size();
6917 
6918   // Find the largest store and generate the bit pattern for it.
6919   EVT LargestVT = MemOps[0];
6920   for (unsigned i = 1; i < NumMemOps; i++)
6921     if (MemOps[i].bitsGT(LargestVT))
6922       LargestVT = MemOps[i];
6923   SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
6924 
6925   // Prepare AAInfo for loads/stores after lowering this memset.
6926   AAMDNodes NewAAInfo = AAInfo;
6927   NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
6928 
6929   for (unsigned i = 0; i < NumMemOps; i++) {
6930     EVT VT = MemOps[i];
6931     unsigned VTSize = VT.getSizeInBits() / 8;
6932     if (VTSize > Size) {
6933       // Issuing an unaligned load / store pair  that overlaps with the previous
6934       // pair. Adjust the offset accordingly.
6935       assert(i == NumMemOps-1 && i != 0);
6936       DstOff -= VTSize - Size;
6937     }
6938 
6939     // If this store is smaller than the largest store see whether we can get
6940     // the smaller value for free with a truncate.
6941     SDValue Value = MemSetValue;
6942     if (VT.bitsLT(LargestVT)) {
6943       if (!LargestVT.isVector() && !VT.isVector() &&
6944           TLI.isTruncateFree(LargestVT, VT))
6945         Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
6946       else
6947         Value = getMemsetValue(Src, VT, DAG, dl);
6948     }
6949     assert(Value.getValueType() == VT && "Value with wrong type.");
6950     SDValue Store = DAG.getStore(
6951         Chain, dl, Value,
6952         DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6953         DstPtrInfo.getWithOffset(DstOff), Alignment,
6954         isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone,
6955         NewAAInfo);
6956     OutChains.push_back(Store);
6957     DstOff += VT.getSizeInBits() / 8;
6958     Size -= VTSize;
6959   }
6960 
6961   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6962 }
6963 
6964 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
6965                                             unsigned AS) {
6966   // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
6967   // pointer operands can be losslessly bitcasted to pointers of address space 0
6968   if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
6969     report_fatal_error("cannot lower memory intrinsic in address space " +
6970                        Twine(AS));
6971   }
6972 }
6973 
6974 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
6975                                 SDValue Src, SDValue Size, Align Alignment,
6976                                 bool isVol, bool AlwaysInline, bool isTailCall,
6977                                 MachinePointerInfo DstPtrInfo,
6978                                 MachinePointerInfo SrcPtrInfo,
6979                                 const AAMDNodes &AAInfo) {
6980   // Check to see if we should lower the memcpy to loads and stores first.
6981   // For cases within the target-specified limits, this is the best choice.
6982   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6983   if (ConstantSize) {
6984     // Memcpy with size zero? Just return the original chain.
6985     if (ConstantSize->isZero())
6986       return Chain;
6987 
6988     SDValue Result = getMemcpyLoadsAndStores(
6989         *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
6990         isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo);
6991     if (Result.getNode())
6992       return Result;
6993   }
6994 
6995   // Then check to see if we should lower the memcpy with target-specific
6996   // code. If the target chooses to do this, this is the next best.
6997   if (TSI) {
6998     SDValue Result = TSI->EmitTargetCodeForMemcpy(
6999         *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
7000         DstPtrInfo, SrcPtrInfo);
7001     if (Result.getNode())
7002       return Result;
7003   }
7004 
7005   // If we really need inline code and the target declined to provide it,
7006   // use a (potentially long) sequence of loads and stores.
7007   if (AlwaysInline) {
7008     assert(ConstantSize && "AlwaysInline requires a constant size!");
7009     return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
7010                                    ConstantSize->getZExtValue(), Alignment,
7011                                    isVol, true, DstPtrInfo, SrcPtrInfo, AAInfo);
7012   }
7013 
7014   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
7015   checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
7016 
7017   // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
7018   // memcpy is not guaranteed to be safe. libc memcpys aren't required to
7019   // respect volatile, so they may do things like read or write memory
7020   // beyond the given memory regions. But fixing this isn't easy, and most
7021   // people don't care.
7022 
7023   // Emit a library call.
7024   TargetLowering::ArgListTy Args;
7025   TargetLowering::ArgListEntry Entry;
7026   Entry.Ty = Type::getInt8PtrTy(*getContext());
7027   Entry.Node = Dst; Args.push_back(Entry);
7028   Entry.Node = Src; Args.push_back(Entry);
7029 
7030   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7031   Entry.Node = Size; Args.push_back(Entry);
7032   // FIXME: pass in SDLoc
7033   TargetLowering::CallLoweringInfo CLI(*this);
7034   CLI.setDebugLoc(dl)
7035       .setChain(Chain)
7036       .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
7037                     Dst.getValueType().getTypeForEVT(*getContext()),
7038                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
7039                                       TLI->getPointerTy(getDataLayout())),
7040                     std::move(Args))
7041       .setDiscardResult()
7042       .setTailCall(isTailCall);
7043 
7044   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
7045   return CallResult.second;
7046 }
7047 
7048 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
7049                                       SDValue Dst, unsigned DstAlign,
7050                                       SDValue Src, unsigned SrcAlign,
7051                                       SDValue Size, Type *SizeTy,
7052                                       unsigned ElemSz, bool isTailCall,
7053                                       MachinePointerInfo DstPtrInfo,
7054                                       MachinePointerInfo SrcPtrInfo) {
7055   // Emit a library call.
7056   TargetLowering::ArgListTy Args;
7057   TargetLowering::ArgListEntry Entry;
7058   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7059   Entry.Node = Dst;
7060   Args.push_back(Entry);
7061 
7062   Entry.Node = Src;
7063   Args.push_back(Entry);
7064 
7065   Entry.Ty = SizeTy;
7066   Entry.Node = Size;
7067   Args.push_back(Entry);
7068 
7069   RTLIB::Libcall LibraryCall =
7070       RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
7071   if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
7072     report_fatal_error("Unsupported element size");
7073 
7074   TargetLowering::CallLoweringInfo CLI(*this);
7075   CLI.setDebugLoc(dl)
7076       .setChain(Chain)
7077       .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
7078                     Type::getVoidTy(*getContext()),
7079                     getExternalSymbol(TLI->getLibcallName(LibraryCall),
7080                                       TLI->getPointerTy(getDataLayout())),
7081                     std::move(Args))
7082       .setDiscardResult()
7083       .setTailCall(isTailCall);
7084 
7085   std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
7086   return CallResult.second;
7087 }
7088 
7089 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
7090                                  SDValue Src, SDValue Size, Align Alignment,
7091                                  bool isVol, bool isTailCall,
7092                                  MachinePointerInfo DstPtrInfo,
7093                                  MachinePointerInfo SrcPtrInfo,
7094                                  const AAMDNodes &AAInfo) {
7095   // Check to see if we should lower the memmove to loads and stores first.
7096   // For cases within the target-specified limits, this is the best choice.
7097   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
7098   if (ConstantSize) {
7099     // Memmove with size zero? Just return the original chain.
7100     if (ConstantSize->isZero())
7101       return Chain;
7102 
7103     SDValue Result = getMemmoveLoadsAndStores(
7104         *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
7105         isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo);
7106     if (Result.getNode())
7107       return Result;
7108   }
7109 
7110   // Then check to see if we should lower the memmove with target-specific
7111   // code. If the target chooses to do this, this is the next best.
7112   if (TSI) {
7113     SDValue Result =
7114         TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size,
7115                                       Alignment, isVol, DstPtrInfo, SrcPtrInfo);
7116     if (Result.getNode())
7117       return Result;
7118   }
7119 
7120   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
7121   checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
7122 
7123   // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
7124   // not be safe.  See memcpy above for more details.
7125 
7126   // Emit a library call.
7127   TargetLowering::ArgListTy Args;
7128   TargetLowering::ArgListEntry Entry;
7129   Entry.Ty = Type::getInt8PtrTy(*getContext());
7130   Entry.Node = Dst; Args.push_back(Entry);
7131   Entry.Node = Src; Args.push_back(Entry);
7132 
7133   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7134   Entry.Node = Size; Args.push_back(Entry);
7135   // FIXME:  pass in SDLoc
7136   TargetLowering::CallLoweringInfo CLI(*this);
7137   CLI.setDebugLoc(dl)
7138       .setChain(Chain)
7139       .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
7140                     Dst.getValueType().getTypeForEVT(*getContext()),
7141                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
7142                                       TLI->getPointerTy(getDataLayout())),
7143                     std::move(Args))
7144       .setDiscardResult()
7145       .setTailCall(isTailCall);
7146 
7147   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
7148   return CallResult.second;
7149 }
7150 
7151 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
7152                                        SDValue Dst, unsigned DstAlign,
7153                                        SDValue Src, unsigned SrcAlign,
7154                                        SDValue Size, Type *SizeTy,
7155                                        unsigned ElemSz, bool isTailCall,
7156                                        MachinePointerInfo DstPtrInfo,
7157                                        MachinePointerInfo SrcPtrInfo) {
7158   // Emit a library call.
7159   TargetLowering::ArgListTy Args;
7160   TargetLowering::ArgListEntry Entry;
7161   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7162   Entry.Node = Dst;
7163   Args.push_back(Entry);
7164 
7165   Entry.Node = Src;
7166   Args.push_back(Entry);
7167 
7168   Entry.Ty = SizeTy;
7169   Entry.Node = Size;
7170   Args.push_back(Entry);
7171 
7172   RTLIB::Libcall LibraryCall =
7173       RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
7174   if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
7175     report_fatal_error("Unsupported element size");
7176 
7177   TargetLowering::CallLoweringInfo CLI(*this);
7178   CLI.setDebugLoc(dl)
7179       .setChain(Chain)
7180       .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
7181                     Type::getVoidTy(*getContext()),
7182                     getExternalSymbol(TLI->getLibcallName(LibraryCall),
7183                                       TLI->getPointerTy(getDataLayout())),
7184                     std::move(Args))
7185       .setDiscardResult()
7186       .setTailCall(isTailCall);
7187 
7188   std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
7189   return CallResult.second;
7190 }
7191 
7192 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
7193                                 SDValue Src, SDValue Size, Align Alignment,
7194                                 bool isVol, bool isTailCall,
7195                                 MachinePointerInfo DstPtrInfo,
7196                                 const AAMDNodes &AAInfo) {
7197   // Check to see if we should lower the memset to stores first.
7198   // For cases within the target-specified limits, this is the best choice.
7199   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
7200   if (ConstantSize) {
7201     // Memset with size zero? Just return the original chain.
7202     if (ConstantSize->isZero())
7203       return Chain;
7204 
7205     SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
7206                                      ConstantSize->getZExtValue(), Alignment,
7207                                      isVol, DstPtrInfo, AAInfo);
7208 
7209     if (Result.getNode())
7210       return Result;
7211   }
7212 
7213   // Then check to see if we should lower the memset with target-specific
7214   // code. If the target chooses to do this, this is the next best.
7215   if (TSI) {
7216     SDValue Result = TSI->EmitTargetCodeForMemset(
7217         *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo);
7218     if (Result.getNode())
7219       return Result;
7220   }
7221 
7222   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
7223 
7224   // Emit a library call.
7225   TargetLowering::ArgListTy Args;
7226   TargetLowering::ArgListEntry Entry;
7227   Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
7228   Args.push_back(Entry);
7229   Entry.Node = Src;
7230   Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
7231   Args.push_back(Entry);
7232   Entry.Node = Size;
7233   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7234   Args.push_back(Entry);
7235 
7236   // FIXME: pass in SDLoc
7237   TargetLowering::CallLoweringInfo CLI(*this);
7238   CLI.setDebugLoc(dl)
7239       .setChain(Chain)
7240       .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
7241                     Dst.getValueType().getTypeForEVT(*getContext()),
7242                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
7243                                       TLI->getPointerTy(getDataLayout())),
7244                     std::move(Args))
7245       .setDiscardResult()
7246       .setTailCall(isTailCall);
7247 
7248   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
7249   return CallResult.second;
7250 }
7251 
7252 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
7253                                       SDValue Dst, unsigned DstAlign,
7254                                       SDValue Value, SDValue Size, Type *SizeTy,
7255                                       unsigned ElemSz, bool isTailCall,
7256                                       MachinePointerInfo DstPtrInfo) {
7257   // Emit a library call.
7258   TargetLowering::ArgListTy Args;
7259   TargetLowering::ArgListEntry Entry;
7260   Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7261   Entry.Node = Dst;
7262   Args.push_back(Entry);
7263 
7264   Entry.Ty = Type::getInt8Ty(*getContext());
7265   Entry.Node = Value;
7266   Args.push_back(Entry);
7267 
7268   Entry.Ty = SizeTy;
7269   Entry.Node = Size;
7270   Args.push_back(Entry);
7271 
7272   RTLIB::Libcall LibraryCall =
7273       RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
7274   if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
7275     report_fatal_error("Unsupported element size");
7276 
7277   TargetLowering::CallLoweringInfo CLI(*this);
7278   CLI.setDebugLoc(dl)
7279       .setChain(Chain)
7280       .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
7281                     Type::getVoidTy(*getContext()),
7282                     getExternalSymbol(TLI->getLibcallName(LibraryCall),
7283                                       TLI->getPointerTy(getDataLayout())),
7284                     std::move(Args))
7285       .setDiscardResult()
7286       .setTailCall(isTailCall);
7287 
7288   std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
7289   return CallResult.second;
7290 }
7291 
7292 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
7293                                 SDVTList VTList, ArrayRef<SDValue> Ops,
7294                                 MachineMemOperand *MMO) {
7295   FoldingSetNodeID ID;
7296   ID.AddInteger(MemVT.getRawBits());
7297   AddNodeIDNode(ID, Opcode, VTList, Ops);
7298   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7299   void* IP = nullptr;
7300   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7301     cast<AtomicSDNode>(E)->refineAlignment(MMO);
7302     return SDValue(E, 0);
7303   }
7304 
7305   auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
7306                                     VTList, MemVT, MMO);
7307   createOperands(N, Ops);
7308 
7309   CSEMap.InsertNode(N, IP);
7310   InsertNode(N);
7311   return SDValue(N, 0);
7312 }
7313 
7314 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
7315                                        EVT MemVT, SDVTList VTs, SDValue Chain,
7316                                        SDValue Ptr, SDValue Cmp, SDValue Swp,
7317                                        MachineMemOperand *MMO) {
7318   assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
7319          Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
7320   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
7321 
7322   SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
7323   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
7324 }
7325 
7326 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
7327                                 SDValue Chain, SDValue Ptr, SDValue Val,
7328                                 MachineMemOperand *MMO) {
7329   assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
7330           Opcode == ISD::ATOMIC_LOAD_SUB ||
7331           Opcode == ISD::ATOMIC_LOAD_AND ||
7332           Opcode == ISD::ATOMIC_LOAD_CLR ||
7333           Opcode == ISD::ATOMIC_LOAD_OR ||
7334           Opcode == ISD::ATOMIC_LOAD_XOR ||
7335           Opcode == ISD::ATOMIC_LOAD_NAND ||
7336           Opcode == ISD::ATOMIC_LOAD_MIN ||
7337           Opcode == ISD::ATOMIC_LOAD_MAX ||
7338           Opcode == ISD::ATOMIC_LOAD_UMIN ||
7339           Opcode == ISD::ATOMIC_LOAD_UMAX ||
7340           Opcode == ISD::ATOMIC_LOAD_FADD ||
7341           Opcode == ISD::ATOMIC_LOAD_FSUB ||
7342           Opcode == ISD::ATOMIC_SWAP ||
7343           Opcode == ISD::ATOMIC_STORE) &&
7344          "Invalid Atomic Op");
7345 
7346   EVT VT = Val.getValueType();
7347 
7348   SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
7349                                                getVTList(VT, MVT::Other);
7350   SDValue Ops[] = {Chain, Ptr, Val};
7351   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
7352 }
7353 
7354 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
7355                                 EVT VT, SDValue Chain, SDValue Ptr,
7356                                 MachineMemOperand *MMO) {
7357   assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
7358 
7359   SDVTList VTs = getVTList(VT, MVT::Other);
7360   SDValue Ops[] = {Chain, Ptr};
7361   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
7362 }
7363 
7364 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
7365 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
7366   if (Ops.size() == 1)
7367     return Ops[0];
7368 
7369   SmallVector<EVT, 4> VTs;
7370   VTs.reserve(Ops.size());
7371   for (const SDValue &Op : Ops)
7372     VTs.push_back(Op.getValueType());
7373   return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
7374 }
7375 
7376 SDValue SelectionDAG::getMemIntrinsicNode(
7377     unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
7378     EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
7379     MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
7380   if (!Size && MemVT.isScalableVector())
7381     Size = MemoryLocation::UnknownSize;
7382   else if (!Size)
7383     Size = MemVT.getStoreSize();
7384 
7385   MachineFunction &MF = getMachineFunction();
7386   MachineMemOperand *MMO =
7387       MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);
7388 
7389   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
7390 }
7391 
7392 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
7393                                           SDVTList VTList,
7394                                           ArrayRef<SDValue> Ops, EVT MemVT,
7395                                           MachineMemOperand *MMO) {
7396   assert((Opcode == ISD::INTRINSIC_VOID ||
7397           Opcode == ISD::INTRINSIC_W_CHAIN ||
7398           Opcode == ISD::PREFETCH ||
7399           ((int)Opcode <= std::numeric_limits<int>::max() &&
7400            (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
7401          "Opcode is not a memory-accessing opcode!");
7402 
7403   // Memoize the node unless it returns a flag.
7404   MemIntrinsicSDNode *N;
7405   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
7406     FoldingSetNodeID ID;
7407     AddNodeIDNode(ID, Opcode, VTList, Ops);
7408     ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
7409         Opcode, dl.getIROrder(), VTList, MemVT, MMO));
7410     ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7411     void *IP = nullptr;
7412     if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7413       cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
7414       return SDValue(E, 0);
7415     }
7416 
7417     N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
7418                                       VTList, MemVT, MMO);
7419     createOperands(N, Ops);
7420 
7421   CSEMap.InsertNode(N, IP);
7422   } else {
7423     N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
7424                                       VTList, MemVT, MMO);
7425     createOperands(N, Ops);
7426   }
7427   InsertNode(N);
7428   SDValue V(N, 0);
7429   NewSDValueDbgMsg(V, "Creating new node: ", this);
7430   return V;
7431 }
7432 
7433 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
7434                                       SDValue Chain, int FrameIndex,
7435                                       int64_t Size, int64_t Offset) {
7436   const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
7437   const auto VTs = getVTList(MVT::Other);
7438   SDValue Ops[2] = {
7439       Chain,
7440       getFrameIndex(FrameIndex,
7441                     getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
7442                     true)};
7443 
7444   FoldingSetNodeID ID;
7445   AddNodeIDNode(ID, Opcode, VTs, Ops);
7446   ID.AddInteger(FrameIndex);
7447   ID.AddInteger(Size);
7448   ID.AddInteger(Offset);
7449   void *IP = nullptr;
7450   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
7451     return SDValue(E, 0);
7452 
7453   LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
7454       Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
7455   createOperands(N, Ops);
7456   CSEMap.InsertNode(N, IP);
7457   InsertNode(N);
7458   SDValue V(N, 0);
7459   NewSDValueDbgMsg(V, "Creating new node: ", this);
7460   return V;
7461 }
7462 
7463 SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain,
7464                                          uint64_t Guid, uint64_t Index,
7465                                          uint32_t Attr) {
7466   const unsigned Opcode = ISD::PSEUDO_PROBE;
7467   const auto VTs = getVTList(MVT::Other);
7468   SDValue Ops[] = {Chain};
7469   FoldingSetNodeID ID;
7470   AddNodeIDNode(ID, Opcode, VTs, Ops);
7471   ID.AddInteger(Guid);
7472   ID.AddInteger(Index);
7473   void *IP = nullptr;
7474   if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
7475     return SDValue(E, 0);
7476 
7477   auto *N = newSDNode<PseudoProbeSDNode>(
7478       Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
7479   createOperands(N, Ops);
7480   CSEMap.InsertNode(N, IP);
7481   InsertNode(N);
7482   SDValue V(N, 0);
7483   NewSDValueDbgMsg(V, "Creating new node: ", this);
7484   return V;
7485 }
7486 
7487 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
7488 /// MachinePointerInfo record from it.  This is particularly useful because the
7489 /// code generator has many cases where it doesn't bother passing in a
7490 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
7491 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
7492                                            SelectionDAG &DAG, SDValue Ptr,
7493                                            int64_t Offset = 0) {
7494   // If this is FI+Offset, we can model it.
7495   if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
7496     return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
7497                                              FI->getIndex(), Offset);
7498 
7499   // If this is (FI+Offset1)+Offset2, we can model it.
7500   if (Ptr.getOpcode() != ISD::ADD ||
7501       !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
7502       !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
7503     return Info;
7504 
7505   int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7506   return MachinePointerInfo::getFixedStack(
7507       DAG.getMachineFunction(), FI,
7508       Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
7509 }
7510 
7511 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
7512 /// MachinePointerInfo record from it.  This is particularly useful because the
7513 /// code generator has many cases where it doesn't bother passing in a
7514 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
7515 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
7516                                            SelectionDAG &DAG, SDValue Ptr,
7517                                            SDValue OffsetOp) {
7518   // If the 'Offset' value isn't a constant, we can't handle this.
7519   if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
7520     return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
7521   if (OffsetOp.isUndef())
7522     return InferPointerInfo(Info, DAG, Ptr);
7523   return Info;
7524 }
7525 
7526 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
7527                               EVT VT, const SDLoc &dl, SDValue Chain,
7528                               SDValue Ptr, SDValue Offset,
7529                               MachinePointerInfo PtrInfo, EVT MemVT,
7530                               Align Alignment,
7531                               MachineMemOperand::Flags MMOFlags,
7532                               const AAMDNodes &AAInfo, const MDNode *Ranges) {
7533   assert(Chain.getValueType() == MVT::Other &&
7534         "Invalid chain type");
7535 
7536   MMOFlags |= MachineMemOperand::MOLoad;
7537   assert((MMOFlags & MachineMemOperand::MOStore) == 0);
7538   // If we don't have a PtrInfo, infer the trivial frame index case to simplify
7539   // clients.
7540   if (PtrInfo.V.isNull())
7541     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
7542 
7543   uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
7544   MachineFunction &MF = getMachineFunction();
7545   MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
7546                                                    Alignment, AAInfo, Ranges);
7547   return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
7548 }
7549 
7550 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
7551                               EVT VT, const SDLoc &dl, SDValue Chain,
7552                               SDValue Ptr, SDValue Offset, EVT MemVT,
7553                               MachineMemOperand *MMO) {
7554   if (VT == MemVT) {
7555     ExtType = ISD::NON_EXTLOAD;
7556   } else if (ExtType == ISD::NON_EXTLOAD) {
7557     assert(VT == MemVT && "Non-extending load from different memory type!");
7558   } else {
7559     // Extending load.
7560     assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
7561            "Should only be an extending load, not truncating!");
7562     assert(VT.isInteger() == MemVT.isInteger() &&
7563            "Cannot convert from FP to Int or Int -> FP!");
7564     assert(VT.isVector() == MemVT.isVector() &&
7565            "Cannot use an ext load to convert to or from a vector!");
7566     assert((!VT.isVector() ||
7567             VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
7568            "Cannot use an ext load to change the number of vector elements!");
7569   }
7570 
7571   bool Indexed = AM != ISD::UNINDEXED;
7572   assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
7573 
7574   SDVTList VTs = Indexed ?
7575     getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
7576   SDValue Ops[] = { Chain, Ptr, Offset };
7577   FoldingSetNodeID ID;
7578   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
7579   ID.AddInteger(MemVT.getRawBits());
7580   ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
7581       dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
7582   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7583   void *IP = nullptr;
7584   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7585     cast<LoadSDNode>(E)->refineAlignment(MMO);
7586     return SDValue(E, 0);
7587   }
7588   auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7589                                   ExtType, MemVT, MMO);
7590   createOperands(N, Ops);
7591 
7592   CSEMap.InsertNode(N, IP);
7593   InsertNode(N);
7594   SDValue V(N, 0);
7595   NewSDValueDbgMsg(V, "Creating new node: ", this);
7596   return V;
7597 }
7598 
7599 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7600                               SDValue Ptr, MachinePointerInfo PtrInfo,
7601                               MaybeAlign Alignment,
7602                               MachineMemOperand::Flags MMOFlags,
7603                               const AAMDNodes &AAInfo, const MDNode *Ranges) {
7604   SDValue Undef = getUNDEF(Ptr.getValueType());
7605   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7606                  PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
7607 }
7608 
7609 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7610                               SDValue Ptr, MachineMemOperand *MMO) {
7611   SDValue Undef = getUNDEF(Ptr.getValueType());
7612   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7613                  VT, MMO);
7614 }
7615 
7616 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
7617                                  EVT VT, SDValue Chain, SDValue Ptr,
7618                                  MachinePointerInfo PtrInfo, EVT MemVT,
7619                                  MaybeAlign Alignment,
7620                                  MachineMemOperand::Flags MMOFlags,
7621                                  const AAMDNodes &AAInfo) {
7622   SDValue Undef = getUNDEF(Ptr.getValueType());
7623   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
7624                  MemVT, Alignment, MMOFlags, AAInfo);
7625 }
7626 
7627 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
7628                                  EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
7629                                  MachineMemOperand *MMO) {
7630   SDValue Undef = getUNDEF(Ptr.getValueType());
7631   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
7632                  MemVT, MMO);
7633 }
7634 
7635 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
7636                                      SDValue Base, SDValue Offset,
7637                                      ISD::MemIndexedMode AM) {
7638   LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
7639   assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
7640   // Don't propagate the invariant or dereferenceable flags.
7641   auto MMOFlags =
7642       LD->getMemOperand()->getFlags() &
7643       ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
7644   return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
7645                  LD->getChain(), Base, Offset, LD->getPointerInfo(),
7646                  LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
7647 }
7648 
7649 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7650                                SDValue Ptr, MachinePointerInfo PtrInfo,
7651                                Align Alignment,
7652                                MachineMemOperand::Flags MMOFlags,
7653                                const AAMDNodes &AAInfo) {
7654   assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7655 
7656   MMOFlags |= MachineMemOperand::MOStore;
7657   assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7658 
7659   if (PtrInfo.V.isNull())
7660     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7661 
7662   MachineFunction &MF = getMachineFunction();
7663   uint64_t Size =
7664       MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
7665   MachineMemOperand *MMO =
7666       MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
7667   return getStore(Chain, dl, Val, Ptr, MMO);
7668 }
7669 
7670 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7671                                SDValue Ptr, MachineMemOperand *MMO) {
7672   assert(Chain.getValueType() == MVT::Other &&
7673         "Invalid chain type");
7674   EVT VT = Val.getValueType();
7675   SDVTList VTs = getVTList(MVT::Other);
7676   SDValue Undef = getUNDEF(Ptr.getValueType());
7677   SDValue Ops[] = { Chain, Val, Ptr, Undef };
7678   FoldingSetNodeID ID;
7679   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7680   ID.AddInteger(VT.getRawBits());
7681   ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
7682       dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
7683   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7684   void *IP = nullptr;
7685   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7686     cast<StoreSDNode>(E)->refineAlignment(MMO);
7687     return SDValue(E, 0);
7688   }
7689   auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7690                                    ISD::UNINDEXED, false, VT, MMO);
7691   createOperands(N, Ops);
7692 
7693   CSEMap.InsertNode(N, IP);
7694   InsertNode(N);
7695   SDValue V(N, 0);
7696   NewSDValueDbgMsg(V, "Creating new node: ", this);
7697   return V;
7698 }
7699 
7700 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7701                                     SDValue Ptr, MachinePointerInfo PtrInfo,
7702                                     EVT SVT, Align Alignment,
7703                                     MachineMemOperand::Flags MMOFlags,
7704                                     const AAMDNodes &AAInfo) {
7705   assert(Chain.getValueType() == MVT::Other &&
7706         "Invalid chain type");
7707 
7708   MMOFlags |= MachineMemOperand::MOStore;
7709   assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7710 
7711   if (PtrInfo.V.isNull())
7712     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7713 
7714   MachineFunction &MF = getMachineFunction();
7715   MachineMemOperand *MMO = MF.getMachineMemOperand(
7716       PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
7717       Alignment, AAInfo);
7718   return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
7719 }
7720 
7721 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7722                                     SDValue Ptr, EVT SVT,
7723                                     MachineMemOperand *MMO) {
7724   EVT VT = Val.getValueType();
7725 
7726   assert(Chain.getValueType() == MVT::Other &&
7727         "Invalid chain type");
7728   if (VT == SVT)
7729     return getStore(Chain, dl, Val, Ptr, MMO);
7730 
7731   assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
7732          "Should only be a truncating store, not extending!");
7733   assert(VT.isInteger() == SVT.isInteger() &&
7734          "Can't do FP-INT conversion!");
7735   assert(VT.isVector() == SVT.isVector() &&
7736          "Cannot use trunc store to convert to or from a vector!");
7737   assert((!VT.isVector() ||
7738           VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
7739          "Cannot use trunc store to change the number of vector elements!");
7740 
7741   SDVTList VTs = getVTList(MVT::Other);
7742   SDValue Undef = getUNDEF(Ptr.getValueType());
7743   SDValue Ops[] = { Chain, Val, Ptr, Undef };
7744   FoldingSetNodeID ID;
7745   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7746   ID.AddInteger(SVT.getRawBits());
7747   ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
7748       dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
7749   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7750   void *IP = nullptr;
7751   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7752     cast<StoreSDNode>(E)->refineAlignment(MMO);
7753     return SDValue(E, 0);
7754   }
7755   auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7756                                    ISD::UNINDEXED, true, SVT, MMO);
7757   createOperands(N, Ops);
7758 
7759   CSEMap.InsertNode(N, IP);
7760   InsertNode(N);
7761   SDValue V(N, 0);
7762   NewSDValueDbgMsg(V, "Creating new node: ", this);
7763   return V;
7764 }
7765 
7766 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
7767                                       SDValue Base, SDValue Offset,
7768                                       ISD::MemIndexedMode AM) {
7769   StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
7770   assert(ST->getOffset().isUndef() && "Store is already a indexed store!");
7771   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
7772   SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
7773   FoldingSetNodeID ID;
7774   AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7775   ID.AddInteger(ST->getMemoryVT().getRawBits());
7776   ID.AddInteger(ST->getRawSubclassData());
7777   ID.AddInteger(ST->getPointerInfo().getAddrSpace());
7778   void *IP = nullptr;
7779   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
7780     return SDValue(E, 0);
7781 
7782   auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7783                                    ST->isTruncatingStore(), ST->getMemoryVT(),
7784                                    ST->getMemOperand());
7785   createOperands(N, Ops);
7786 
7787   CSEMap.InsertNode(N, IP);
7788   InsertNode(N);
7789   SDValue V(N, 0);
7790   NewSDValueDbgMsg(V, "Creating new node: ", this);
7791   return V;
7792 }
7793 
7794 SDValue SelectionDAG::getLoadVP(
7795     ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
7796     SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL,
7797     MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
7798     MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
7799     const MDNode *Ranges, bool IsExpanding) {
7800   assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7801 
7802   MMOFlags |= MachineMemOperand::MOLoad;
7803   assert((MMOFlags & MachineMemOperand::MOStore) == 0);
7804   // If we don't have a PtrInfo, infer the trivial frame index case to simplify
7805   // clients.
7806   if (PtrInfo.V.isNull())
7807     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
7808 
7809   uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
7810   MachineFunction &MF = getMachineFunction();
7811   MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
7812                                                    Alignment, AAInfo, Ranges);
7813   return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr, Offset, Mask, EVL, MemVT,
7814                    MMO, IsExpanding);
7815 }
7816 
7817 SDValue SelectionDAG::getLoadVP(ISD::MemIndexedMode AM,
7818                                 ISD::LoadExtType ExtType, EVT VT,
7819                                 const SDLoc &dl, SDValue Chain, SDValue Ptr,
7820                                 SDValue Offset, SDValue Mask, SDValue EVL,
7821                                 EVT MemVT, MachineMemOperand *MMO,
7822                                 bool IsExpanding) {
7823   bool Indexed = AM != ISD::UNINDEXED;
7824   assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
7825 
7826   SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
7827                          : getVTList(VT, MVT::Other);
7828   SDValue Ops[] = {Chain, Ptr, Offset, Mask, EVL};
7829   FoldingSetNodeID ID;
7830   AddNodeIDNode(ID, ISD::VP_LOAD, VTs, Ops);
7831   ID.AddInteger(VT.getRawBits());
7832   ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
7833       dl.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
7834   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7835   void *IP = nullptr;
7836   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7837     cast<VPLoadSDNode>(E)->refineAlignment(MMO);
7838     return SDValue(E, 0);
7839   }
7840   auto *N = newSDNode<VPLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7841                                     ExtType, IsExpanding, MemVT, MMO);
7842   createOperands(N, Ops);
7843 
7844   CSEMap.InsertNode(N, IP);
7845   InsertNode(N);
7846   SDValue V(N, 0);
7847   NewSDValueDbgMsg(V, "Creating new node: ", this);
7848   return V;
7849 }
7850 
7851 SDValue SelectionDAG::getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain,
7852                                 SDValue Ptr, SDValue Mask, SDValue EVL,
7853                                 MachinePointerInfo PtrInfo,
7854                                 MaybeAlign Alignment,
7855                                 MachineMemOperand::Flags MMOFlags,
7856                                 const AAMDNodes &AAInfo, const MDNode *Ranges,
7857                                 bool IsExpanding) {
7858   SDValue Undef = getUNDEF(Ptr.getValueType());
7859   return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7860                    Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
7861                    IsExpanding);
7862 }
7863 
7864 SDValue SelectionDAG::getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain,
7865                                 SDValue Ptr, SDValue Mask, SDValue EVL,
7866                                 MachineMemOperand *MMO, bool IsExpanding) {
7867   SDValue Undef = getUNDEF(Ptr.getValueType());
7868   return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7869                    Mask, EVL, VT, MMO, IsExpanding);
7870 }
7871 
7872 SDValue SelectionDAG::getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl,
7873                                    EVT VT, SDValue Chain, SDValue Ptr,
7874                                    SDValue Mask, SDValue EVL,
7875                                    MachinePointerInfo PtrInfo, EVT MemVT,
7876                                    MaybeAlign Alignment,
7877                                    MachineMemOperand::Flags MMOFlags,
7878                                    const AAMDNodes &AAInfo, bool IsExpanding) {
7879   SDValue Undef = getUNDEF(Ptr.getValueType());
7880   return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask,
7881                    EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo, nullptr,
7882                    IsExpanding);
7883 }
7884 
7885 SDValue SelectionDAG::getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl,
7886                                    EVT VT, SDValue Chain, SDValue Ptr,
7887                                    SDValue Mask, SDValue EVL, EVT MemVT,
7888                                    MachineMemOperand *MMO, bool IsExpanding) {
7889   SDValue Undef = getUNDEF(Ptr.getValueType());
7890   return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask,
7891                    EVL, MemVT, MMO, IsExpanding);
7892 }
7893 
7894 SDValue SelectionDAG::getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl,
7895                                        SDValue Base, SDValue Offset,
7896                                        ISD::MemIndexedMode AM) {
7897   auto *LD = cast<VPLoadSDNode>(OrigLoad);
7898   assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
7899   // Don't propagate the invariant or dereferenceable flags.
7900   auto MMOFlags =
7901       LD->getMemOperand()->getFlags() &
7902       ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
7903   return getLoadVP(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
7904                    LD->getChain(), Base, Offset, LD->getMask(),
7905                    LD->getVectorLength(), LD->getPointerInfo(),
7906                    LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
7907                    nullptr, LD->isExpandingLoad());
7908 }
7909 
7910 SDValue SelectionDAG::getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
7911                                  SDValue Ptr, SDValue Offset, SDValue Mask,
7912                                  SDValue EVL, EVT MemVT, MachineMemOperand *MMO,
7913                                  ISD::MemIndexedMode AM, bool IsTruncating,
7914                                  bool IsCompressing) {
7915   assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7916   bool Indexed = AM != ISD::UNINDEXED;
7917   assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
7918   SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
7919                          : getVTList(MVT::Other);
7920   SDValue Ops[] = {Chain, Val, Ptr, Offset, Mask, EVL};
7921   FoldingSetNodeID ID;
7922   AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
7923   ID.AddInteger(MemVT.getRawBits());
7924   ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
7925       dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
7926   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7927   void *IP = nullptr;
7928   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7929     cast<VPStoreSDNode>(E)->refineAlignment(MMO);
7930     return SDValue(E, 0);
7931   }
7932   auto *N = newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7933                                      IsTruncating, IsCompressing, MemVT, MMO);
7934   createOperands(N, Ops);
7935 
7936   CSEMap.InsertNode(N, IP);
7937   InsertNode(N);
7938   SDValue V(N, 0);
7939   NewSDValueDbgMsg(V, "Creating new node: ", this);
7940   return V;
7941 }
7942 
7943 SDValue SelectionDAG::getTruncStoreVP(SDValue Chain, const SDLoc &dl,
7944                                       SDValue Val, SDValue Ptr, SDValue Mask,
7945                                       SDValue EVL, MachinePointerInfo PtrInfo,
7946                                       EVT SVT, Align Alignment,
7947                                       MachineMemOperand::Flags MMOFlags,
7948                                       const AAMDNodes &AAInfo,
7949                                       bool IsCompressing) {
7950   assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7951 
7952   MMOFlags |= MachineMemOperand::MOStore;
7953   assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7954 
7955   if (PtrInfo.V.isNull())
7956     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7957 
7958   MachineFunction &MF = getMachineFunction();
7959   MachineMemOperand *MMO = MF.getMachineMemOperand(
7960       PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
7961       Alignment, AAInfo);
7962   return getTruncStoreVP(Chain, dl, Val, Ptr, Mask, EVL, SVT, MMO,
7963                          IsCompressing);
7964 }
7965 
7966 SDValue SelectionDAG::getTruncStoreVP(SDValue Chain, const SDLoc &dl,
7967                                       SDValue Val, SDValue Ptr, SDValue Mask,
7968                                       SDValue EVL, EVT SVT,
7969                                       MachineMemOperand *MMO,
7970                                       bool IsCompressing) {
7971   EVT VT = Val.getValueType();
7972 
7973   assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7974   if (VT == SVT)
7975     return getStoreVP(Chain, dl, Val, Ptr, getUNDEF(Ptr.getValueType()), Mask,
7976                       EVL, VT, MMO, ISD::UNINDEXED,
7977                       /*IsTruncating*/ false, IsCompressing);
7978 
7979   assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
7980          "Should only be a truncating store, not extending!");
7981   assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
7982   assert(VT.isVector() == SVT.isVector() &&
7983          "Cannot use trunc store to convert to or from a vector!");
7984   assert((!VT.isVector() ||
7985           VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
7986          "Cannot use trunc store to change the number of vector elements!");
7987 
7988   SDVTList VTs = getVTList(MVT::Other);
7989   SDValue Undef = getUNDEF(Ptr.getValueType());
7990   SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL};
7991   FoldingSetNodeID ID;
7992   AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
7993   ID.AddInteger(SVT.getRawBits());
7994   ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
7995       dl.getIROrder(), VTs, ISD::UNINDEXED, true, IsCompressing, SVT, MMO));
7996   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7997   void *IP = nullptr;
7998   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7999     cast<VPStoreSDNode>(E)->refineAlignment(MMO);
8000     return SDValue(E, 0);
8001   }
8002   auto *N =
8003       newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
8004                                ISD::UNINDEXED, true, IsCompressing, SVT, MMO);
8005   createOperands(N, Ops);
8006 
8007   CSEMap.InsertNode(N, IP);
8008   InsertNode(N);
8009   SDValue V(N, 0);
8010   NewSDValueDbgMsg(V, "Creating new node: ", this);
8011   return V;
8012 }
8013 
8014 SDValue SelectionDAG::getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl,
8015                                         SDValue Base, SDValue Offset,
8016                                         ISD::MemIndexedMode AM) {
8017   auto *ST = cast<VPStoreSDNode>(OrigStore);
8018   assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
8019   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
8020   SDValue Ops[] = {ST->getChain(), ST->getValue(), Base,
8021                    Offset,         ST->getMask(),  ST->getVectorLength()};
8022   FoldingSetNodeID ID;
8023   AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
8024   ID.AddInteger(ST->getMemoryVT().getRawBits());
8025   ID.AddInteger(ST->getRawSubclassData());
8026   ID.AddInteger(ST->getPointerInfo().getAddrSpace());
8027   void *IP = nullptr;
8028   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
8029     return SDValue(E, 0);
8030 
8031   auto *N = newSDNode<VPStoreSDNode>(
8032       dl.getIROrder(), dl.getDebugLoc(), VTs, AM, ST->isTruncatingStore(),
8033       ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
8034   createOperands(N, Ops);
8035 
8036   CSEMap.InsertNode(N, IP);
8037   InsertNode(N);
8038   SDValue V(N, 0);
8039   NewSDValueDbgMsg(V, "Creating new node: ", this);
8040   return V;
8041 }
8042 
8043 SDValue SelectionDAG::getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl,
8044                                   ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
8045                                   ISD::MemIndexType IndexType) {
8046   assert(Ops.size() == 6 && "Incompatible number of operands");
8047 
8048   FoldingSetNodeID ID;
8049   AddNodeIDNode(ID, ISD::VP_GATHER, VTs, Ops);
8050   ID.AddInteger(VT.getRawBits());
8051   ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
8052       dl.getIROrder(), VTs, VT, MMO, IndexType));
8053   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8054   void *IP = nullptr;
8055   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8056     cast<VPGatherSDNode>(E)->refineAlignment(MMO);
8057     return SDValue(E, 0);
8058   }
8059 
8060   auto *N = newSDNode<VPGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
8061                                       VT, MMO, IndexType);
8062   createOperands(N, Ops);
8063 
8064   assert(N->getMask().getValueType().getVectorElementCount() ==
8065              N->getValueType(0).getVectorElementCount() &&
8066          "Vector width mismatch between mask and data");
8067   assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
8068              N->getValueType(0).getVectorElementCount().isScalable() &&
8069          "Scalable flags of index and data do not match");
8070   assert(ElementCount::isKnownGE(
8071              N->getIndex().getValueType().getVectorElementCount(),
8072              N->getValueType(0).getVectorElementCount()) &&
8073          "Vector width mismatch between index and data");
8074   assert(isa<ConstantSDNode>(N->getScale()) &&
8075          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
8076          "Scale should be a constant power of 2");
8077 
8078   CSEMap.InsertNode(N, IP);
8079   InsertNode(N);
8080   SDValue V(N, 0);
8081   NewSDValueDbgMsg(V, "Creating new node: ", this);
8082   return V;
8083 }
8084 
8085 SDValue SelectionDAG::getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl,
8086                                    ArrayRef<SDValue> Ops,
8087                                    MachineMemOperand *MMO,
8088                                    ISD::MemIndexType IndexType) {
8089   assert(Ops.size() == 7 && "Incompatible number of operands");
8090 
8091   FoldingSetNodeID ID;
8092   AddNodeIDNode(ID, ISD::VP_SCATTER, VTs, Ops);
8093   ID.AddInteger(VT.getRawBits());
8094   ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
8095       dl.getIROrder(), VTs, VT, MMO, IndexType));
8096   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8097   void *IP = nullptr;
8098   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8099     cast<VPScatterSDNode>(E)->refineAlignment(MMO);
8100     return SDValue(E, 0);
8101   }
8102   auto *N = newSDNode<VPScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
8103                                        VT, MMO, IndexType);
8104   createOperands(N, Ops);
8105 
8106   assert(N->getMask().getValueType().getVectorElementCount() ==
8107              N->getValue().getValueType().getVectorElementCount() &&
8108          "Vector width mismatch between mask and data");
8109   assert(
8110       N->getIndex().getValueType().getVectorElementCount().isScalable() ==
8111           N->getValue().getValueType().getVectorElementCount().isScalable() &&
8112       "Scalable flags of index and data do not match");
8113   assert(ElementCount::isKnownGE(
8114              N->getIndex().getValueType().getVectorElementCount(),
8115              N->getValue().getValueType().getVectorElementCount()) &&
8116          "Vector width mismatch between index and data");
8117   assert(isa<ConstantSDNode>(N->getScale()) &&
8118          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
8119          "Scale should be a constant power of 2");
8120 
8121   CSEMap.InsertNode(N, IP);
8122   InsertNode(N);
8123   SDValue V(N, 0);
8124   NewSDValueDbgMsg(V, "Creating new node: ", this);
8125   return V;
8126 }
8127 
8128 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
8129                                     SDValue Base, SDValue Offset, SDValue Mask,
8130                                     SDValue PassThru, EVT MemVT,
8131                                     MachineMemOperand *MMO,
8132                                     ISD::MemIndexedMode AM,
8133                                     ISD::LoadExtType ExtTy, bool isExpanding) {
8134   bool Indexed = AM != ISD::UNINDEXED;
8135   assert((Indexed || Offset.isUndef()) &&
8136          "Unindexed masked load with an offset!");
8137   SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
8138                          : getVTList(VT, MVT::Other);
8139   SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
8140   FoldingSetNodeID ID;
8141   AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
8142   ID.AddInteger(MemVT.getRawBits());
8143   ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
8144       dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
8145   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8146   void *IP = nullptr;
8147   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8148     cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
8149     return SDValue(E, 0);
8150   }
8151   auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
8152                                         AM, ExtTy, isExpanding, MemVT, MMO);
8153   createOperands(N, Ops);
8154 
8155   CSEMap.InsertNode(N, IP);
8156   InsertNode(N);
8157   SDValue V(N, 0);
8158   NewSDValueDbgMsg(V, "Creating new node: ", this);
8159   return V;
8160 }
8161 
8162 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
8163                                            SDValue Base, SDValue Offset,
8164                                            ISD::MemIndexedMode AM) {
8165   MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
8166   assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!");
8167   return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
8168                        Offset, LD->getMask(), LD->getPassThru(),
8169                        LD->getMemoryVT(), LD->getMemOperand(), AM,
8170                        LD->getExtensionType(), LD->isExpandingLoad());
8171 }
8172 
8173 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
8174                                      SDValue Val, SDValue Base, SDValue Offset,
8175                                      SDValue Mask, EVT MemVT,
8176                                      MachineMemOperand *MMO,
8177                                      ISD::MemIndexedMode AM, bool IsTruncating,
8178                                      bool IsCompressing) {
8179   assert(Chain.getValueType() == MVT::Other &&
8180         "Invalid chain type");
8181   bool Indexed = AM != ISD::UNINDEXED;
8182   assert((Indexed || Offset.isUndef()) &&
8183          "Unindexed masked store with an offset!");
8184   SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
8185                          : getVTList(MVT::Other);
8186   SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
8187   FoldingSetNodeID ID;
8188   AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
8189   ID.AddInteger(MemVT.getRawBits());
8190   ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
8191       dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
8192   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8193   void *IP = nullptr;
8194   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8195     cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
8196     return SDValue(E, 0);
8197   }
8198   auto *N =
8199       newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
8200                                    IsTruncating, IsCompressing, MemVT, MMO);
8201   createOperands(N, Ops);
8202 
8203   CSEMap.InsertNode(N, IP);
8204   InsertNode(N);
8205   SDValue V(N, 0);
8206   NewSDValueDbgMsg(V, "Creating new node: ", this);
8207   return V;
8208 }
8209 
8210 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
8211                                             SDValue Base, SDValue Offset,
8212                                             ISD::MemIndexedMode AM) {
8213   MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
8214   assert(ST->getOffset().isUndef() &&
8215          "Masked store is already a indexed store!");
8216   return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
8217                         ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
8218                         AM, ST->isTruncatingStore(), ST->isCompressingStore());
8219 }
8220 
8221 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl,
8222                                       ArrayRef<SDValue> Ops,
8223                                       MachineMemOperand *MMO,
8224                                       ISD::MemIndexType IndexType,
8225                                       ISD::LoadExtType ExtTy) {
8226   assert(Ops.size() == 6 && "Incompatible number of operands");
8227 
8228   FoldingSetNodeID ID;
8229   AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
8230   ID.AddInteger(MemVT.getRawBits());
8231   ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
8232       dl.getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
8233   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8234   void *IP = nullptr;
8235   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8236     cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
8237     return SDValue(E, 0);
8238   }
8239 
8240   IndexType = TLI->getCanonicalIndexType(IndexType, MemVT, Ops[4]);
8241   auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
8242                                           VTs, MemVT, MMO, IndexType, ExtTy);
8243   createOperands(N, Ops);
8244 
8245   assert(N->getPassThru().getValueType() == N->getValueType(0) &&
8246          "Incompatible type of the PassThru value in MaskedGatherSDNode");
8247   assert(N->getMask().getValueType().getVectorElementCount() ==
8248              N->getValueType(0).getVectorElementCount() &&
8249          "Vector width mismatch between mask and data");
8250   assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
8251              N->getValueType(0).getVectorElementCount().isScalable() &&
8252          "Scalable flags of index and data do not match");
8253   assert(ElementCount::isKnownGE(
8254              N->getIndex().getValueType().getVectorElementCount(),
8255              N->getValueType(0).getVectorElementCount()) &&
8256          "Vector width mismatch between index and data");
8257   assert(isa<ConstantSDNode>(N->getScale()) &&
8258          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
8259          "Scale should be a constant power of 2");
8260 
8261   CSEMap.InsertNode(N, IP);
8262   InsertNode(N);
8263   SDValue V(N, 0);
8264   NewSDValueDbgMsg(V, "Creating new node: ", this);
8265   return V;
8266 }
8267 
8268 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl,
8269                                        ArrayRef<SDValue> Ops,
8270                                        MachineMemOperand *MMO,
8271                                        ISD::MemIndexType IndexType,
8272                                        bool IsTrunc) {
8273   assert(Ops.size() == 6 && "Incompatible number of operands");
8274 
8275   FoldingSetNodeID ID;
8276   AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
8277   ID.AddInteger(MemVT.getRawBits());
8278   ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
8279       dl.getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
8280   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8281   void *IP = nullptr;
8282   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8283     cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
8284     return SDValue(E, 0);
8285   }
8286 
8287   IndexType = TLI->getCanonicalIndexType(IndexType, MemVT, Ops[4]);
8288   auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
8289                                            VTs, MemVT, MMO, IndexType, IsTrunc);
8290   createOperands(N, Ops);
8291 
8292   assert(N->getMask().getValueType().getVectorElementCount() ==
8293              N->getValue().getValueType().getVectorElementCount() &&
8294          "Vector width mismatch between mask and data");
8295   assert(
8296       N->getIndex().getValueType().getVectorElementCount().isScalable() ==
8297           N->getValue().getValueType().getVectorElementCount().isScalable() &&
8298       "Scalable flags of index and data do not match");
8299   assert(ElementCount::isKnownGE(
8300              N->getIndex().getValueType().getVectorElementCount(),
8301              N->getValue().getValueType().getVectorElementCount()) &&
8302          "Vector width mismatch between index and data");
8303   assert(isa<ConstantSDNode>(N->getScale()) &&
8304          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
8305          "Scale should be a constant power of 2");
8306 
8307   CSEMap.InsertNode(N, IP);
8308   InsertNode(N);
8309   SDValue V(N, 0);
8310   NewSDValueDbgMsg(V, "Creating new node: ", this);
8311   return V;
8312 }
8313 
8314 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
8315   // select undef, T, F --> T (if T is a constant), otherwise F
8316   // select, ?, undef, F --> F
8317   // select, ?, T, undef --> T
8318   if (Cond.isUndef())
8319     return isConstantValueOfAnyType(T) ? T : F;
8320   if (T.isUndef())
8321     return F;
8322   if (F.isUndef())
8323     return T;
8324 
8325   // select true, T, F --> T
8326   // select false, T, F --> F
8327   if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
8328     return CondC->isZero() ? F : T;
8329 
8330   // TODO: This should simplify VSELECT with constant condition using something
8331   // like this (but check boolean contents to be complete?):
8332   //  if (ISD::isBuildVectorAllOnes(Cond.getNode()))
8333   //    return T;
8334   //  if (ISD::isBuildVectorAllZeros(Cond.getNode()))
8335   //    return F;
8336 
8337   // select ?, T, T --> T
8338   if (T == F)
8339     return T;
8340 
8341   return SDValue();
8342 }
8343 
8344 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
8345   // shift undef, Y --> 0 (can always assume that the undef value is 0)
8346   if (X.isUndef())
8347     return getConstant(0, SDLoc(X.getNode()), X.getValueType());
8348   // shift X, undef --> undef (because it may shift by the bitwidth)
8349   if (Y.isUndef())
8350     return getUNDEF(X.getValueType());
8351 
8352   // shift 0, Y --> 0
8353   // shift X, 0 --> X
8354   if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
8355     return X;
8356 
8357   // shift X, C >= bitwidth(X) --> undef
8358   // All vector elements must be too big (or undef) to avoid partial undefs.
8359   auto isShiftTooBig = [X](ConstantSDNode *Val) {
8360     return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
8361   };
8362   if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
8363     return getUNDEF(X.getValueType());
8364 
8365   return SDValue();
8366 }
8367 
8368 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
8369                                       SDNodeFlags Flags) {
8370   // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
8371   // (an undef operand can be chosen to be Nan/Inf), then the result of this
8372   // operation is poison. That result can be relaxed to undef.
8373   ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true);
8374   ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
8375   bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
8376                 (YC && YC->getValueAPF().isNaN());
8377   bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
8378                 (YC && YC->getValueAPF().isInfinity());
8379 
8380   if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
8381     return getUNDEF(X.getValueType());
8382 
8383   if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
8384     return getUNDEF(X.getValueType());
8385 
8386   if (!YC)
8387     return SDValue();
8388 
8389   // X + -0.0 --> X
8390   if (Opcode == ISD::FADD)
8391     if (YC->getValueAPF().isNegZero())
8392       return X;
8393 
8394   // X - +0.0 --> X
8395   if (Opcode == ISD::FSUB)
8396     if (YC->getValueAPF().isPosZero())
8397       return X;
8398 
8399   // X * 1.0 --> X
8400   // X / 1.0 --> X
8401   if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
8402     if (YC->getValueAPF().isExactlyValue(1.0))
8403       return X;
8404 
8405   // X * 0.0 --> 0.0
8406   if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
8407     if (YC->getValueAPF().isZero())
8408       return getConstantFP(0.0, SDLoc(Y), Y.getValueType());
8409 
8410   return SDValue();
8411 }
8412 
8413 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
8414                                SDValue Ptr, SDValue SV, unsigned Align) {
8415   SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
8416   return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
8417 }
8418 
8419 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8420                               ArrayRef<SDUse> Ops) {
8421   switch (Ops.size()) {
8422   case 0: return getNode(Opcode, DL, VT);
8423   case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
8424   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
8425   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
8426   default: break;
8427   }
8428 
8429   // Copy from an SDUse array into an SDValue array for use with
8430   // the regular getNode logic.
8431   SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
8432   return getNode(Opcode, DL, VT, NewOps);
8433 }
8434 
8435 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8436                               ArrayRef<SDValue> Ops) {
8437   SDNodeFlags Flags;
8438   if (Inserter)
8439     Flags = Inserter->getFlags();
8440   return getNode(Opcode, DL, VT, Ops, Flags);
8441 }
8442 
8443 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8444                               ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
8445   unsigned NumOps = Ops.size();
8446   switch (NumOps) {
8447   case 0: return getNode(Opcode, DL, VT);
8448   case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
8449   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
8450   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
8451   default: break;
8452   }
8453 
8454 #ifndef NDEBUG
8455   for (auto &Op : Ops)
8456     assert(Op.getOpcode() != ISD::DELETED_NODE &&
8457            "Operand is DELETED_NODE!");
8458 #endif
8459 
8460   switch (Opcode) {
8461   default: break;
8462   case ISD::BUILD_VECTOR:
8463     // Attempt to simplify BUILD_VECTOR.
8464     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
8465       return V;
8466     break;
8467   case ISD::CONCAT_VECTORS:
8468     if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
8469       return V;
8470     break;
8471   case ISD::SELECT_CC:
8472     assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
8473     assert(Ops[0].getValueType() == Ops[1].getValueType() &&
8474            "LHS and RHS of condition must have same type!");
8475     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
8476            "True and False arms of SelectCC must have same type!");
8477     assert(Ops[2].getValueType() == VT &&
8478            "select_cc node must be of same type as true and false value!");
8479     break;
8480   case ISD::BR_CC:
8481     assert(NumOps == 5 && "BR_CC takes 5 operands!");
8482     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
8483            "LHS/RHS of comparison should match types!");
8484     break;
8485   }
8486 
8487   // Memoize nodes.
8488   SDNode *N;
8489   SDVTList VTs = getVTList(VT);
8490 
8491   if (VT != MVT::Glue) {
8492     FoldingSetNodeID ID;
8493     AddNodeIDNode(ID, Opcode, VTs, Ops);
8494     void *IP = nullptr;
8495 
8496     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
8497       return SDValue(E, 0);
8498 
8499     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8500     createOperands(N, Ops);
8501 
8502     CSEMap.InsertNode(N, IP);
8503   } else {
8504     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8505     createOperands(N, Ops);
8506   }
8507 
8508   N->setFlags(Flags);
8509   InsertNode(N);
8510   SDValue V(N, 0);
8511   NewSDValueDbgMsg(V, "Creating new node: ", this);
8512   return V;
8513 }
8514 
8515 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
8516                               ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
8517   return getNode(Opcode, DL, getVTList(ResultTys), Ops);
8518 }
8519 
8520 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8521                               ArrayRef<SDValue> Ops) {
8522   SDNodeFlags Flags;
8523   if (Inserter)
8524     Flags = Inserter->getFlags();
8525   return getNode(Opcode, DL, VTList, Ops, Flags);
8526 }
8527 
8528 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8529                               ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
8530   if (VTList.NumVTs == 1)
8531     return getNode(Opcode, DL, VTList.VTs[0], Ops);
8532 
8533 #ifndef NDEBUG
8534   for (auto &Op : Ops)
8535     assert(Op.getOpcode() != ISD::DELETED_NODE &&
8536            "Operand is DELETED_NODE!");
8537 #endif
8538 
8539   switch (Opcode) {
8540   case ISD::STRICT_FP_EXTEND:
8541     assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
8542            "Invalid STRICT_FP_EXTEND!");
8543     assert(VTList.VTs[0].isFloatingPoint() &&
8544            Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
8545     assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
8546            "STRICT_FP_EXTEND result type should be vector iff the operand "
8547            "type is vector!");
8548     assert((!VTList.VTs[0].isVector() ||
8549             VTList.VTs[0].getVectorNumElements() ==
8550             Ops[1].getValueType().getVectorNumElements()) &&
8551            "Vector element count mismatch!");
8552     assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
8553            "Invalid fpext node, dst <= src!");
8554     break;
8555   case ISD::STRICT_FP_ROUND:
8556     assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
8557     assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
8558            "STRICT_FP_ROUND result type should be vector iff the operand "
8559            "type is vector!");
8560     assert((!VTList.VTs[0].isVector() ||
8561             VTList.VTs[0].getVectorNumElements() ==
8562             Ops[1].getValueType().getVectorNumElements()) &&
8563            "Vector element count mismatch!");
8564     assert(VTList.VTs[0].isFloatingPoint() &&
8565            Ops[1].getValueType().isFloatingPoint() &&
8566            VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
8567            isa<ConstantSDNode>(Ops[2]) &&
8568            (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
8569             cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
8570            "Invalid STRICT_FP_ROUND!");
8571     break;
8572 #if 0
8573   // FIXME: figure out how to safely handle things like
8574   // int foo(int x) { return 1 << (x & 255); }
8575   // int bar() { return foo(256); }
8576   case ISD::SRA_PARTS:
8577   case ISD::SRL_PARTS:
8578   case ISD::SHL_PARTS:
8579     if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
8580         cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
8581       return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
8582     else if (N3.getOpcode() == ISD::AND)
8583       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
8584         // If the and is only masking out bits that cannot effect the shift,
8585         // eliminate the and.
8586         unsigned NumBits = VT.getScalarSizeInBits()*2;
8587         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
8588           return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
8589       }
8590     break;
8591 #endif
8592   }
8593 
8594   // Memoize the node unless it returns a flag.
8595   SDNode *N;
8596   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
8597     FoldingSetNodeID ID;
8598     AddNodeIDNode(ID, Opcode, VTList, Ops);
8599     void *IP = nullptr;
8600     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
8601       return SDValue(E, 0);
8602 
8603     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
8604     createOperands(N, Ops);
8605     CSEMap.InsertNode(N, IP);
8606   } else {
8607     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
8608     createOperands(N, Ops);
8609   }
8610 
8611   N->setFlags(Flags);
8612   InsertNode(N);
8613   SDValue V(N, 0);
8614   NewSDValueDbgMsg(V, "Creating new node: ", this);
8615   return V;
8616 }
8617 
8618 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
8619                               SDVTList VTList) {
8620   return getNode(Opcode, DL, VTList, None);
8621 }
8622 
8623 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8624                               SDValue N1) {
8625   SDValue Ops[] = { N1 };
8626   return getNode(Opcode, DL, VTList, Ops);
8627 }
8628 
8629 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8630                               SDValue N1, SDValue N2) {
8631   SDValue Ops[] = { N1, N2 };
8632   return getNode(Opcode, DL, VTList, Ops);
8633 }
8634 
8635 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8636                               SDValue N1, SDValue N2, SDValue N3) {
8637   SDValue Ops[] = { N1, N2, N3 };
8638   return getNode(Opcode, DL, VTList, Ops);
8639 }
8640 
8641 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8642                               SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
8643   SDValue Ops[] = { N1, N2, N3, N4 };
8644   return getNode(Opcode, DL, VTList, Ops);
8645 }
8646 
8647 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8648                               SDValue N1, SDValue N2, SDValue N3, SDValue N4,
8649                               SDValue N5) {
8650   SDValue Ops[] = { N1, N2, N3, N4, N5 };
8651   return getNode(Opcode, DL, VTList, Ops);
8652 }
8653 
8654 SDVTList SelectionDAG::getVTList(EVT VT) {
8655   return makeVTList(SDNode::getValueTypeList(VT), 1);
8656 }
8657 
8658 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
8659   FoldingSetNodeID ID;
8660   ID.AddInteger(2U);
8661   ID.AddInteger(VT1.getRawBits());
8662   ID.AddInteger(VT2.getRawBits());
8663 
8664   void *IP = nullptr;
8665   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
8666   if (!Result) {
8667     EVT *Array = Allocator.Allocate<EVT>(2);
8668     Array[0] = VT1;
8669     Array[1] = VT2;
8670     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
8671     VTListMap.InsertNode(Result, IP);
8672   }
8673   return Result->getSDVTList();
8674 }
8675 
8676 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
8677   FoldingSetNodeID ID;
8678   ID.AddInteger(3U);
8679   ID.AddInteger(VT1.getRawBits());
8680   ID.AddInteger(VT2.getRawBits());
8681   ID.AddInteger(VT3.getRawBits());
8682 
8683   void *IP = nullptr;
8684   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
8685   if (!Result) {
8686     EVT *Array = Allocator.Allocate<EVT>(3);
8687     Array[0] = VT1;
8688     Array[1] = VT2;
8689     Array[2] = VT3;
8690     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
8691     VTListMap.InsertNode(Result, IP);
8692   }
8693   return Result->getSDVTList();
8694 }
8695 
8696 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
8697   FoldingSetNodeID ID;
8698   ID.AddInteger(4U);
8699   ID.AddInteger(VT1.getRawBits());
8700   ID.AddInteger(VT2.getRawBits());
8701   ID.AddInteger(VT3.getRawBits());
8702   ID.AddInteger(VT4.getRawBits());
8703 
8704   void *IP = nullptr;
8705   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
8706   if (!Result) {
8707     EVT *Array = Allocator.Allocate<EVT>(4);
8708     Array[0] = VT1;
8709     Array[1] = VT2;
8710     Array[2] = VT3;
8711     Array[3] = VT4;
8712     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
8713     VTListMap.InsertNode(Result, IP);
8714   }
8715   return Result->getSDVTList();
8716 }
8717 
8718 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
8719   unsigned NumVTs = VTs.size();
8720   FoldingSetNodeID ID;
8721   ID.AddInteger(NumVTs);
8722   for (unsigned index = 0; index < NumVTs; index++) {
8723     ID.AddInteger(VTs[index].getRawBits());
8724   }
8725 
8726   void *IP = nullptr;
8727   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
8728   if (!Result) {
8729     EVT *Array = Allocator.Allocate<EVT>(NumVTs);
8730     llvm::copy(VTs, Array);
8731     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
8732     VTListMap.InsertNode(Result, IP);
8733   }
8734   return Result->getSDVTList();
8735 }
8736 
8737 
8738 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
8739 /// specified operands.  If the resultant node already exists in the DAG,
8740 /// this does not modify the specified node, instead it returns the node that
8741 /// already exists.  If the resultant node does not exist in the DAG, the
8742 /// input node is returned.  As a degenerate case, if you specify the same
8743 /// input operands as the node already has, the input node is returned.
8744 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
8745   assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
8746 
8747   // Check to see if there is no change.
8748   if (Op == N->getOperand(0)) return N;
8749 
8750   // See if the modified node already exists.
8751   void *InsertPos = nullptr;
8752   if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
8753     return Existing;
8754 
8755   // Nope it doesn't.  Remove the node from its current place in the maps.
8756   if (InsertPos)
8757     if (!RemoveNodeFromCSEMaps(N))
8758       InsertPos = nullptr;
8759 
8760   // Now we update the operands.
8761   N->OperandList[0].set(Op);
8762 
8763   updateDivergence(N);
8764   // If this gets put into a CSE map, add it.
8765   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
8766   return N;
8767 }
8768 
8769 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
8770   assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
8771 
8772   // Check to see if there is no change.
8773   if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
8774     return N;   // No operands changed, just return the input node.
8775 
8776   // See if the modified node already exists.
8777   void *InsertPos = nullptr;
8778   if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
8779     return Existing;
8780 
8781   // Nope it doesn't.  Remove the node from its current place in the maps.
8782   if (InsertPos)
8783     if (!RemoveNodeFromCSEMaps(N))
8784       InsertPos = nullptr;
8785 
8786   // Now we update the operands.
8787   if (N->OperandList[0] != Op1)
8788     N->OperandList[0].set(Op1);
8789   if (N->OperandList[1] != Op2)
8790     N->OperandList[1].set(Op2);
8791 
8792   updateDivergence(N);
8793   // If this gets put into a CSE map, add it.
8794   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
8795   return N;
8796 }
8797 
8798 SDNode *SelectionDAG::
8799 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
8800   SDValue Ops[] = { Op1, Op2, Op3 };
8801   return UpdateNodeOperands(N, Ops);
8802 }
8803 
8804 SDNode *SelectionDAG::
8805 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
8806                    SDValue Op3, SDValue Op4) {
8807   SDValue Ops[] = { Op1, Op2, Op3, Op4 };
8808   return UpdateNodeOperands(N, Ops);
8809 }
8810 
8811 SDNode *SelectionDAG::
8812 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
8813                    SDValue Op3, SDValue Op4, SDValue Op5) {
8814   SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
8815   return UpdateNodeOperands(N, Ops);
8816 }
8817 
8818 SDNode *SelectionDAG::
8819 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
8820   unsigned NumOps = Ops.size();
8821   assert(N->getNumOperands() == NumOps &&
8822          "Update with wrong number of operands");
8823 
8824   // If no operands changed just return the input node.
8825   if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
8826     return N;
8827 
8828   // See if the modified node already exists.
8829   void *InsertPos = nullptr;
8830   if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
8831     return Existing;
8832 
8833   // Nope it doesn't.  Remove the node from its current place in the maps.
8834   if (InsertPos)
8835     if (!RemoveNodeFromCSEMaps(N))
8836       InsertPos = nullptr;
8837 
8838   // Now we update the operands.
8839   for (unsigned i = 0; i != NumOps; ++i)
8840     if (N->OperandList[i] != Ops[i])
8841       N->OperandList[i].set(Ops[i]);
8842 
8843   updateDivergence(N);
8844   // If this gets put into a CSE map, add it.
8845   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
8846   return N;
8847 }
8848 
8849 /// DropOperands - Release the operands and set this node to have
8850 /// zero operands.
8851 void SDNode::DropOperands() {
8852   // Unlike the code in MorphNodeTo that does this, we don't need to
8853   // watch for dead nodes here.
8854   for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
8855     SDUse &Use = *I++;
8856     Use.set(SDValue());
8857   }
8858 }
8859 
8860 void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
8861                                   ArrayRef<MachineMemOperand *> NewMemRefs) {
8862   if (NewMemRefs.empty()) {
8863     N->clearMemRefs();
8864     return;
8865   }
8866 
8867   // Check if we can avoid allocating by storing a single reference directly.
8868   if (NewMemRefs.size() == 1) {
8869     N->MemRefs = NewMemRefs[0];
8870     N->NumMemRefs = 1;
8871     return;
8872   }
8873 
8874   MachineMemOperand **MemRefsBuffer =
8875       Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
8876   llvm::copy(NewMemRefs, MemRefsBuffer);
8877   N->MemRefs = MemRefsBuffer;
8878   N->NumMemRefs = static_cast<int>(NewMemRefs.size());
8879 }
8880 
8881 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
8882 /// machine opcode.
8883 ///
8884 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8885                                    EVT VT) {
8886   SDVTList VTs = getVTList(VT);
8887   return SelectNodeTo(N, MachineOpc, VTs, None);
8888 }
8889 
8890 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8891                                    EVT VT, SDValue Op1) {
8892   SDVTList VTs = getVTList(VT);
8893   SDValue Ops[] = { Op1 };
8894   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8895 }
8896 
8897 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8898                                    EVT VT, SDValue Op1,
8899                                    SDValue Op2) {
8900   SDVTList VTs = getVTList(VT);
8901   SDValue Ops[] = { Op1, Op2 };
8902   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8903 }
8904 
8905 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8906                                    EVT VT, SDValue Op1,
8907                                    SDValue Op2, SDValue Op3) {
8908   SDVTList VTs = getVTList(VT);
8909   SDValue Ops[] = { Op1, Op2, Op3 };
8910   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8911 }
8912 
8913 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8914                                    EVT VT, ArrayRef<SDValue> Ops) {
8915   SDVTList VTs = getVTList(VT);
8916   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8917 }
8918 
8919 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8920                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
8921   SDVTList VTs = getVTList(VT1, VT2);
8922   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8923 }
8924 
8925 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8926                                    EVT VT1, EVT VT2) {
8927   SDVTList VTs = getVTList(VT1, VT2);
8928   return SelectNodeTo(N, MachineOpc, VTs, None);
8929 }
8930 
8931 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8932                                    EVT VT1, EVT VT2, EVT VT3,
8933                                    ArrayRef<SDValue> Ops) {
8934   SDVTList VTs = getVTList(VT1, VT2, VT3);
8935   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8936 }
8937 
8938 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8939                                    EVT VT1, EVT VT2,
8940                                    SDValue Op1, SDValue Op2) {
8941   SDVTList VTs = getVTList(VT1, VT2);
8942   SDValue Ops[] = { Op1, Op2 };
8943   return SelectNodeTo(N, MachineOpc, VTs, Ops);
8944 }
8945 
8946 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8947                                    SDVTList VTs,ArrayRef<SDValue> Ops) {
8948   SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
8949   // Reset the NodeID to -1.
8950   New->setNodeId(-1);
8951   if (New != N) {
8952     ReplaceAllUsesWith(N, New);
8953     RemoveDeadNode(N);
8954   }
8955   return New;
8956 }
8957 
8958 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
8959 /// the line number information on the merged node since it is not possible to
8960 /// preserve the information that operation is associated with multiple lines.
8961 /// This will make the debugger working better at -O0, were there is a higher
8962 /// probability having other instructions associated with that line.
8963 ///
8964 /// For IROrder, we keep the smaller of the two
8965 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
8966   DebugLoc NLoc = N->getDebugLoc();
8967   if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
8968     N->setDebugLoc(DebugLoc());
8969   }
8970   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
8971   N->setIROrder(Order);
8972   return N;
8973 }
8974 
8975 /// MorphNodeTo - This *mutates* the specified node to have the specified
8976 /// return type, opcode, and operands.
8977 ///
8978 /// Note that MorphNodeTo returns the resultant node.  If there is already a
8979 /// node of the specified opcode and operands, it returns that node instead of
8980 /// the current one.  Note that the SDLoc need not be the same.
8981 ///
8982 /// Using MorphNodeTo is faster than creating a new node and swapping it in
8983 /// with ReplaceAllUsesWith both because it often avoids allocating a new
8984 /// node, and because it doesn't require CSE recalculation for any of
8985 /// the node's users.
8986 ///
8987 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
8988 /// As a consequence it isn't appropriate to use from within the DAG combiner or
8989 /// the legalizer which maintain worklists that would need to be updated when
8990 /// deleting things.
8991 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
8992                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
8993   // If an identical node already exists, use it.
8994   void *IP = nullptr;
8995   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
8996     FoldingSetNodeID ID;
8997     AddNodeIDNode(ID, Opc, VTs, Ops);
8998     if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
8999       return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
9000   }
9001 
9002   if (!RemoveNodeFromCSEMaps(N))
9003     IP = nullptr;
9004 
9005   // Start the morphing.
9006   N->NodeType = Opc;
9007   N->ValueList = VTs.VTs;
9008   N->NumValues = VTs.NumVTs;
9009 
9010   // Clear the operands list, updating used nodes to remove this from their
9011   // use list.  Keep track of any operands that become dead as a result.
9012   SmallPtrSet<SDNode*, 16> DeadNodeSet;
9013   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
9014     SDUse &Use = *I++;
9015     SDNode *Used = Use.getNode();
9016     Use.set(SDValue());
9017     if (Used->use_empty())
9018       DeadNodeSet.insert(Used);
9019   }
9020 
9021   // For MachineNode, initialize the memory references information.
9022   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
9023     MN->clearMemRefs();
9024 
9025   // Swap for an appropriately sized array from the recycler.
9026   removeOperands(N);
9027   createOperands(N, Ops);
9028 
9029   // Delete any nodes that are still dead after adding the uses for the
9030   // new operands.
9031   if (!DeadNodeSet.empty()) {
9032     SmallVector<SDNode *, 16> DeadNodes;
9033     for (SDNode *N : DeadNodeSet)
9034       if (N->use_empty())
9035         DeadNodes.push_back(N);
9036     RemoveDeadNodes(DeadNodes);
9037   }
9038 
9039   if (IP)
9040     CSEMap.InsertNode(N, IP);   // Memoize the new node.
9041   return N;
9042 }
9043 
9044 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
9045   unsigned OrigOpc = Node->getOpcode();
9046   unsigned NewOpc;
9047   switch (OrigOpc) {
9048   default:
9049     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
9050 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
9051   case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
9052 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
9053   case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
9054 #include "llvm/IR/ConstrainedOps.def"
9055   }
9056 
9057   assert(Node->getNumValues() == 2 && "Unexpected number of results!");
9058 
9059   // We're taking this node out of the chain, so we need to re-link things.
9060   SDValue InputChain = Node->getOperand(0);
9061   SDValue OutputChain = SDValue(Node, 1);
9062   ReplaceAllUsesOfValueWith(OutputChain, InputChain);
9063 
9064   SmallVector<SDValue, 3> Ops;
9065   for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
9066     Ops.push_back(Node->getOperand(i));
9067 
9068   SDVTList VTs = getVTList(Node->getValueType(0));
9069   SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);
9070 
9071   // MorphNodeTo can operate in two ways: if an existing node with the
9072   // specified operands exists, it can just return it.  Otherwise, it
9073   // updates the node in place to have the requested operands.
9074   if (Res == Node) {
9075     // If we updated the node in place, reset the node ID.  To the isel,
9076     // this should be just like a newly allocated machine node.
9077     Res->setNodeId(-1);
9078   } else {
9079     ReplaceAllUsesWith(Node, Res);
9080     RemoveDeadNode(Node);
9081   }
9082 
9083   return Res;
9084 }
9085 
9086 /// getMachineNode - These are used for target selectors to create a new node
9087 /// with specified return type(s), MachineInstr opcode, and operands.
9088 ///
9089 /// Note that getMachineNode returns the resultant node.  If there is already a
9090 /// node of the specified opcode and operands, it returns that node instead of
9091 /// the current one.
9092 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9093                                             EVT VT) {
9094   SDVTList VTs = getVTList(VT);
9095   return getMachineNode(Opcode, dl, VTs, None);
9096 }
9097 
9098 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9099                                             EVT VT, SDValue Op1) {
9100   SDVTList VTs = getVTList(VT);
9101   SDValue Ops[] = { Op1 };
9102   return getMachineNode(Opcode, dl, VTs, Ops);
9103 }
9104 
9105 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9106                                             EVT VT, SDValue Op1, SDValue Op2) {
9107   SDVTList VTs = getVTList(VT);
9108   SDValue Ops[] = { Op1, Op2 };
9109   return getMachineNode(Opcode, dl, VTs, Ops);
9110 }
9111 
9112 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9113                                             EVT VT, SDValue Op1, SDValue Op2,
9114                                             SDValue Op3) {
9115   SDVTList VTs = getVTList(VT);
9116   SDValue Ops[] = { Op1, Op2, Op3 };
9117   return getMachineNode(Opcode, dl, VTs, Ops);
9118 }
9119 
9120 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9121                                             EVT VT, ArrayRef<SDValue> Ops) {
9122   SDVTList VTs = getVTList(VT);
9123   return getMachineNode(Opcode, dl, VTs, Ops);
9124 }
9125 
9126 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9127                                             EVT VT1, EVT VT2, SDValue Op1,
9128                                             SDValue Op2) {
9129   SDVTList VTs = getVTList(VT1, VT2);
9130   SDValue Ops[] = { Op1, Op2 };
9131   return getMachineNode(Opcode, dl, VTs, Ops);
9132 }
9133 
9134 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9135                                             EVT VT1, EVT VT2, SDValue Op1,
9136                                             SDValue Op2, SDValue Op3) {
9137   SDVTList VTs = getVTList(VT1, VT2);
9138   SDValue Ops[] = { Op1, Op2, Op3 };
9139   return getMachineNode(Opcode, dl, VTs, Ops);
9140 }
9141 
9142 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9143                                             EVT VT1, EVT VT2,
9144                                             ArrayRef<SDValue> Ops) {
9145   SDVTList VTs = getVTList(VT1, VT2);
9146   return getMachineNode(Opcode, dl, VTs, Ops);
9147 }
9148 
9149 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9150                                             EVT VT1, EVT VT2, EVT VT3,
9151                                             SDValue Op1, SDValue Op2) {
9152   SDVTList VTs = getVTList(VT1, VT2, VT3);
9153   SDValue Ops[] = { Op1, Op2 };
9154   return getMachineNode(Opcode, dl, VTs, Ops);
9155 }
9156 
9157 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9158                                             EVT VT1, EVT VT2, EVT VT3,
9159                                             SDValue Op1, SDValue Op2,
9160                                             SDValue Op3) {
9161   SDVTList VTs = getVTList(VT1, VT2, VT3);
9162   SDValue Ops[] = { Op1, Op2, Op3 };
9163   return getMachineNode(Opcode, dl, VTs, Ops);
9164 }
9165 
9166 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9167                                             EVT VT1, EVT VT2, EVT VT3,
9168                                             ArrayRef<SDValue> Ops) {
9169   SDVTList VTs = getVTList(VT1, VT2, VT3);
9170   return getMachineNode(Opcode, dl, VTs, Ops);
9171 }
9172 
9173 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9174                                             ArrayRef<EVT> ResultTys,
9175                                             ArrayRef<SDValue> Ops) {
9176   SDVTList VTs = getVTList(ResultTys);
9177   return getMachineNode(Opcode, dl, VTs, Ops);
9178 }
9179 
9180 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
9181                                             SDVTList VTs,
9182                                             ArrayRef<SDValue> Ops) {
9183   bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
9184   MachineSDNode *N;
9185   void *IP = nullptr;
9186 
9187   if (DoCSE) {
9188     FoldingSetNodeID ID;
9189     AddNodeIDNode(ID, ~Opcode, VTs, Ops);
9190     IP = nullptr;
9191     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
9192       return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
9193     }
9194   }
9195 
9196   // Allocate a new MachineSDNode.
9197   N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
9198   createOperands(N, Ops);
9199 
9200   if (DoCSE)
9201     CSEMap.InsertNode(N, IP);
9202 
9203   InsertNode(N);
9204   NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
9205   return N;
9206 }
9207 
9208 /// getTargetExtractSubreg - A convenience function for creating
9209 /// TargetOpcode::EXTRACT_SUBREG nodes.
9210 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
9211                                              SDValue Operand) {
9212   SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
9213   SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
9214                                   VT, Operand, SRIdxVal);
9215   return SDValue(Subreg, 0);
9216 }
9217 
9218 /// getTargetInsertSubreg - A convenience function for creating
9219 /// TargetOpcode::INSERT_SUBREG nodes.
9220 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
9221                                             SDValue Operand, SDValue Subreg) {
9222   SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
9223   SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
9224                                   VT, Operand, Subreg, SRIdxVal);
9225   return SDValue(Result, 0);
9226 }
9227 
9228 /// getNodeIfExists - Get the specified node if it's already available, or
9229 /// else return NULL.
9230 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
9231                                       ArrayRef<SDValue> Ops) {
9232   SDNodeFlags Flags;
9233   if (Inserter)
9234     Flags = Inserter->getFlags();
9235   return getNodeIfExists(Opcode, VTList, Ops, Flags);
9236 }
9237 
9238 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
9239                                       ArrayRef<SDValue> Ops,
9240                                       const SDNodeFlags Flags) {
9241   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
9242     FoldingSetNodeID ID;
9243     AddNodeIDNode(ID, Opcode, VTList, Ops);
9244     void *IP = nullptr;
9245     if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
9246       E->intersectFlagsWith(Flags);
9247       return E;
9248     }
9249   }
9250   return nullptr;
9251 }
9252 
9253 /// doesNodeExist - Check if a node exists without modifying its flags.
9254 bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
9255                                  ArrayRef<SDValue> Ops) {
9256   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
9257     FoldingSetNodeID ID;
9258     AddNodeIDNode(ID, Opcode, VTList, Ops);
9259     void *IP = nullptr;
9260     if (FindNodeOrInsertPos(ID, SDLoc(), IP))
9261       return true;
9262   }
9263   return false;
9264 }
9265 
9266 /// getDbgValue - Creates a SDDbgValue node.
9267 ///
9268 /// SDNode
9269 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
9270                                       SDNode *N, unsigned R, bool IsIndirect,
9271                                       const DebugLoc &DL, unsigned O) {
9272   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9273          "Expected inlined-at fields to agree");
9274   return new (DbgInfo->getAlloc())
9275       SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromNode(N, R),
9276                  {}, IsIndirect, DL, O,
9277                  /*IsVariadic=*/false);
9278 }
9279 
9280 /// Constant
9281 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
9282                                               DIExpression *Expr,
9283                                               const Value *C,
9284                                               const DebugLoc &DL, unsigned O) {
9285   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9286          "Expected inlined-at fields to agree");
9287   return new (DbgInfo->getAlloc())
9288       SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromConst(C), {},
9289                  /*IsIndirect=*/false, DL, O,
9290                  /*IsVariadic=*/false);
9291 }
9292 
9293 /// FrameIndex
9294 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
9295                                                 DIExpression *Expr, unsigned FI,
9296                                                 bool IsIndirect,
9297                                                 const DebugLoc &DL,
9298                                                 unsigned O) {
9299   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9300          "Expected inlined-at fields to agree");
9301   return getFrameIndexDbgValue(Var, Expr, FI, {}, IsIndirect, DL, O);
9302 }
9303 
9304 /// FrameIndex with dependencies
9305 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
9306                                                 DIExpression *Expr, unsigned FI,
9307                                                 ArrayRef<SDNode *> Dependencies,
9308                                                 bool IsIndirect,
9309                                                 const DebugLoc &DL,
9310                                                 unsigned O) {
9311   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9312          "Expected inlined-at fields to agree");
9313   return new (DbgInfo->getAlloc())
9314       SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromFrameIdx(FI),
9315                  Dependencies, IsIndirect, DL, O,
9316                  /*IsVariadic=*/false);
9317 }
9318 
9319 /// VReg
9320 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
9321                                           unsigned VReg, bool IsIndirect,
9322                                           const DebugLoc &DL, unsigned O) {
9323   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9324          "Expected inlined-at fields to agree");
9325   return new (DbgInfo->getAlloc())
9326       SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromVReg(VReg),
9327                  {}, IsIndirect, DL, O,
9328                  /*IsVariadic=*/false);
9329 }
9330 
9331 SDDbgValue *SelectionDAG::getDbgValueList(DIVariable *Var, DIExpression *Expr,
9332                                           ArrayRef<SDDbgOperand> Locs,
9333                                           ArrayRef<SDNode *> Dependencies,
9334                                           bool IsIndirect, const DebugLoc &DL,
9335                                           unsigned O, bool IsVariadic) {
9336   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9337          "Expected inlined-at fields to agree");
9338   return new (DbgInfo->getAlloc())
9339       SDDbgValue(DbgInfo->getAlloc(), Var, Expr, Locs, Dependencies, IsIndirect,
9340                  DL, O, IsVariadic);
9341 }
9342 
9343 void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
9344                                      unsigned OffsetInBits, unsigned SizeInBits,
9345                                      bool InvalidateDbg) {
9346   SDNode *FromNode = From.getNode();
9347   SDNode *ToNode = To.getNode();
9348   assert(FromNode && ToNode && "Can't modify dbg values");
9349 
9350   // PR35338
9351   // TODO: assert(From != To && "Redundant dbg value transfer");
9352   // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
9353   if (From == To || FromNode == ToNode)
9354     return;
9355 
9356   if (!FromNode->getHasDebugValue())
9357     return;
9358 
9359   SDDbgOperand FromLocOp =
9360       SDDbgOperand::fromNode(From.getNode(), From.getResNo());
9361   SDDbgOperand ToLocOp = SDDbgOperand::fromNode(To.getNode(), To.getResNo());
9362 
9363   SmallVector<SDDbgValue *, 2> ClonedDVs;
9364   for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
9365     if (Dbg->isInvalidated())
9366       continue;
9367 
9368     // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
9369 
9370     // Create a new location ops vector that is equal to the old vector, but
9371     // with each instance of FromLocOp replaced with ToLocOp.
9372     bool Changed = false;
9373     auto NewLocOps = Dbg->copyLocationOps();
9374     std::replace_if(
9375         NewLocOps.begin(), NewLocOps.end(),
9376         [&Changed, FromLocOp](const SDDbgOperand &Op) {
9377           bool Match = Op == FromLocOp;
9378           Changed |= Match;
9379           return Match;
9380         },
9381         ToLocOp);
9382     // Ignore this SDDbgValue if we didn't find a matching location.
9383     if (!Changed)
9384       continue;
9385 
9386     DIVariable *Var = Dbg->getVariable();
9387     auto *Expr = Dbg->getExpression();
9388     // If a fragment is requested, update the expression.
9389     if (SizeInBits) {
9390       // When splitting a larger (e.g., sign-extended) value whose
9391       // lower bits are described with an SDDbgValue, do not attempt
9392       // to transfer the SDDbgValue to the upper bits.
9393       if (auto FI = Expr->getFragmentInfo())
9394         if (OffsetInBits + SizeInBits > FI->SizeInBits)
9395           continue;
9396       auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
9397                                                              SizeInBits);
9398       if (!Fragment)
9399         continue;
9400       Expr = *Fragment;
9401     }
9402 
9403     auto AdditionalDependencies = Dbg->getAdditionalDependencies();
9404     // Clone the SDDbgValue and move it to To.
9405     SDDbgValue *Clone = getDbgValueList(
9406         Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
9407         Dbg->getDebugLoc(), std::max(ToNode->getIROrder(), Dbg->getOrder()),
9408         Dbg->isVariadic());
9409     ClonedDVs.push_back(Clone);
9410 
9411     if (InvalidateDbg) {
9412       // Invalidate value and indicate the SDDbgValue should not be emitted.
9413       Dbg->setIsInvalidated();
9414       Dbg->setIsEmitted();
9415     }
9416   }
9417 
9418   for (SDDbgValue *Dbg : ClonedDVs) {
9419     assert(is_contained(Dbg->getSDNodes(), ToNode) &&
9420            "Transferred DbgValues should depend on the new SDNode");
9421     AddDbgValue(Dbg, false);
9422   }
9423 }
9424 
9425 void SelectionDAG::salvageDebugInfo(SDNode &N) {
9426   if (!N.getHasDebugValue())
9427     return;
9428 
9429   SmallVector<SDDbgValue *, 2> ClonedDVs;
9430   for (auto DV : GetDbgValues(&N)) {
9431     if (DV->isInvalidated())
9432       continue;
9433     switch (N.getOpcode()) {
9434     default:
9435       break;
9436     case ISD::ADD:
9437       SDValue N0 = N.getOperand(0);
9438       SDValue N1 = N.getOperand(1);
9439       if (!isConstantIntBuildVectorOrConstantInt(N0) &&
9440           isConstantIntBuildVectorOrConstantInt(N1)) {
9441         uint64_t Offset = N.getConstantOperandVal(1);
9442 
9443         // Rewrite an ADD constant node into a DIExpression. Since we are
9444         // performing arithmetic to compute the variable's *value* in the
9445         // DIExpression, we need to mark the expression with a
9446         // DW_OP_stack_value.
9447         auto *DIExpr = DV->getExpression();
9448         auto NewLocOps = DV->copyLocationOps();
9449         bool Changed = false;
9450         for (size_t i = 0; i < NewLocOps.size(); ++i) {
9451           // We're not given a ResNo to compare against because the whole
9452           // node is going away. We know that any ISD::ADD only has one
9453           // result, so we can assume any node match is using the result.
9454           if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE ||
9455               NewLocOps[i].getSDNode() != &N)
9456             continue;
9457           NewLocOps[i] = SDDbgOperand::fromNode(N0.getNode(), N0.getResNo());
9458           SmallVector<uint64_t, 3> ExprOps;
9459           DIExpression::appendOffset(ExprOps, Offset);
9460           DIExpr = DIExpression::appendOpsToArg(DIExpr, ExprOps, i, true);
9461           Changed = true;
9462         }
9463         (void)Changed;
9464         assert(Changed && "Salvage target doesn't use N");
9465 
9466         auto AdditionalDependencies = DV->getAdditionalDependencies();
9467         SDDbgValue *Clone = getDbgValueList(DV->getVariable(), DIExpr,
9468                                             NewLocOps, AdditionalDependencies,
9469                                             DV->isIndirect(), DV->getDebugLoc(),
9470                                             DV->getOrder(), DV->isVariadic());
9471         ClonedDVs.push_back(Clone);
9472         DV->setIsInvalidated();
9473         DV->setIsEmitted();
9474         LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
9475                    N0.getNode()->dumprFull(this);
9476                    dbgs() << " into " << *DIExpr << '\n');
9477       }
9478     }
9479   }
9480 
9481   for (SDDbgValue *Dbg : ClonedDVs) {
9482     assert(!Dbg->getSDNodes().empty() &&
9483            "Salvaged DbgValue should depend on a new SDNode");
9484     AddDbgValue(Dbg, false);
9485   }
9486 }
9487 
9488 /// Creates a SDDbgLabel node.
9489 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
9490                                       const DebugLoc &DL, unsigned O) {
9491   assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
9492          "Expected inlined-at fields to agree");
9493   return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
9494 }
9495 
9496 namespace {
9497 
9498 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
9499 /// pointed to by a use iterator is deleted, increment the use iterator
9500 /// so that it doesn't dangle.
9501 ///
9502 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
9503   SDNode::use_iterator &UI;
9504   SDNode::use_iterator &UE;
9505 
9506   void NodeDeleted(SDNode *N, SDNode *E) override {
9507     // Increment the iterator as needed.
9508     while (UI != UE && N == *UI)
9509       ++UI;
9510   }
9511 
9512 public:
9513   RAUWUpdateListener(SelectionDAG &d,
9514                      SDNode::use_iterator &ui,
9515                      SDNode::use_iterator &ue)
9516     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
9517 };
9518 
9519 } // end anonymous namespace
9520 
9521 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
9522 /// This can cause recursive merging of nodes in the DAG.
9523 ///
9524 /// This version assumes From has a single result value.
9525 ///
9526 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
9527   SDNode *From = FromN.getNode();
9528   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
9529          "Cannot replace with this method!");
9530   assert(From != To.getNode() && "Cannot replace uses of with self");
9531 
9532   // Preserve Debug Values
9533   transferDbgValues(FromN, To);
9534 
9535   // Iterate over all the existing uses of From. New uses will be added
9536   // to the beginning of the use list, which we avoid visiting.
9537   // This specifically avoids visiting uses of From that arise while the
9538   // replacement is happening, because any such uses would be the result
9539   // of CSE: If an existing node looks like From after one of its operands
9540   // is replaced by To, we don't want to replace of all its users with To
9541   // too. See PR3018 for more info.
9542   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
9543   RAUWUpdateListener Listener(*this, UI, UE);
9544   while (UI != UE) {
9545     SDNode *User = *UI;
9546 
9547     // This node is about to morph, remove its old self from the CSE maps.
9548     RemoveNodeFromCSEMaps(User);
9549 
9550     // A user can appear in a use list multiple times, and when this
9551     // happens the uses are usually next to each other in the list.
9552     // To help reduce the number of CSE recomputations, process all
9553     // the uses of this user that we can find this way.
9554     do {
9555       SDUse &Use = UI.getUse();
9556       ++UI;
9557       Use.set(To);
9558       if (To->isDivergent() != From->isDivergent())
9559         updateDivergence(User);
9560     } while (UI != UE && *UI == User);
9561     // Now that we have modified User, add it back to the CSE maps.  If it
9562     // already exists there, recursively merge the results together.
9563     AddModifiedNodeToCSEMaps(User);
9564   }
9565 
9566   // If we just RAUW'd the root, take note.
9567   if (FromN == getRoot())
9568     setRoot(To);
9569 }
9570 
9571 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
9572 /// This can cause recursive merging of nodes in the DAG.
9573 ///
9574 /// This version assumes that for each value of From, there is a
9575 /// corresponding value in To in the same position with the same type.
9576 ///
9577 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
9578 #ifndef NDEBUG
9579   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
9580     assert((!From->hasAnyUseOfValue(i) ||
9581             From->getValueType(i) == To->getValueType(i)) &&
9582            "Cannot use this version of ReplaceAllUsesWith!");
9583 #endif
9584 
9585   // Handle the trivial case.
9586   if (From == To)
9587     return;
9588 
9589   // Preserve Debug Info. Only do this if there's a use.
9590   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
9591     if (From->hasAnyUseOfValue(i)) {
9592       assert((i < To->getNumValues()) && "Invalid To location");
9593       transferDbgValues(SDValue(From, i), SDValue(To, i));
9594     }
9595 
9596   // Iterate over just the existing users of From. See the comments in
9597   // the ReplaceAllUsesWith above.
9598   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
9599   RAUWUpdateListener Listener(*this, UI, UE);
9600   while (UI != UE) {
9601     SDNode *User = *UI;
9602 
9603     // This node is about to morph, remove its old self from the CSE maps.
9604     RemoveNodeFromCSEMaps(User);
9605 
9606     // A user can appear in a use list multiple times, and when this
9607     // happens the uses are usually next to each other in the list.
9608     // To help reduce the number of CSE recomputations, process all
9609     // the uses of this user that we can find this way.
9610     do {
9611       SDUse &Use = UI.getUse();
9612       ++UI;
9613       Use.setNode(To);
9614       if (To->isDivergent() != From->isDivergent())
9615         updateDivergence(User);
9616     } while (UI != UE && *UI == User);
9617 
9618     // Now that we have modified User, add it back to the CSE maps.  If it
9619     // already exists there, recursively merge the results together.
9620     AddModifiedNodeToCSEMaps(User);
9621   }
9622 
9623   // If we just RAUW'd the root, take note.
9624   if (From == getRoot().getNode())
9625     setRoot(SDValue(To, getRoot().getResNo()));
9626 }
9627 
9628 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
9629 /// This can cause recursive merging of nodes in the DAG.
9630 ///
9631 /// This version can replace From with any result values.  To must match the
9632 /// number and types of values returned by From.
9633 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
9634   if (From->getNumValues() == 1)  // Handle the simple case efficiently.
9635     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
9636 
9637   // Preserve Debug Info.
9638   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
9639     transferDbgValues(SDValue(From, i), To[i]);
9640 
9641   // Iterate over just the existing users of From. See the comments in
9642   // the ReplaceAllUsesWith above.
9643   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
9644   RAUWUpdateListener Listener(*this, UI, UE);
9645   while (UI != UE) {
9646     SDNode *User = *UI;
9647 
9648     // This node is about to morph, remove its old self from the CSE maps.
9649     RemoveNodeFromCSEMaps(User);
9650 
9651     // A user can appear in a use list multiple times, and when this happens the
9652     // uses are usually next to each other in the list.  To help reduce the
9653     // number of CSE and divergence recomputations, process all the uses of this
9654     // user that we can find this way.
9655     bool To_IsDivergent = false;
9656     do {
9657       SDUse &Use = UI.getUse();
9658       const SDValue &ToOp = To[Use.getResNo()];
9659       ++UI;
9660       Use.set(ToOp);
9661       To_IsDivergent |= ToOp->isDivergent();
9662     } while (UI != UE && *UI == User);
9663 
9664     if (To_IsDivergent != From->isDivergent())
9665       updateDivergence(User);
9666 
9667     // Now that we have modified User, add it back to the CSE maps.  If it
9668     // already exists there, recursively merge the results together.
9669     AddModifiedNodeToCSEMaps(User);
9670   }
9671 
9672   // If we just RAUW'd the root, take note.
9673   if (From == getRoot().getNode())
9674     setRoot(SDValue(To[getRoot().getResNo()]));
9675 }
9676 
9677 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
9678 /// uses of other values produced by From.getNode() alone.  The Deleted
9679 /// vector is handled the same way as for ReplaceAllUsesWith.
9680 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
9681   // Handle the really simple, really trivial case efficiently.
9682   if (From == To) return;
9683 
9684   // Handle the simple, trivial, case efficiently.
9685   if (From.getNode()->getNumValues() == 1) {
9686     ReplaceAllUsesWith(From, To);
9687     return;
9688   }
9689 
9690   // Preserve Debug Info.
9691   transferDbgValues(From, To);
9692 
9693   // Iterate over just the existing users of From. See the comments in
9694   // the ReplaceAllUsesWith above.
9695   SDNode::use_iterator UI = From.getNode()->use_begin(),
9696                        UE = From.getNode()->use_end();
9697   RAUWUpdateListener Listener(*this, UI, UE);
9698   while (UI != UE) {
9699     SDNode *User = *UI;
9700     bool UserRemovedFromCSEMaps = false;
9701 
9702     // A user can appear in a use list multiple times, and when this
9703     // happens the uses are usually next to each other in the list.
9704     // To help reduce the number of CSE recomputations, process all
9705     // the uses of this user that we can find this way.
9706     do {
9707       SDUse &Use = UI.getUse();
9708 
9709       // Skip uses of different values from the same node.
9710       if (Use.getResNo() != From.getResNo()) {
9711         ++UI;
9712         continue;
9713       }
9714 
9715       // If this node hasn't been modified yet, it's still in the CSE maps,
9716       // so remove its old self from the CSE maps.
9717       if (!UserRemovedFromCSEMaps) {
9718         RemoveNodeFromCSEMaps(User);
9719         UserRemovedFromCSEMaps = true;
9720       }
9721 
9722       ++UI;
9723       Use.set(To);
9724       if (To->isDivergent() != From->isDivergent())
9725         updateDivergence(User);
9726     } while (UI != UE && *UI == User);
9727     // We are iterating over all uses of the From node, so if a use
9728     // doesn't use the specific value, no changes are made.
9729     if (!UserRemovedFromCSEMaps)
9730       continue;
9731 
9732     // Now that we have modified User, add it back to the CSE maps.  If it
9733     // already exists there, recursively merge the results together.
9734     AddModifiedNodeToCSEMaps(User);
9735   }
9736 
9737   // If we just RAUW'd the root, take note.
9738   if (From == getRoot())
9739     setRoot(To);
9740 }
9741 
9742 namespace {
9743 
9744 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
9745 /// to record information about a use.
9746 struct UseMemo {
9747   SDNode *User;
9748   unsigned Index;
9749   SDUse *Use;
9750 };
9751 
9752 /// operator< - Sort Memos by User.
9753 bool operator<(const UseMemo &L, const UseMemo &R) {
9754   return (intptr_t)L.User < (intptr_t)R.User;
9755 }
9756 
9757 /// RAUOVWUpdateListener - Helper for ReplaceAllUsesOfValuesWith - When the node
9758 /// pointed to by a UseMemo is deleted, set the User to nullptr to indicate that
9759 /// the node already has been taken care of recursively.
9760 class RAUOVWUpdateListener : public SelectionDAG::DAGUpdateListener {
9761   SmallVector<UseMemo, 4> &Uses;
9762 
9763   void NodeDeleted(SDNode *N, SDNode *E) override {
9764     for (UseMemo &Memo : Uses)
9765       if (Memo.User == N)
9766         Memo.User = nullptr;
9767   }
9768 
9769 public:
9770   RAUOVWUpdateListener(SelectionDAG &d, SmallVector<UseMemo, 4> &uses)
9771       : SelectionDAG::DAGUpdateListener(d), Uses(uses) {}
9772 };
9773 
9774 } // end anonymous namespace
9775 
9776 bool SelectionDAG::calculateDivergence(SDNode *N) {
9777   if (TLI->isSDNodeAlwaysUniform(N)) {
9778     assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, DA) &&
9779            "Conflicting divergence information!");
9780     return false;
9781   }
9782   if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA))
9783     return true;
9784   for (auto &Op : N->ops()) {
9785     if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
9786       return true;
9787   }
9788   return false;
9789 }
9790 
9791 void SelectionDAG::updateDivergence(SDNode *N) {
9792   SmallVector<SDNode *, 16> Worklist(1, N);
9793   do {
9794     N = Worklist.pop_back_val();
9795     bool IsDivergent = calculateDivergence(N);
9796     if (N->SDNodeBits.IsDivergent != IsDivergent) {
9797       N->SDNodeBits.IsDivergent = IsDivergent;
9798       llvm::append_range(Worklist, N->uses());
9799     }
9800   } while (!Worklist.empty());
9801 }
9802 
9803 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
9804   DenseMap<SDNode *, unsigned> Degree;
9805   Order.reserve(AllNodes.size());
9806   for (auto &N : allnodes()) {
9807     unsigned NOps = N.getNumOperands();
9808     Degree[&N] = NOps;
9809     if (0 == NOps)
9810       Order.push_back(&N);
9811   }
9812   for (size_t I = 0; I != Order.size(); ++I) {
9813     SDNode *N = Order[I];
9814     for (auto U : N->uses()) {
9815       unsigned &UnsortedOps = Degree[U];
9816       if (0 == --UnsortedOps)
9817         Order.push_back(U);
9818     }
9819   }
9820 }
9821 
9822 #ifndef NDEBUG
9823 void SelectionDAG::VerifyDAGDivergence() {
9824   std::vector<SDNode *> TopoOrder;
9825   CreateTopologicalOrder(TopoOrder);
9826   for (auto *N : TopoOrder) {
9827     assert(calculateDivergence(N) == N->isDivergent() &&
9828            "Divergence bit inconsistency detected");
9829   }
9830 }
9831 #endif
9832 
9833 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
9834 /// uses of other values produced by From.getNode() alone.  The same value
9835 /// may appear in both the From and To list.  The Deleted vector is
9836 /// handled the same way as for ReplaceAllUsesWith.
9837 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
9838                                               const SDValue *To,
9839                                               unsigned Num){
9840   // Handle the simple, trivial case efficiently.
9841   if (Num == 1)
9842     return ReplaceAllUsesOfValueWith(*From, *To);
9843 
9844   transferDbgValues(*From, *To);
9845 
9846   // Read up all the uses and make records of them. This helps
9847   // processing new uses that are introduced during the
9848   // replacement process.
9849   SmallVector<UseMemo, 4> Uses;
9850   for (unsigned i = 0; i != Num; ++i) {
9851     unsigned FromResNo = From[i].getResNo();
9852     SDNode *FromNode = From[i].getNode();
9853     for (SDNode::use_iterator UI = FromNode->use_begin(),
9854          E = FromNode->use_end(); UI != E; ++UI) {
9855       SDUse &Use = UI.getUse();
9856       if (Use.getResNo() == FromResNo) {
9857         UseMemo Memo = { *UI, i, &Use };
9858         Uses.push_back(Memo);
9859       }
9860     }
9861   }
9862 
9863   // Sort the uses, so that all the uses from a given User are together.
9864   llvm::sort(Uses);
9865   RAUOVWUpdateListener Listener(*this, Uses);
9866 
9867   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
9868        UseIndex != UseIndexEnd; ) {
9869     // We know that this user uses some value of From.  If it is the right
9870     // value, update it.
9871     SDNode *User = Uses[UseIndex].User;
9872     // If the node has been deleted by recursive CSE updates when updating
9873     // another node, then just skip this entry.
9874     if (User == nullptr) {
9875       ++UseIndex;
9876       continue;
9877     }
9878 
9879     // This node is about to morph, remove its old self from the CSE maps.
9880     RemoveNodeFromCSEMaps(User);
9881 
9882     // The Uses array is sorted, so all the uses for a given User
9883     // are next to each other in the list.
9884     // To help reduce the number of CSE recomputations, process all
9885     // the uses of this user that we can find this way.
9886     do {
9887       unsigned i = Uses[UseIndex].Index;
9888       SDUse &Use = *Uses[UseIndex].Use;
9889       ++UseIndex;
9890 
9891       Use.set(To[i]);
9892     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
9893 
9894     // Now that we have modified User, add it back to the CSE maps.  If it
9895     // already exists there, recursively merge the results together.
9896     AddModifiedNodeToCSEMaps(User);
9897   }
9898 }
9899 
9900 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
9901 /// based on their topological order. It returns the maximum id and a vector
9902 /// of the SDNodes* in assigned order by reference.
9903 unsigned SelectionDAG::AssignTopologicalOrder() {
9904   unsigned DAGSize = 0;
9905 
9906   // SortedPos tracks the progress of the algorithm. Nodes before it are
9907   // sorted, nodes after it are unsorted. When the algorithm completes
9908   // it is at the end of the list.
9909   allnodes_iterator SortedPos = allnodes_begin();
9910 
9911   // Visit all the nodes. Move nodes with no operands to the front of
9912   // the list immediately. Annotate nodes that do have operands with their
9913   // operand count. Before we do this, the Node Id fields of the nodes
9914   // may contain arbitrary values. After, the Node Id fields for nodes
9915   // before SortedPos will contain the topological sort index, and the
9916   // Node Id fields for nodes At SortedPos and after will contain the
9917   // count of outstanding operands.
9918   for (SDNode &N : llvm::make_early_inc_range(allnodes())) {
9919     checkForCycles(&N, this);
9920     unsigned Degree = N.getNumOperands();
9921     if (Degree == 0) {
9922       // A node with no uses, add it to the result array immediately.
9923       N.setNodeId(DAGSize++);
9924       allnodes_iterator Q(&N);
9925       if (Q != SortedPos)
9926         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
9927       assert(SortedPos != AllNodes.end() && "Overran node list");
9928       ++SortedPos;
9929     } else {
9930       // Temporarily use the Node Id as scratch space for the degree count.
9931       N.setNodeId(Degree);
9932     }
9933   }
9934 
9935   // Visit all the nodes. As we iterate, move nodes into sorted order,
9936   // such that by the time the end is reached all nodes will be sorted.
9937   for (SDNode &Node : allnodes()) {
9938     SDNode *N = &Node;
9939     checkForCycles(N, this);
9940     // N is in sorted position, so all its uses have one less operand
9941     // that needs to be sorted.
9942     for (SDNode *P : N->uses()) {
9943       unsigned Degree = P->getNodeId();
9944       assert(Degree != 0 && "Invalid node degree");
9945       --Degree;
9946       if (Degree == 0) {
9947         // All of P's operands are sorted, so P may sorted now.
9948         P->setNodeId(DAGSize++);
9949         if (P->getIterator() != SortedPos)
9950           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
9951         assert(SortedPos != AllNodes.end() && "Overran node list");
9952         ++SortedPos;
9953       } else {
9954         // Update P's outstanding operand count.
9955         P->setNodeId(Degree);
9956       }
9957     }
9958     if (Node.getIterator() == SortedPos) {
9959 #ifndef NDEBUG
9960       allnodes_iterator I(N);
9961       SDNode *S = &*++I;
9962       dbgs() << "Overran sorted position:\n";
9963       S->dumprFull(this); dbgs() << "\n";
9964       dbgs() << "Checking if this is due to cycles\n";
9965       checkForCycles(this, true);
9966 #endif
9967       llvm_unreachable(nullptr);
9968     }
9969   }
9970 
9971   assert(SortedPos == AllNodes.end() &&
9972          "Topological sort incomplete!");
9973   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
9974          "First node in topological sort is not the entry token!");
9975   assert(AllNodes.front().getNodeId() == 0 &&
9976          "First node in topological sort has non-zero id!");
9977   assert(AllNodes.front().getNumOperands() == 0 &&
9978          "First node in topological sort has operands!");
9979   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
9980          "Last node in topologic sort has unexpected id!");
9981   assert(AllNodes.back().use_empty() &&
9982          "Last node in topologic sort has users!");
9983   assert(DAGSize == allnodes_size() && "Node count mismatch!");
9984   return DAGSize;
9985 }
9986 
9987 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
9988 /// value is produced by SD.
9989 void SelectionDAG::AddDbgValue(SDDbgValue *DB, bool isParameter) {
9990   for (SDNode *SD : DB->getSDNodes()) {
9991     if (!SD)
9992       continue;
9993     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
9994     SD->setHasDebugValue(true);
9995   }
9996   DbgInfo->add(DB, isParameter);
9997 }
9998 
9999 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { DbgInfo->add(DB); }
10000 
10001 SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
10002                                                    SDValue NewMemOpChain) {
10003   assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
10004   assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
10005   // The new memory operation must have the same position as the old load in
10006   // terms of memory dependency. Create a TokenFactor for the old load and new
10007   // memory operation and update uses of the old load's output chain to use that
10008   // TokenFactor.
10009   if (OldChain == NewMemOpChain || OldChain.use_empty())
10010     return NewMemOpChain;
10011 
10012   SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
10013                                 OldChain, NewMemOpChain);
10014   ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
10015   UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
10016   return TokenFactor;
10017 }
10018 
10019 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
10020                                                    SDValue NewMemOp) {
10021   assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
10022   SDValue OldChain = SDValue(OldLoad, 1);
10023   SDValue NewMemOpChain = NewMemOp.getValue(1);
10024   return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
10025 }
10026 
10027 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
10028                                                      Function **OutFunction) {
10029   assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
10030 
10031   auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
10032   auto *Module = MF->getFunction().getParent();
10033   auto *Function = Module->getFunction(Symbol);
10034 
10035   if (OutFunction != nullptr)
10036       *OutFunction = Function;
10037 
10038   if (Function != nullptr) {
10039     auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
10040     return getGlobalAddress(Function, SDLoc(Op), PtrTy);
10041   }
10042 
10043   std::string ErrorStr;
10044   raw_string_ostream ErrorFormatter(ErrorStr);
10045   ErrorFormatter << "Undefined external symbol ";
10046   ErrorFormatter << '"' << Symbol << '"';
10047   report_fatal_error(Twine(ErrorFormatter.str()));
10048 }
10049 
10050 //===----------------------------------------------------------------------===//
10051 //                              SDNode Class
10052 //===----------------------------------------------------------------------===//
10053 
10054 bool llvm::isNullConstant(SDValue V) {
10055   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
10056   return Const != nullptr && Const->isZero();
10057 }
10058 
10059 bool llvm::isNullFPConstant(SDValue V) {
10060   ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
10061   return Const != nullptr && Const->isZero() && !Const->isNegative();
10062 }
10063 
10064 bool llvm::isAllOnesConstant(SDValue V) {
10065   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
10066   return Const != nullptr && Const->isAllOnes();
10067 }
10068 
10069 bool llvm::isOneConstant(SDValue V) {
10070   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
10071   return Const != nullptr && Const->isOne();
10072 }
10073 
10074 SDValue llvm::peekThroughBitcasts(SDValue V) {
10075   while (V.getOpcode() == ISD::BITCAST)
10076     V = V.getOperand(0);
10077   return V;
10078 }
10079 
10080 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
10081   while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
10082     V = V.getOperand(0);
10083   return V;
10084 }
10085 
10086 SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
10087   while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
10088     V = V.getOperand(0);
10089   return V;
10090 }
10091 
10092 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
10093   if (V.getOpcode() != ISD::XOR)
10094     return false;
10095   V = peekThroughBitcasts(V.getOperand(1));
10096   unsigned NumBits = V.getScalarValueSizeInBits();
10097   ConstantSDNode *C =
10098       isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
10099   return C && (C->getAPIntValue().countTrailingOnes() >= NumBits);
10100 }
10101 
10102 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
10103                                           bool AllowTruncation) {
10104   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
10105     return CN;
10106 
10107   // SplatVectors can truncate their operands. Ignore that case here unless
10108   // AllowTruncation is set.
10109   if (N->getOpcode() == ISD::SPLAT_VECTOR) {
10110     EVT VecEltVT = N->getValueType(0).getVectorElementType();
10111     if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
10112       EVT CVT = CN->getValueType(0);
10113       assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
10114       if (AllowTruncation || CVT == VecEltVT)
10115         return CN;
10116     }
10117   }
10118 
10119   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
10120     BitVector UndefElements;
10121     ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
10122 
10123     // BuildVectors can truncate their operands. Ignore that case here unless
10124     // AllowTruncation is set.
10125     if (CN && (UndefElements.none() || AllowUndefs)) {
10126       EVT CVT = CN->getValueType(0);
10127       EVT NSVT = N.getValueType().getScalarType();
10128       assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
10129       if (AllowTruncation || (CVT == NSVT))
10130         return CN;
10131     }
10132   }
10133 
10134   return nullptr;
10135 }
10136 
10137 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
10138                                           bool AllowUndefs,
10139                                           bool AllowTruncation) {
10140   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
10141     return CN;
10142 
10143   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
10144     BitVector UndefElements;
10145     ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
10146 
10147     // BuildVectors can truncate their operands. Ignore that case here unless
10148     // AllowTruncation is set.
10149     if (CN && (UndefElements.none() || AllowUndefs)) {
10150       EVT CVT = CN->getValueType(0);
10151       EVT NSVT = N.getValueType().getScalarType();
10152       assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
10153       if (AllowTruncation || (CVT == NSVT))
10154         return CN;
10155     }
10156   }
10157 
10158   return nullptr;
10159 }
10160 
10161 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
10162   if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
10163     return CN;
10164 
10165   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
10166     BitVector UndefElements;
10167     ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
10168     if (CN && (UndefElements.none() || AllowUndefs))
10169       return CN;
10170   }
10171 
10172   if (N.getOpcode() == ISD::SPLAT_VECTOR)
10173     if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
10174       return CN;
10175 
10176   return nullptr;
10177 }
10178 
10179 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
10180                                               const APInt &DemandedElts,
10181                                               bool AllowUndefs) {
10182   if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
10183     return CN;
10184 
10185   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
10186     BitVector UndefElements;
10187     ConstantFPSDNode *CN =
10188         BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
10189     if (CN && (UndefElements.none() || AllowUndefs))
10190       return CN;
10191   }
10192 
10193   return nullptr;
10194 }
10195 
10196 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
10197   // TODO: may want to use peekThroughBitcast() here.
10198   ConstantSDNode *C =
10199       isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation=*/true);
10200   return C && C->isZero();
10201 }
10202 
10203 bool llvm::isOneOrOneSplat(SDValue N, bool AllowUndefs) {
10204   // TODO: may want to use peekThroughBitcast() here.
10205   unsigned BitWidth = N.getScalarValueSizeInBits();
10206   ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
10207   return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
10208 }
10209 
10210 bool llvm::isAllOnesOrAllOnesSplat(SDValue N, bool AllowUndefs) {
10211   N = peekThroughBitcasts(N);
10212   unsigned BitWidth = N.getScalarValueSizeInBits();
10213   ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
10214   return C && C->isAllOnes() && C->getValueSizeInBits(0) == BitWidth;
10215 }
10216 
10217 HandleSDNode::~HandleSDNode() {
10218   DropOperands();
10219 }
10220 
10221 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
10222                                          const DebugLoc &DL,
10223                                          const GlobalValue *GA, EVT VT,
10224                                          int64_t o, unsigned TF)
10225     : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
10226   TheGlobal = GA;
10227 }
10228 
10229 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
10230                                          EVT VT, unsigned SrcAS,
10231                                          unsigned DestAS)
10232     : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
10233       SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
10234 
10235 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
10236                      SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
10237     : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
10238   MemSDNodeBits.IsVolatile = MMO->isVolatile();
10239   MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
10240   MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
10241   MemSDNodeBits.IsInvariant = MMO->isInvariant();
10242 
10243   // We check here that the size of the memory operand fits within the size of
10244   // the MMO. This is because the MMO might indicate only a possible address
10245   // range instead of specifying the affected memory addresses precisely.
10246   // TODO: Make MachineMemOperands aware of scalable vectors.
10247   assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() &&
10248          "Size mismatch!");
10249 }
10250 
10251 /// Profile - Gather unique data for the node.
10252 ///
10253 void SDNode::Profile(FoldingSetNodeID &ID) const {
10254   AddNodeIDNode(ID, this);
10255 }
10256 
10257 namespace {
10258 
10259   struct EVTArray {
10260     std::vector<EVT> VTs;
10261 
10262     EVTArray() {
10263       VTs.reserve(MVT::VALUETYPE_SIZE);
10264       for (unsigned i = 0; i < MVT::VALUETYPE_SIZE; ++i)
10265         VTs.push_back(MVT((MVT::SimpleValueType)i));
10266     }
10267   };
10268 
10269 } // end anonymous namespace
10270 
10271 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
10272 static ManagedStatic<EVTArray> SimpleVTArray;
10273 static ManagedStatic<sys::SmartMutex<true>> VTMutex;
10274 
10275 /// getValueTypeList - Return a pointer to the specified value type.
10276 ///
10277 const EVT *SDNode::getValueTypeList(EVT VT) {
10278   if (VT.isExtended()) {
10279     sys::SmartScopedLock<true> Lock(*VTMutex);
10280     return &(*EVTs->insert(VT).first);
10281   }
10282   assert(VT.getSimpleVT() < MVT::VALUETYPE_SIZE && "Value type out of range!");
10283   return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
10284 }
10285 
10286 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
10287 /// indicated value.  This method ignores uses of other values defined by this
10288 /// operation.
10289 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
10290   assert(Value < getNumValues() && "Bad value!");
10291 
10292   // TODO: Only iterate over uses of a given value of the node
10293   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
10294     if (UI.getUse().getResNo() == Value) {
10295       if (NUses == 0)
10296         return false;
10297       --NUses;
10298     }
10299   }
10300 
10301   // Found exactly the right number of uses?
10302   return NUses == 0;
10303 }
10304 
10305 /// hasAnyUseOfValue - Return true if there are any use of the indicated
10306 /// value. This method ignores uses of other values defined by this operation.
10307 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
10308   assert(Value < getNumValues() && "Bad value!");
10309 
10310   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
10311     if (UI.getUse().getResNo() == Value)
10312       return true;
10313 
10314   return false;
10315 }
10316 
10317 /// isOnlyUserOf - Return true if this node is the only use of N.
10318 bool SDNode::isOnlyUserOf(const SDNode *N) const {
10319   bool Seen = false;
10320   for (const SDNode *User : N->uses()) {
10321     if (User == this)
10322       Seen = true;
10323     else
10324       return false;
10325   }
10326 
10327   return Seen;
10328 }
10329 
10330 /// Return true if the only users of N are contained in Nodes.
10331 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
10332   bool Seen = false;
10333   for (const SDNode *User : N->uses()) {
10334     if (llvm::is_contained(Nodes, User))
10335       Seen = true;
10336     else
10337       return false;
10338   }
10339 
10340   return Seen;
10341 }
10342 
10343 /// isOperand - Return true if this node is an operand of N.
10344 bool SDValue::isOperandOf(const SDNode *N) const {
10345   return is_contained(N->op_values(), *this);
10346 }
10347 
10348 bool SDNode::isOperandOf(const SDNode *N) const {
10349   return any_of(N->op_values(),
10350                 [this](SDValue Op) { return this == Op.getNode(); });
10351 }
10352 
10353 /// reachesChainWithoutSideEffects - Return true if this operand (which must
10354 /// be a chain) reaches the specified operand without crossing any
10355 /// side-effecting instructions on any chain path.  In practice, this looks
10356 /// through token factors and non-volatile loads.  In order to remain efficient,
10357 /// this only looks a couple of nodes in, it does not do an exhaustive search.
10358 ///
10359 /// Note that we only need to examine chains when we're searching for
10360 /// side-effects; SelectionDAG requires that all side-effects are represented
10361 /// by chains, even if another operand would force a specific ordering. This
10362 /// constraint is necessary to allow transformations like splitting loads.
10363 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
10364                                              unsigned Depth) const {
10365   if (*this == Dest) return true;
10366 
10367   // Don't search too deeply, we just want to be able to see through
10368   // TokenFactor's etc.
10369   if (Depth == 0) return false;
10370 
10371   // If this is a token factor, all inputs to the TF happen in parallel.
10372   if (getOpcode() == ISD::TokenFactor) {
10373     // First, try a shallow search.
10374     if (is_contained((*this)->ops(), Dest)) {
10375       // We found the chain we want as an operand of this TokenFactor.
10376       // Essentially, we reach the chain without side-effects if we could
10377       // serialize the TokenFactor into a simple chain of operations with
10378       // Dest as the last operation. This is automatically true if the
10379       // chain has one use: there are no other ordering constraints.
10380       // If the chain has more than one use, we give up: some other
10381       // use of Dest might force a side-effect between Dest and the current
10382       // node.
10383       if (Dest.hasOneUse())
10384         return true;
10385     }
10386     // Next, try a deep search: check whether every operand of the TokenFactor
10387     // reaches Dest.
10388     return llvm::all_of((*this)->ops(), [=](SDValue Op) {
10389       return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
10390     });
10391   }
10392 
10393   // Loads don't have side effects, look through them.
10394   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
10395     if (Ld->isUnordered())
10396       return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
10397   }
10398   return false;
10399 }
10400 
10401 bool SDNode::hasPredecessor(const SDNode *N) const {
10402   SmallPtrSet<const SDNode *, 32> Visited;
10403   SmallVector<const SDNode *, 16> Worklist;
10404   Worklist.push_back(this);
10405   return hasPredecessorHelper(N, Visited, Worklist);
10406 }
10407 
10408 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
10409   this->Flags.intersectWith(Flags);
10410 }
10411 
10412 SDValue
10413 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
10414                                   ArrayRef<ISD::NodeType> CandidateBinOps,
10415                                   bool AllowPartials) {
10416   // The pattern must end in an extract from index 0.
10417   if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10418       !isNullConstant(Extract->getOperand(1)))
10419     return SDValue();
10420 
10421   // Match against one of the candidate binary ops.
10422   SDValue Op = Extract->getOperand(0);
10423   if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
10424         return Op.getOpcode() == unsigned(BinOp);
10425       }))
10426     return SDValue();
10427 
10428   // Floating-point reductions may require relaxed constraints on the final step
10429   // of the reduction because they may reorder intermediate operations.
10430   unsigned CandidateBinOp = Op.getOpcode();
10431   if (Op.getValueType().isFloatingPoint()) {
10432     SDNodeFlags Flags = Op->getFlags();
10433     switch (CandidateBinOp) {
10434     case ISD::FADD:
10435       if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
10436         return SDValue();
10437       break;
10438     default:
10439       llvm_unreachable("Unhandled FP opcode for binop reduction");
10440     }
10441   }
10442 
10443   // Matching failed - attempt to see if we did enough stages that a partial
10444   // reduction from a subvector is possible.
10445   auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
10446     if (!AllowPartials || !Op)
10447       return SDValue();
10448     EVT OpVT = Op.getValueType();
10449     EVT OpSVT = OpVT.getScalarType();
10450     EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
10451     if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
10452       return SDValue();
10453     BinOp = (ISD::NodeType)CandidateBinOp;
10454     return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
10455                    getVectorIdxConstant(0, SDLoc(Op)));
10456   };
10457 
10458   // At each stage, we're looking for something that looks like:
10459   // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
10460   //                    <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
10461   //                               i32 undef, i32 undef, i32 undef, i32 undef>
10462   // %a = binop <8 x i32> %op, %s
10463   // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
10464   // we expect something like:
10465   // <4,5,6,7,u,u,u,u>
10466   // <2,3,u,u,u,u,u,u>
10467   // <1,u,u,u,u,u,u,u>
10468   // While a partial reduction match would be:
10469   // <2,3,u,u,u,u,u,u>
10470   // <1,u,u,u,u,u,u,u>
10471   unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
10472   SDValue PrevOp;
10473   for (unsigned i = 0; i < Stages; ++i) {
10474     unsigned MaskEnd = (1 << i);
10475 
10476     if (Op.getOpcode() != CandidateBinOp)
10477       return PartialReduction(PrevOp, MaskEnd);
10478 
10479     SDValue Op0 = Op.getOperand(0);
10480     SDValue Op1 = Op.getOperand(1);
10481 
10482     ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
10483     if (Shuffle) {
10484       Op = Op1;
10485     } else {
10486       Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
10487       Op = Op0;
10488     }
10489 
10490     // The first operand of the shuffle should be the same as the other operand
10491     // of the binop.
10492     if (!Shuffle || Shuffle->getOperand(0) != Op)
10493       return PartialReduction(PrevOp, MaskEnd);
10494 
10495     // Verify the shuffle has the expected (at this stage of the pyramid) mask.
10496     for (int Index = 0; Index < (int)MaskEnd; ++Index)
10497       if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
10498         return PartialReduction(PrevOp, MaskEnd);
10499 
10500     PrevOp = Op;
10501   }
10502 
10503   // Handle subvector reductions, which tend to appear after the shuffle
10504   // reduction stages.
10505   while (Op.getOpcode() == CandidateBinOp) {
10506     unsigned NumElts = Op.getValueType().getVectorNumElements();
10507     SDValue Op0 = Op.getOperand(0);
10508     SDValue Op1 = Op.getOperand(1);
10509     if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
10510         Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
10511         Op0.getOperand(0) != Op1.getOperand(0))
10512       break;
10513     SDValue Src = Op0.getOperand(0);
10514     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
10515     if (NumSrcElts != (2 * NumElts))
10516       break;
10517     if (!(Op0.getConstantOperandAPInt(1) == 0 &&
10518           Op1.getConstantOperandAPInt(1) == NumElts) &&
10519         !(Op1.getConstantOperandAPInt(1) == 0 &&
10520           Op0.getConstantOperandAPInt(1) == NumElts))
10521       break;
10522     Op = Src;
10523   }
10524 
10525   BinOp = (ISD::NodeType)CandidateBinOp;
10526   return Op;
10527 }
10528 
10529 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
10530   assert(N->getNumValues() == 1 &&
10531          "Can't unroll a vector with multiple results!");
10532 
10533   EVT VT = N->getValueType(0);
10534   unsigned NE = VT.getVectorNumElements();
10535   EVT EltVT = VT.getVectorElementType();
10536   SDLoc dl(N);
10537 
10538   SmallVector<SDValue, 8> Scalars;
10539   SmallVector<SDValue, 4> Operands(N->getNumOperands());
10540 
10541   // If ResNE is 0, fully unroll the vector op.
10542   if (ResNE == 0)
10543     ResNE = NE;
10544   else if (NE > ResNE)
10545     NE = ResNE;
10546 
10547   unsigned i;
10548   for (i= 0; i != NE; ++i) {
10549     for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
10550       SDValue Operand = N->getOperand(j);
10551       EVT OperandVT = Operand.getValueType();
10552       if (OperandVT.isVector()) {
10553         // A vector operand; extract a single element.
10554         EVT OperandEltVT = OperandVT.getVectorElementType();
10555         Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT,
10556                               Operand, getVectorIdxConstant(i, dl));
10557       } else {
10558         // A scalar operand; just use it as is.
10559         Operands[j] = Operand;
10560       }
10561     }
10562 
10563     switch (N->getOpcode()) {
10564     default: {
10565       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
10566                                 N->getFlags()));
10567       break;
10568     }
10569     case ISD::VSELECT:
10570       Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
10571       break;
10572     case ISD::SHL:
10573     case ISD::SRA:
10574     case ISD::SRL:
10575     case ISD::ROTL:
10576     case ISD::ROTR:
10577       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
10578                                getShiftAmountOperand(Operands[0].getValueType(),
10579                                                      Operands[1])));
10580       break;
10581     case ISD::SIGN_EXTEND_INREG: {
10582       EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
10583       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
10584                                 Operands[0],
10585                                 getValueType(ExtVT)));
10586     }
10587     }
10588   }
10589 
10590   for (; i < ResNE; ++i)
10591     Scalars.push_back(getUNDEF(EltVT));
10592 
10593   EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
10594   return getBuildVector(VecVT, dl, Scalars);
10595 }
10596 
10597 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
10598     SDNode *N, unsigned ResNE) {
10599   unsigned Opcode = N->getOpcode();
10600   assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
10601           Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
10602           Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
10603          "Expected an overflow opcode");
10604 
10605   EVT ResVT = N->getValueType(0);
10606   EVT OvVT = N->getValueType(1);
10607   EVT ResEltVT = ResVT.getVectorElementType();
10608   EVT OvEltVT = OvVT.getVectorElementType();
10609   SDLoc dl(N);
10610 
10611   // If ResNE is 0, fully unroll the vector op.
10612   unsigned NE = ResVT.getVectorNumElements();
10613   if (ResNE == 0)
10614     ResNE = NE;
10615   else if (NE > ResNE)
10616     NE = ResNE;
10617 
10618   SmallVector<SDValue, 8> LHSScalars;
10619   SmallVector<SDValue, 8> RHSScalars;
10620   ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
10621   ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);
10622 
10623   EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
10624   SDVTList VTs = getVTList(ResEltVT, SVT);
10625   SmallVector<SDValue, 8> ResScalars;
10626   SmallVector<SDValue, 8> OvScalars;
10627   for (unsigned i = 0; i < NE; ++i) {
10628     SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
10629     SDValue Ov =
10630         getSelect(dl, OvEltVT, Res.getValue(1),
10631                   getBoolConstant(true, dl, OvEltVT, ResVT),
10632                   getConstant(0, dl, OvEltVT));
10633 
10634     ResScalars.push_back(Res);
10635     OvScalars.push_back(Ov);
10636   }
10637 
10638   ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
10639   OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));
10640 
10641   EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
10642   EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
10643   return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
10644                         getBuildVector(NewOvVT, dl, OvScalars));
10645 }
10646 
10647 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
10648                                                   LoadSDNode *Base,
10649                                                   unsigned Bytes,
10650                                                   int Dist) const {
10651   if (LD->isVolatile() || Base->isVolatile())
10652     return false;
10653   // TODO: probably too restrictive for atomics, revisit
10654   if (!LD->isSimple())
10655     return false;
10656   if (LD->isIndexed() || Base->isIndexed())
10657     return false;
10658   if (LD->getChain() != Base->getChain())
10659     return false;
10660   EVT VT = LD->getValueType(0);
10661   if (VT.getSizeInBits() / 8 != Bytes)
10662     return false;
10663 
10664   auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
10665   auto LocDecomp = BaseIndexOffset::match(LD, *this);
10666 
10667   int64_t Offset = 0;
10668   if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
10669     return (Dist * Bytes == Offset);
10670   return false;
10671 }
10672 
10673 /// InferPtrAlignment - Infer alignment of a load / store address. Return None
10674 /// if it cannot be inferred.
10675 MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
10676   // If this is a GlobalAddress + cst, return the alignment.
10677   const GlobalValue *GV = nullptr;
10678   int64_t GVOffset = 0;
10679   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
10680     unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
10681     KnownBits Known(PtrWidth);
10682     llvm::computeKnownBits(GV, Known, getDataLayout());
10683     unsigned AlignBits = Known.countMinTrailingZeros();
10684     if (AlignBits)
10685       return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
10686   }
10687 
10688   // If this is a direct reference to a stack slot, use information about the
10689   // stack slot's alignment.
10690   int FrameIdx = INT_MIN;
10691   int64_t FrameOffset = 0;
10692   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
10693     FrameIdx = FI->getIndex();
10694   } else if (isBaseWithConstantOffset(Ptr) &&
10695              isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
10696     // Handle FI+Cst
10697     FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
10698     FrameOffset = Ptr.getConstantOperandVal(1);
10699   }
10700 
10701   if (FrameIdx != INT_MIN) {
10702     const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
10703     return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
10704   }
10705 
10706   return None;
10707 }
10708 
10709 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
10710 /// which is split (or expanded) into two not necessarily identical pieces.
10711 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
10712   // Currently all types are split in half.
10713   EVT LoVT, HiVT;
10714   if (!VT.isVector())
10715     LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
10716   else
10717     LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
10718 
10719   return std::make_pair(LoVT, HiVT);
10720 }
10721 
10722 /// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
10723 /// type, dependent on an enveloping VT that has been split into two identical
10724 /// pieces. Sets the HiIsEmpty flag when hi type has zero storage size.
10725 std::pair<EVT, EVT>
10726 SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
10727                                        bool *HiIsEmpty) const {
10728   EVT EltTp = VT.getVectorElementType();
10729   // Examples:
10730   //   custom VL=8  with enveloping VL=8/8 yields 8/0 (hi empty)
10731   //   custom VL=9  with enveloping VL=8/8 yields 8/1
10732   //   custom VL=10 with enveloping VL=8/8 yields 8/2
10733   //   etc.
10734   ElementCount VTNumElts = VT.getVectorElementCount();
10735   ElementCount EnvNumElts = EnvVT.getVectorElementCount();
10736   assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
10737          "Mixing fixed width and scalable vectors when enveloping a type");
10738   EVT LoVT, HiVT;
10739   if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
10740     LoVT = EVT::getVectorVT(*getContext(), EltTp, EnvNumElts);
10741     HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
10742     *HiIsEmpty = false;
10743   } else {
10744     // Flag that hi type has zero storage size, but return split envelop type
10745     // (this would be easier if vector types with zero elements were allowed).
10746     LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
10747     HiVT = EVT::getVectorVT(*getContext(), EltTp, EnvNumElts);
10748     *HiIsEmpty = true;
10749   }
10750   return std::make_pair(LoVT, HiVT);
10751 }
10752 
10753 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
10754 /// low/high part.
10755 std::pair<SDValue, SDValue>
10756 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
10757                           const EVT &HiVT) {
10758   assert(LoVT.isScalableVector() == HiVT.isScalableVector() &&
10759          LoVT.isScalableVector() == N.getValueType().isScalableVector() &&
10760          "Splitting vector with an invalid mixture of fixed and scalable "
10761          "vector types");
10762   assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <=
10763              N.getValueType().getVectorMinNumElements() &&
10764          "More vector elements requested than available!");
10765   SDValue Lo, Hi;
10766   Lo =
10767       getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL));
10768   // For scalable vectors it is safe to use LoVT.getVectorMinNumElements()
10769   // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales
10770   // IDX with the runtime scaling factor of the result vector type. For
10771   // fixed-width result vectors, that runtime scaling factor is 1.
10772   Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
10773                getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
10774   return std::make_pair(Lo, Hi);
10775 }
10776 
10777 std::pair<SDValue, SDValue> SelectionDAG::SplitEVL(SDValue N, EVT VecVT,
10778                                                    const SDLoc &DL) {
10779   // Split the vector length parameter.
10780   // %evl -> umin(%evl, %halfnumelts) and usubsat(%evl - %halfnumelts).
10781   EVT VT = N.getValueType();
10782   assert(VecVT.getVectorElementCount().isKnownEven() &&
10783          "Expecting the mask to be an evenly-sized vector");
10784   unsigned HalfMinNumElts = VecVT.getVectorMinNumElements() / 2;
10785   SDValue HalfNumElts =
10786       VecVT.isFixedLengthVector()
10787           ? getConstant(HalfMinNumElts, DL, VT)
10788           : getVScale(DL, VT, APInt(VT.getScalarSizeInBits(), HalfMinNumElts));
10789   SDValue Lo = getNode(ISD::UMIN, DL, VT, N, HalfNumElts);
10790   SDValue Hi = getNode(ISD::USUBSAT, DL, VT, N, HalfNumElts);
10791   return std::make_pair(Lo, Hi);
10792 }
10793 
10794 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
10795 SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
10796   EVT VT = N.getValueType();
10797   EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
10798                                 NextPowerOf2(VT.getVectorNumElements()));
10799   return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
10800                  getVectorIdxConstant(0, DL));
10801 }
10802 
10803 void SelectionDAG::ExtractVectorElements(SDValue Op,
10804                                          SmallVectorImpl<SDValue> &Args,
10805                                          unsigned Start, unsigned Count,
10806                                          EVT EltVT) {
10807   EVT VT = Op.getValueType();
10808   if (Count == 0)
10809     Count = VT.getVectorNumElements();
10810   if (EltVT == EVT())
10811     EltVT = VT.getVectorElementType();
10812   SDLoc SL(Op);
10813   for (unsigned i = Start, e = Start + Count; i != e; ++i) {
10814     Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op,
10815                            getVectorIdxConstant(i, SL)));
10816   }
10817 }
10818 
10819 // getAddressSpace - Return the address space this GlobalAddress belongs to.
10820 unsigned GlobalAddressSDNode::getAddressSpace() const {
10821   return getGlobal()->getType()->getAddressSpace();
10822 }
10823 
10824 Type *ConstantPoolSDNode::getType() const {
10825   if (isMachineConstantPoolEntry())
10826     return Val.MachineCPVal->getType();
10827   return Val.ConstVal->getType();
10828 }
10829 
10830 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
10831                                         unsigned &SplatBitSize,
10832                                         bool &HasAnyUndefs,
10833                                         unsigned MinSplatBits,
10834                                         bool IsBigEndian) const {
10835   EVT VT = getValueType(0);
10836   assert(VT.isVector() && "Expected a vector type");
10837   unsigned VecWidth = VT.getSizeInBits();
10838   if (MinSplatBits > VecWidth)
10839     return false;
10840 
10841   // FIXME: The widths are based on this node's type, but build vectors can
10842   // truncate their operands.
10843   SplatValue = APInt(VecWidth, 0);
10844   SplatUndef = APInt(VecWidth, 0);
10845 
10846   // Get the bits. Bits with undefined values (when the corresponding element
10847   // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
10848   // in SplatValue. If any of the values are not constant, give up and return
10849   // false.
10850   unsigned int NumOps = getNumOperands();
10851   assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
10852   unsigned EltWidth = VT.getScalarSizeInBits();
10853 
10854   for (unsigned j = 0; j < NumOps; ++j) {
10855     unsigned i = IsBigEndian ? NumOps - 1 - j : j;
10856     SDValue OpVal = getOperand(i);
10857     unsigned BitPos = j * EltWidth;
10858 
10859     if (OpVal.isUndef())
10860       SplatUndef.setBits(BitPos, BitPos + EltWidth);
10861     else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
10862       SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
10863     else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
10864       SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
10865     else
10866       return false;
10867   }
10868 
10869   // The build_vector is all constants or undefs. Find the smallest element
10870   // size that splats the vector.
10871   HasAnyUndefs = (SplatUndef != 0);
10872 
10873   // FIXME: This does not work for vectors with elements less than 8 bits.
10874   while (VecWidth > 8) {
10875     unsigned HalfSize = VecWidth / 2;
10876     APInt HighValue = SplatValue.extractBits(HalfSize, HalfSize);
10877     APInt LowValue = SplatValue.extractBits(HalfSize, 0);
10878     APInt HighUndef = SplatUndef.extractBits(HalfSize, HalfSize);
10879     APInt LowUndef = SplatUndef.extractBits(HalfSize, 0);
10880 
10881     // If the two halves do not match (ignoring undef bits), stop here.
10882     if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
10883         MinSplatBits > HalfSize)
10884       break;
10885 
10886     SplatValue = HighValue | LowValue;
10887     SplatUndef = HighUndef & LowUndef;
10888 
10889     VecWidth = HalfSize;
10890   }
10891 
10892   SplatBitSize = VecWidth;
10893   return true;
10894 }
10895 
10896 SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
10897                                          BitVector *UndefElements) const {
10898   unsigned NumOps = getNumOperands();
10899   if (UndefElements) {
10900     UndefElements->clear();
10901     UndefElements->resize(NumOps);
10902   }
10903   assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
10904   if (!DemandedElts)
10905     return SDValue();
10906   SDValue Splatted;
10907   for (unsigned i = 0; i != NumOps; ++i) {
10908     if (!DemandedElts[i])
10909       continue;
10910     SDValue Op = getOperand(i);
10911     if (Op.isUndef()) {
10912       if (UndefElements)
10913         (*UndefElements)[i] = true;
10914     } else if (!Splatted) {
10915       Splatted = Op;
10916     } else if (Splatted != Op) {
10917       return SDValue();
10918     }
10919   }
10920 
10921   if (!Splatted) {
10922     unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
10923     assert(getOperand(FirstDemandedIdx).isUndef() &&
10924            "Can only have a splat without a constant for all undefs.");
10925     return getOperand(FirstDemandedIdx);
10926   }
10927 
10928   return Splatted;
10929 }
10930 
10931 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
10932   APInt DemandedElts = APInt::getAllOnes(getNumOperands());
10933   return getSplatValue(DemandedElts, UndefElements);
10934 }
10935 
10936 bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
10937                                             SmallVectorImpl<SDValue> &Sequence,
10938                                             BitVector *UndefElements) const {
10939   unsigned NumOps = getNumOperands();
10940   Sequence.clear();
10941   if (UndefElements) {
10942     UndefElements->clear();
10943     UndefElements->resize(NumOps);
10944   }
10945   assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
10946   if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
10947     return false;
10948 
10949   // Set the undefs even if we don't find a sequence (like getSplatValue).
10950   if (UndefElements)
10951     for (unsigned I = 0; I != NumOps; ++I)
10952       if (DemandedElts[I] && getOperand(I).isUndef())
10953         (*UndefElements)[I] = true;
10954 
10955   // Iteratively widen the sequence length looking for repetitions.
10956   for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
10957     Sequence.append(SeqLen, SDValue());
10958     for (unsigned I = 0; I != NumOps; ++I) {
10959       if (!DemandedElts[I])
10960         continue;
10961       SDValue &SeqOp = Sequence[I % SeqLen];
10962       SDValue Op = getOperand(I);
10963       if (Op.isUndef()) {
10964         if (!SeqOp)
10965           SeqOp = Op;
10966         continue;
10967       }
10968       if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
10969         Sequence.clear();
10970         break;
10971       }
10972       SeqOp = Op;
10973     }
10974     if (!Sequence.empty())
10975       return true;
10976   }
10977 
10978   assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
10979   return false;
10980 }
10981 
10982 bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
10983                                             BitVector *UndefElements) const {
10984   APInt DemandedElts = APInt::getAllOnes(getNumOperands());
10985   return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
10986 }
10987 
10988 ConstantSDNode *
10989 BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
10990                                         BitVector *UndefElements) const {
10991   return dyn_cast_or_null<ConstantSDNode>(
10992       getSplatValue(DemandedElts, UndefElements));
10993 }
10994 
10995 ConstantSDNode *
10996 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
10997   return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
10998 }
10999 
11000 ConstantFPSDNode *
11001 BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
11002                                           BitVector *UndefElements) const {
11003   return dyn_cast_or_null<ConstantFPSDNode>(
11004       getSplatValue(DemandedElts, UndefElements));
11005 }
11006 
11007 ConstantFPSDNode *
11008 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
11009   return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
11010 }
11011 
11012 int32_t
11013 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
11014                                                    uint32_t BitWidth) const {
11015   if (ConstantFPSDNode *CN =
11016           dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
11017     bool IsExact;
11018     APSInt IntVal(BitWidth);
11019     const APFloat &APF = CN->getValueAPF();
11020     if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
11021             APFloat::opOK ||
11022         !IsExact)
11023       return -1;
11024 
11025     return IntVal.exactLogBase2();
11026   }
11027   return -1;
11028 }
11029 
11030 bool BuildVectorSDNode::getConstantRawBits(
11031     bool IsLittleEndian, unsigned DstEltSizeInBits,
11032     SmallVectorImpl<APInt> &RawBitElements, BitVector &UndefElements) const {
11033   // Early-out if this contains anything but Undef/Constant/ConstantFP.
11034   if (!isConstant())
11035     return false;
11036 
11037   unsigned NumSrcOps = getNumOperands();
11038   unsigned SrcEltSizeInBits = getValueType(0).getScalarSizeInBits();
11039   assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
11040          "Invalid bitcast scale");
11041 
11042   // Extract raw src bits.
11043   SmallVector<APInt> SrcBitElements(NumSrcOps,
11044                                     APInt::getNullValue(SrcEltSizeInBits));
11045   BitVector SrcUndeElements(NumSrcOps, false);
11046 
11047   for (unsigned I = 0; I != NumSrcOps; ++I) {
11048     SDValue Op = getOperand(I);
11049     if (Op.isUndef()) {
11050       SrcUndeElements.set(I);
11051       continue;
11052     }
11053     auto *CInt = dyn_cast<ConstantSDNode>(Op);
11054     auto *CFP = dyn_cast<ConstantFPSDNode>(Op);
11055     assert((CInt || CFP) && "Unknown constant");
11056     SrcBitElements[I] =
11057         CInt ? CInt->getAPIntValue().truncOrSelf(SrcEltSizeInBits)
11058              : CFP->getValueAPF().bitcastToAPInt();
11059   }
11060 
11061   // Recast to dst width.
11062   recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
11063                 SrcBitElements, UndefElements, SrcUndeElements);
11064   return true;
11065 }
11066 
11067 void BuildVectorSDNode::recastRawBits(bool IsLittleEndian,
11068                                       unsigned DstEltSizeInBits,
11069                                       SmallVectorImpl<APInt> &DstBitElements,
11070                                       ArrayRef<APInt> SrcBitElements,
11071                                       BitVector &DstUndefElements,
11072                                       const BitVector &SrcUndefElements) {
11073   unsigned NumSrcOps = SrcBitElements.size();
11074   unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
11075   assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
11076          "Invalid bitcast scale");
11077   assert(NumSrcOps == SrcUndefElements.size() &&
11078          "Vector size mismatch");
11079 
11080   unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
11081   DstUndefElements.clear();
11082   DstUndefElements.resize(NumDstOps, false);
11083   DstBitElements.assign(NumDstOps, APInt::getNullValue(DstEltSizeInBits));
11084 
11085   // Concatenate src elements constant bits together into dst element.
11086   if (SrcEltSizeInBits <= DstEltSizeInBits) {
11087     unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
11088     for (unsigned I = 0; I != NumDstOps; ++I) {
11089       DstUndefElements.set(I);
11090       APInt &DstBits = DstBitElements[I];
11091       for (unsigned J = 0; J != Scale; ++J) {
11092         unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
11093         if (SrcUndefElements[Idx])
11094           continue;
11095         DstUndefElements.reset(I);
11096         const APInt &SrcBits = SrcBitElements[Idx];
11097         assert(SrcBits.getBitWidth() == SrcEltSizeInBits &&
11098                "Illegal constant bitwidths");
11099         DstBits.insertBits(SrcBits, J * SrcEltSizeInBits);
11100       }
11101     }
11102     return;
11103   }
11104 
11105   // Split src element constant bits into dst elements.
11106   unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
11107   for (unsigned I = 0; I != NumSrcOps; ++I) {
11108     if (SrcUndefElements[I]) {
11109       DstUndefElements.set(I * Scale, (I + 1) * Scale);
11110       continue;
11111     }
11112     const APInt &SrcBits = SrcBitElements[I];
11113     for (unsigned J = 0; J != Scale; ++J) {
11114       unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
11115       APInt &DstBits = DstBitElements[Idx];
11116       DstBits = SrcBits.extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
11117     }
11118   }
11119 }
11120 
11121 bool BuildVectorSDNode::isConstant() const {
11122   for (const SDValue &Op : op_values()) {
11123     unsigned Opc = Op.getOpcode();
11124     if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
11125       return false;
11126   }
11127   return true;
11128 }
11129 
11130 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
11131   // Find the first non-undef value in the shuffle mask.
11132   unsigned i, e;
11133   for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
11134     /* search */;
11135 
11136   // If all elements are undefined, this shuffle can be considered a splat
11137   // (although it should eventually get simplified away completely).
11138   if (i == e)
11139     return true;
11140 
11141   // Make sure all remaining elements are either undef or the same as the first
11142   // non-undef value.
11143   for (int Idx = Mask[i]; i != e; ++i)
11144     if (Mask[i] >= 0 && Mask[i] != Idx)
11145       return false;
11146   return true;
11147 }
11148 
11149 // Returns the SDNode if it is a constant integer BuildVector
11150 // or constant integer.
11151 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
11152   if (isa<ConstantSDNode>(N))
11153     return N.getNode();
11154   if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
11155     return N.getNode();
11156   // Treat a GlobalAddress supporting constant offset folding as a
11157   // constant integer.
11158   if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
11159     if (GA->getOpcode() == ISD::GlobalAddress &&
11160         TLI->isOffsetFoldingLegal(GA))
11161       return GA;
11162   if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
11163       isa<ConstantSDNode>(N.getOperand(0)))
11164     return N.getNode();
11165   return nullptr;
11166 }
11167 
11168 // Returns the SDNode if it is a constant float BuildVector
11169 // or constant float.
11170 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
11171   if (isa<ConstantFPSDNode>(N))
11172     return N.getNode();
11173 
11174   if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
11175     return N.getNode();
11176 
11177   if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
11178       isa<ConstantFPSDNode>(N.getOperand(0)))
11179     return N.getNode();
11180 
11181   return nullptr;
11182 }
11183 
11184 void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
11185   assert(!Node->OperandList && "Node already has operands");
11186   assert(SDNode::getMaxNumOperands() >= Vals.size() &&
11187          "too many operands to fit into SDNode");
11188   SDUse *Ops = OperandRecycler.allocate(
11189       ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);
11190 
11191   bool IsDivergent = false;
11192   for (unsigned I = 0; I != Vals.size(); ++I) {
11193     Ops[I].setUser(Node);
11194     Ops[I].setInitial(Vals[I]);
11195     if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
11196       IsDivergent |= Ops[I].getNode()->isDivergent();
11197   }
11198   Node->NumOperands = Vals.size();
11199   Node->OperandList = Ops;
11200   if (!TLI->isSDNodeAlwaysUniform(Node)) {
11201     IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
11202     Node->SDNodeBits.IsDivergent = IsDivergent;
11203   }
11204   checkForCycles(Node);
11205 }
11206 
11207 SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
11208                                      SmallVectorImpl<SDValue> &Vals) {
11209   size_t Limit = SDNode::getMaxNumOperands();
11210   while (Vals.size() > Limit) {
11211     unsigned SliceIdx = Vals.size() - Limit;
11212     auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
11213     SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
11214     Vals.erase(Vals.begin() + SliceIdx, Vals.end());
11215     Vals.emplace_back(NewTF);
11216   }
11217   return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
11218 }
11219 
11220 SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL,
11221                                         EVT VT, SDNodeFlags Flags) {
11222   switch (Opcode) {
11223   default:
11224     return SDValue();
11225   case ISD::ADD:
11226   case ISD::OR:
11227   case ISD::XOR:
11228   case ISD::UMAX:
11229     return getConstant(0, DL, VT);
11230   case ISD::MUL:
11231     return getConstant(1, DL, VT);
11232   case ISD::AND:
11233   case ISD::UMIN:
11234     return getAllOnesConstant(DL, VT);
11235   case ISD::SMAX:
11236     return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT);
11237   case ISD::SMIN:
11238     return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT);
11239   case ISD::FADD:
11240     return getConstantFP(-0.0, DL, VT);
11241   case ISD::FMUL:
11242     return getConstantFP(1.0, DL, VT);
11243   case ISD::FMINNUM:
11244   case ISD::FMAXNUM: {
11245     // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
11246     const fltSemantics &Semantics = EVTToAPFloatSemantics(VT);
11247     APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
11248                         !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
11249                         APFloat::getLargest(Semantics);
11250     if (Opcode == ISD::FMAXNUM)
11251       NeutralAF.changeSign();
11252 
11253     return getConstantFP(NeutralAF, DL, VT);
11254   }
11255   }
11256 }
11257 
11258 #ifndef NDEBUG
11259 static void checkForCyclesHelper(const SDNode *N,
11260                                  SmallPtrSetImpl<const SDNode*> &Visited,
11261                                  SmallPtrSetImpl<const SDNode*> &Checked,
11262                                  const llvm::SelectionDAG *DAG) {
11263   // If this node has already been checked, don't check it again.
11264   if (Checked.count(N))
11265     return;
11266 
11267   // If a node has already been visited on this depth-first walk, reject it as
11268   // a cycle.
11269   if (!Visited.insert(N).second) {
11270     errs() << "Detected cycle in SelectionDAG\n";
11271     dbgs() << "Offending node:\n";
11272     N->dumprFull(DAG); dbgs() << "\n";
11273     abort();
11274   }
11275 
11276   for (const SDValue &Op : N->op_values())
11277     checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
11278 
11279   Checked.insert(N);
11280   Visited.erase(N);
11281 }
11282 #endif
11283 
11284 void llvm::checkForCycles(const llvm::SDNode *N,
11285                           const llvm::SelectionDAG *DAG,
11286                           bool force) {
11287 #ifndef NDEBUG
11288   bool check = force;
11289 #ifdef EXPENSIVE_CHECKS
11290   check = true;
11291 #endif  // EXPENSIVE_CHECKS
11292   if (check) {
11293     assert(N && "Checking nonexistent SDNode");
11294     SmallPtrSet<const SDNode*, 32> visited;
11295     SmallPtrSet<const SDNode*, 32> checked;
11296     checkForCyclesHelper(N, visited, checked, DAG);
11297   }
11298 #endif  // !NDEBUG
11299 }
11300 
11301 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
11302   checkForCycles(DAG->getRoot().getNode(), DAG, force);
11303 }
11304